text stringlengths 4 1.02M | meta dict |
|---|---|
from os import environ
from django.core.management.base import BaseCommand, CommandError
from seed_services_client import StageBasedMessagingApiClient
from registrations.models import SubscriptionRequest
from ._utils import validate_and_return_url
class Command(BaseCommand):
    """Fast-forward SBM subscriptions.

    For every SubscriptionRequest, locate the earliest matching subscription
    in the Stage Based Messaging service and copy the request's
    next_sequence_number into that subscription's initial_sequence_number.
    """

    help = (
        "This command will loop all subscription requests and find the "
        "corresponding subscription in SBM and update the "
        "initial_sequence_number field, we need this to fast forward the "
        "subscription."
    )

    def add_arguments(self, parser):
        # Both options fall back to environment variables so the command can
        # run unattended in deployed environments.
        parser.add_argument(
            "--sbm-url",
            dest="sbm_url",
            type=validate_and_return_url,
            default=environ.get("STAGE_BASED_MESSAGING_URL"),
            help=("The Stage Based Messaging Service to verify " "subscriptions for."),
        )
        parser.add_argument(
            "--sbm-token",
            dest="sbm_token",
            type=str,
            default=environ.get("STAGE_BASED_MESSAGING_TOKEN"),
            help=("The Authorization token for the SBM Service"),
        )

    def handle(self, *args, **kwargs):
        sbm_url = kwargs["sbm_url"]
        sbm_token = kwargs["sbm_token"]
        if not sbm_url:
            raise CommandError(
                "Please make sure either the STAGE_BASED_MESSAGING_URL "
                "environment variable or --sbm-url is set."
            )
        if not sbm_token:
            raise CommandError(
                "Please make sure either the STAGE_BASED_MESSAGING_TOKEN "
                "environment variable or --sbm-token is set."
            )

        client = StageBasedMessagingApiClient(sbm_token, sbm_url)
        updated = 0
        for request in SubscriptionRequest.objects.all().iterator():
            matches = client.get_subscriptions(
                {
                    "identity": request.identity,
                    "created_after": request.created_at,
                    "messageset": request.messageset,
                }
            )
            # Pick the earliest-created subscription among the matches.
            earliest = None
            earliest_date = None
            for candidate in matches["results"]:
                stamp = candidate["created_at"]
                if not earliest_date or stamp < earliest_date:
                    earliest_date = stamp
                    earliest = candidate
            if earliest:
                client.update_subscription(
                    earliest["id"],
                    {"initial_sequence_number": request.next_sequence_number},
                )
                updated += 1
            else:
                self.warning("Subscription not found: %s" % (request.identity,))
        self.success("Updated %d subscriptions." % (updated,))

    def log(self, level, msg):
        # ``level`` is a Django style function (e.g. self.style.WARNING).
        self.stdout.write(level(msg))

    def warning(self, msg):
        self.log(self.style.WARNING, msg)

    def success(self, msg):
        self.log(self.style.SUCCESS, msg)
| {
"content_hash": "519da937b6de003162c477b174faee8f",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 87,
"avg_line_length": 32.84615384615385,
"alnum_prop": 0.5680829708932753,
"repo_name": "praekeltfoundation/ndoh-hub",
"id": "0a9a73ff069ae9e3db6a0a70ddf396cc308baf3b",
"size": "2989",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "registrations/management/commands/update_initial_sequence.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "450"
},
{
"name": "HTML",
"bytes": "2200"
},
{
"name": "Python",
"bytes": "957306"
},
{
"name": "Shell",
"bytes": "2796"
}
],
"symlink_target": ""
} |
import sys
import unittest
from nose.config import Config
from nose.plugins import debug
from optparse import OptionParser
from StringIO import StringIO
class StubPdb:
    """Test double for the pdb module: records whether post_mortem ran."""

    called = False

    def post_mortem(self, tb):
        # Flag the call instead of starting an interactive debugger session.
        self.called = True
class TestPdbPlugin(unittest.TestCase):
    """Unit tests for nose's --pdb plugin (nose.plugins.debug)."""

    def setUp(self):
        # Swap the real pdb module for a stub so no debugger actually starts,
        # and remember sys.stdout so tearDown can restore it.
        self._pdb = debug.pdb
        self._so = sys.stdout
        debug.pdb = StubPdb()

    def tearDown(self):
        # Undo the monkey-patching done in setUp.
        debug.pdb = self._pdb
        sys.stdout = self._so

    def test_plugin_api(self):
        # Bare attribute access: raises AttributeError if the plugin lacks
        # any part of the expected interface.
        p = debug.Pdb()
        p.addOptions
        p.configure
        p.addError
        p.addFailure

    def test_plugin_calls_pdb(self):
        p = debug.Pdb()
        try:
            raise Exception("oops")
        except:
            err = sys.exc_info()
        p.enabled = True
        p.enabled_for_errors = True
        p.enabled_for_failures = True
        # Both the error and the failure hooks must invoke pdb.post_mortem.
        p.addError(None, err)
        assert debug.pdb.called, "Did not call pdb.post_mortem on error"
        debug.pdb.called = False
        p.addFailure(None, err)
        assert debug.pdb.called, "Did not call pdb.post_mortem on failure"

    def test_command_line_options_enable(self):
        # --pdb and --pdb-failures switch the plugin on.
        parser = OptionParser()
        p = debug.Pdb()
        p.addOptions(parser)
        options, args = parser.parse_args(['test_configuration',
                                           '--pdb',
                                           '--pdb-failures'])
        p.configure(options, Config())
        assert p.enabled
        assert p.enabled_for_errors
        assert p.enabled_for_failures

    def test_disabled_by_default(self):
        # Without flags or environment variables the plugin stays off.
        p = debug.Pdb()
        assert not p.enabled
        assert not p.enabled_for_failures
        parser = OptionParser()
        p.addOptions(parser)
        options, args = parser.parse_args(['test_configuration'])
        p.configure(options, Config())
        assert not p.enabled
        assert not p.enabled_for_errors
        assert not p.enabled_for_failures

    def test_env_settings_enable(self):
        # NOSE_PDB / NOSE_PDB_FAILURES environment variables enable the
        # plugin just like the command-line flags do.
        p = debug.Pdb()
        assert not p.enabled
        assert not p.enabled_for_failures
        env = {'NOSE_PDB': '1',
               'NOSE_PDB_FAILURES': '1'}
        parser = OptionParser()
        p.addOptions(parser, env)
        options, args = parser.parse_args(['test_configuration'])
        p.configure(options, Config())
        assert p.enabled
        assert p.enabled_for_errors
        assert p.enabled_for_failures

    def test_real_stdout_restored_before_call(self):
        class CheckStdout(StubPdb):
            # Asserts from inside post_mortem: the plugin must hand the real
            # stdout back before dropping into the debugger.
            def post_mortem(self, tb):
                assert sys.stdout is sys.__stdout__, \
                    "sys.stdout was not restored to sys.__stdout__ " \
                    "before call"
        debug.pdb = CheckStdout()
        patch = StringIO()
        sys.stdout = patch
        p = debug.Pdb()
        p.enabled = True
        p.enabled_for_errors = True
        try:
            raise Exception("oops")
        except:
            err = sys.exc_info()
        p.addError(None, err)
        # After the hook returns, the captured stream must be reinstated.
        assert sys.stdout is patch, "sys.stdout was not reset after call"
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "f45681ffc52fb5b0219349f8b6ce2641",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 74,
"avg_line_length": 27.700854700854702,
"alnum_prop": 0.5541499537179883,
"repo_name": "dbbhattacharya/kitsune",
"id": "cdd43f23c4a6ef6c895a40120d5e477218ef7ac4",
"size": "3241",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "vendor/packages/nose/unit_tests/test_pdb_plugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
} |
"""Class to perform under-sampling based on the instance hardness
threshold."""
from __future__ import print_function
from __future__ import division
import numpy as np
from collections import Counter
from sklearn.cross_validation import StratifiedKFold
from ..base import BaseBinarySampler
# Classifier identifiers accepted by InstanceHardnessThreshold(estimator=...).
ESTIMATOR_KIND = ('knn', 'decision-tree', 'random-forest', 'adaboost',
                  'gradient-boosting', 'linear-svm')
class InstanceHardnessThreshold(BaseBinarySampler):
    """Class to perform under-sampling based on the instance hardness
    threshold.

    Parameters
    ----------
    estimator : str, optional (default='linear-svm')
        Classifier to be used in to estimate instance hardness of the samples.
        The choices are the following: 'knn',
        'decision-tree', 'random-forest', 'adaboost', 'gradient-boosting'
        and 'linear-svm'.

    ratio : str or float, optional (default='auto')
        If 'auto', the ratio will be defined automatically to balance
        the dataset. Otherwise, the ratio is defined as the number
        of samples in the minority class over the the number of samples
        in the majority class.

    return_indices : bool, optional (default=False)
        Whether or not to return the indices of the samples randomly
        selected from the majority class.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random.

    cv : int, optional (default=5)
        Number of folds to be used when estimating samples' instance hardness.

    n_jobs : int, optional (default=-1)
        The number of threads to open if possible.

    Attributes
    ----------
    min_c_ : str or int
        The identifier of the minority class.

    maj_c_ : str or int
        The identifier of the majority class.

    stats_c_ : dict of str/int : int
        A dictionary in which the number of occurences of each class is
        reported.

    cv : int, optional (default=5)
        Number of folds used when estimating samples' instance hardness.

    X_shape_ : tuple of int
        Shape of the data `X` during fitting.

    Notes
    -----
    The method is based on [1]_.

    This class does not support multi-class.

    Examples
    --------

    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.under_sampling import InstanceHardnessThreshold
    >>> X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
    ...                            n_informative=3, n_redundant=1, flip_y=0,
    ...                            n_features=20, n_clusters_per_class=1,
    ...                            n_samples=1000, random_state=10)
    >>> print('Original dataset shape {}'.format(Counter(y)))
    Original dataset shape Counter({1: 900, 0: 100})
    >>> iht = InstanceHardnessThreshold(random_state=42)
    >>> X_res, y_res = iht.fit_sample(X, y)
    >>> print('Resampled dataset shape {}'.format(Counter(y_res)))
    Resampled dataset shape Counter({1: 840, 0: 100})

    References
    ----------
    .. [1] D. Smith, Michael R., Tony Martinez, and Christophe Giraud-Carrier.
       "An instance level analysis of data complexity." Machine learning
       95.2 (2014): 225-256.

    """

    def __init__(self, estimator='linear-svm', ratio='auto',
                 return_indices=False, random_state=None, cv=5, n_jobs=-1,
                 **kwargs):
        super(InstanceHardnessThreshold, self).__init__(ratio=ratio)
        self.estimator = estimator
        self.return_indices = return_indices
        self.random_state = random_state
        # Extra kwargs are forwarded verbatim to the underlying classifier.
        self.kwargs = kwargs
        self.cv = cv
        self.n_jobs = n_jobs

    def _sample(self, X, y):
        """Resample the dataset.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Matrix containing the data which have to be sampled.

        y : ndarray, shape (n_samples, )
            Corresponding label for each sample in X.

        Returns
        -------
        X_resampled : ndarray, shape (n_samples_new, n_features)
            The array containing the resampled data.

        y_resampled : ndarray, shape (n_samples_new)
            The corresponding label of `X_resampled`

        idx_under : ndarray, shape (n_samples_new, )
            If `return_indices` is `True`, the integer indices of the samples
            that were kept.

        """
        if self.estimator not in ESTIMATOR_KIND:
            raise NotImplementedError

        # Select the appropriate classifier. Imports are deferred so only
        # the requested estimator's module is loaded.
        if self.estimator == 'knn':
            from sklearn.neighbors import KNeighborsClassifier
            estimator = KNeighborsClassifier(
                **self.kwargs)
        elif self.estimator == 'decision-tree':
            from sklearn.tree import DecisionTreeClassifier
            estimator = DecisionTreeClassifier(
                random_state=self.random_state,
                **self.kwargs)
        elif self.estimator == 'random-forest':
            from sklearn.ensemble import RandomForestClassifier
            estimator = RandomForestClassifier(
                random_state=self.random_state,
                **self.kwargs)
        elif self.estimator == 'adaboost':
            from sklearn.ensemble import AdaBoostClassifier
            estimator = AdaBoostClassifier(
                random_state=self.random_state,
                **self.kwargs)
        elif self.estimator == 'gradient-boosting':
            from sklearn.ensemble import GradientBoostingClassifier
            estimator = GradientBoostingClassifier(
                random_state=self.random_state,
                **self.kwargs)
        elif self.estimator == 'linear-svm':
            from sklearn.svm import SVC
            # probability=True is required for predict_proba below.
            estimator = SVC(probability=True,
                            random_state=self.random_state,
                            kernel='linear',
                            **self.kwargs)
        else:
            raise NotImplementedError

        # Create the different folds
        skf = StratifiedKFold(y, n_folds=self.cv, shuffle=False,
                              random_state=self.random_state)

        probabilities = np.zeros(y.shape[0], dtype=float)

        # Out-of-fold probability of each sample's *true* class; low values
        # mean the sample is "hard" for the classifier.
        for train_index, test_index in skf:
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]

            estimator.fit(X_train, y_train)

            probs = estimator.predict_proba(X_test)
            classes = estimator.classes_
            probabilities[test_index] = [
                probs[l, np.where(classes == c)[0][0]]
                for l, c in enumerate(y_test)]

        # Compute the number of cluster needed
        if self.ratio == 'auto':
            num_samples = self.stats_c_[self.min_c_]
        else:
            num_samples = int(self.stats_c_[self.min_c_] / self.ratio)

        # Find the percentile corresponding to the top num_samples
        threshold = np.percentile(
            probabilities[y != self.min_c_],
            (1. - (num_samples / self.stats_c_[self.maj_c_])) * 100.)

        # Keep every minority sample plus the "easiest" majority samples.
        mask = np.logical_or(probabilities >= threshold, y == self.min_c_)

        # Sample the data
        X_resampled = X[mask]
        y_resampled = y[mask]

        self.logger.info('Under-sampling performed: %s', Counter(
            y_resampled))

        # If we need to offer support for the indices
        if self.return_indices:
            idx_under = np.flatnonzero(mask)
            return X_resampled, y_resampled, idx_under
        else:
            return X_resampled, y_resampled
| {
"content_hash": "61d565cd2457e500caae884b5837a0e4",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 80,
"avg_line_length": 36.357798165137616,
"alnum_prop": 0.6018168054504164,
"repo_name": "dvro/imbalanced-learn",
"id": "5951c077336992a0600911a2a2f556a1a95107c8",
"size": "7926",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "imblearn/under_sampling/instance_hardness_threshold.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "235"
},
{
"name": "Makefile",
"bytes": "667"
},
{
"name": "Python",
"bytes": "376812"
},
{
"name": "Shell",
"bytes": "4610"
}
],
"symlink_target": ""
} |
import py, pytest
import sys
class TestPDB:
    """Acceptance tests for pytest's --pdb option and pytest.set_trace()."""

    def pytest_funcarg__pdblist(self, request):
        # Old-style funcarg fixture: patch the pdb plugin's post_mortem so
        # calls are recorded in a list instead of starting a debugger.
        monkeypatch = request.getfuncargvalue("monkeypatch")
        pdblist = []
        def mypdb(*args):
            pdblist.append(args)
        plugin = request.config.pluginmanager.getplugin('pdb')
        monkeypatch.setattr(plugin, 'post_mortem', mypdb)
        return pdblist

    def test_pdb_on_fail(self, testdir, pdblist):
        # A plain assertion failure must trigger exactly one post-mortem.
        rep = testdir.inline_runsource1('--pdb', """
            def test_func():
                assert 0
        """)
        assert rep.failed
        assert len(pdblist) == 1
        tb = py.code.Traceback(pdblist[0][0])
        # The traceback handed to post_mortem ends in the failing test.
        assert tb[-1].name == "test_func"

    def test_pdb_on_xfail(self, testdir, pdblist):
        # Expected failures must NOT drop into the debugger.
        rep = testdir.inline_runsource1('--pdb', """
            import pytest
            @pytest.mark.xfail
            def test_func():
                assert 0
        """)
        assert "xfail" in rep.keywords
        assert not pdblist

    def test_pdb_on_skip(self, testdir, pdblist):
        # Skips must NOT drop into the debugger.
        rep = testdir.inline_runsource1('--pdb', """
            import pytest
            def test_func():
                pytest.skip("hello")
        """)
        assert rep.skipped
        assert len(pdblist) == 0

    def test_pdb_on_BdbQuit(self, testdir, pdblist):
        # Quitting the debugger (BdbQuit) fails the test without recursing
        # into another post-mortem session.
        rep = testdir.inline_runsource1('--pdb', """
            import bdb
            def test_func():
                raise bdb.BdbQuit
        """)
        assert rep.failed
        assert len(pdblist) == 0

    def test_pdb_interaction(self, testdir):
        # Drive a real child pytest process and EOF out of the pdb prompt.
        p1 = testdir.makepyfile("""
            def test_1():
                i = 0
                assert i == 1
        """)
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect(".*def test_1")
        child.expect(".*i = 0")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read()
        assert "1 failed" in rest
        # The source listing must not be repeated after leaving pdb.
        assert "def test_1" not in rest
        if child.isalive():
            child.wait()

    def test_pdb_interaction_exception(self, testdir):
        p1 = testdir.makepyfile("""
            import pytest
            def globalfunc():
                pass
            def test_1():
                pytest.raises(ValueError, globalfunc)
        """)
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect(".*def test_1")
        child.expect(".*pytest.raises.*globalfunc")
        child.expect("(Pdb)")
        # Names from the test's frame must be resolvable at the prompt.
        child.sendline("globalfunc")
        child.expect(".*function")
        child.sendeof()
        child.expect("1 failed")
        if child.isalive():
            child.wait()

    def test_pdb_interaction_capturing_simple(self, testdir):
        p1 = testdir.makepyfile("""
            import pytest
            def test_1():
                i = 0
                print ("hello17")
                pytest.set_trace()
                x = 3
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.expect("x = 3")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read()
        assert "1 failed" in rest
        assert "def test_1" in rest
        assert "hello17" in rest # out is captured
        if child.isalive():
            child.wait()

    def test_pdb_and_capsys(self, testdir):
        p1 = testdir.makepyfile("""
            import pytest
            def test_1(capsys):
                print ("hello1")
                pytest.set_trace()
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        # capsys must keep working while stopped at the pdb prompt.
        child.send("capsys.readouterr()\n")
        child.expect("hello1")
        child.sendeof()
        rest = child.read()
        if child.isalive():
            child.wait()

    def test_pdb_interaction_doctest(self, testdir):
        p1 = testdir.makepyfile("""
            import pytest
            def function_1():
                '''
                >>> i = 0
                >>> assert i == 1
                '''
        """)
        child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
        child.expect("(Pdb)")
        # The doctest's namespace must be visible at the prompt.
        child.sendline('i')
        child.expect("0")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read()
        assert "1 failed" in rest
        if child.isalive():
            child.wait()

    def test_pdb_interaction_capturing_twice(self, testdir):
        p1 = testdir.makepyfile("""
            import pytest
            def test_1():
                i = 0
                print ("hello17")
                pytest.set_trace()
                x = 3
                print ("hello18")
                pytest.set_trace()
                x = 4
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.expect("x = 3")
        child.expect("(Pdb)")
        # 'c' continues to the second set_trace(); output capture must
        # resume between the two debugger sessions.
        child.sendline('c')
        child.expect("x = 4")
        child.sendeof()
        rest = child.read()
        assert "1 failed" in rest
        assert "def test_1" in rest
        assert "hello17" in rest # out is captured
        assert "hello18" in rest # out is captured
        if child.isalive():
            child.wait()

    def test_pdb_used_outside_test(self, testdir):
        # set_trace() in a plain python run (not under pytest) still works.
        p1 = testdir.makepyfile("""
            import pytest
            pytest.set_trace()
            x = 5
        """)
        child = testdir.spawn("%s %s" %(sys.executable, p1))
        child.expect("x = 5")
        child.sendeof()
        child.wait()

    def test_pdb_used_in_generate_tests(self, testdir):
        # set_trace() during collection (pytest_generate_tests) also works.
        p1 = testdir.makepyfile("""
            import pytest
            def pytest_generate_tests(metafunc):
                pytest.set_trace()
                x = 5
            def test_foo(a):
                pass
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("x = 5")
        child.sendeof()
        child.wait()

    def test_pdb_collection_failure_is_shown(self, testdir):
        # A collection error (NameError at import time) must be reported
        # even with --pdb on the command line.
        p1 = testdir.makepyfile("""xxx """)
        result = testdir.runpytest("--pdb", p1)
        result.stdout.fnmatch_lines([
            "*NameError*xxx*",
            "*1 error*",
        ])
| {
"content_hash": "107191ea39e7318f7625319d81dd8f55",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 71,
"avg_line_length": 30.524752475247524,
"alnum_prop": 0.49367499189101527,
"repo_name": "lotaku/pytest-2.3.5",
"id": "71781afb4ce432aae136d706b82e227af414e86b",
"size": "6166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/test_pdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12252"
},
{
"name": "Python",
"bytes": "816338"
}
],
"symlink_target": ""
} |
import sys
from django.core.management.commands.migrate import Command as MigrateCommand
from django.db import transaction
from tenant_schemas.utils import get_public_schema_name
def run_migrations(args, options, executor_codename, schema_name, allow_atomic=True):
    """Run Django's ``migrate`` command against a single tenant schema.

    Output is prefixed with ``[executor:schema]`` so interleaved logs from
    parallel executors stay attributable to a schema.
    """
    from django.core.management import color
    from django.core.management.base import OutputWrapper
    from django.db import connection

    style = color.color_style()

    def style_func(msg):
        return '[%s:%s] %s' % (
            style.NOTICE(executor_codename),
            style.NOTICE(schema_name),
            msg
        )

    stdout = OutputWrapper(sys.stdout)
    stderr = OutputWrapper(sys.stderr)
    stdout.style_func = style_func
    stderr.style_func = style_func

    if int(options.get('verbosity', 1)) >= 1:
        stdout.write(style.NOTICE("=== Running migrate for schema %s" % schema_name))

    connection.set_schema(schema_name)
    MigrateCommand(stdout=stdout, stderr=stderr).execute(*args, **options)

    try:
        transaction.commit()
        connection.close()
        connection.connection = None
    except transaction.TransactionManagementError:
        # Inside an atomic block connections must stay open; only re-raise
        # when the caller forbade running atomically.
        if not allow_atomic:
            raise

    connection.set_schema_to_public()
class MigrationExecutor(object):
    """Base strategy for migrating a collection of tenant schemas.

    Subclasses set ``codename`` and implement ``run_tenant_migrations``.
    """

    codename = None

    def __init__(self, args, options):
        self.args = args
        self.options = options

    def run_migrations(self, tenants):
        # The public schema, when present, is migrated first and removed
        # (in place) from the list handed to the tenant-specific strategy.
        public_schema_name = get_public_schema_name()
        if public_schema_name in tenants:
            run_migrations(self.args, self.options, self.codename, public_schema_name)
            tenants.remove(public_schema_name)
        self.run_tenant_migrations(tenants)

    def run_tenant_migrations(self, tenant):
        raise NotImplementedError
| {
"content_hash": "5728ca5af0d7abc6361da07bba3496f3",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 86,
"avg_line_length": 29.421875,
"alnum_prop": 0.6691449814126395,
"repo_name": "mcanaves/django-tenant-schemas",
"id": "5ce6c599dc118e0d436d747809c5c5c40bec8e82",
"size": "1883",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tenant_schemas/migration_executors/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106800"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class CalendarioConfig(AppConfig):
    """Django app configuration for the frequencia.calendario app."""
    name = 'frequencia.calendario'
| {
"content_hash": "b939f67470c4a69339be0e7cfcfeaf2f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 34,
"avg_line_length": 21.2,
"alnum_prop": 0.7830188679245284,
"repo_name": "bczmufrn/frequencia",
"id": "dd5fccb7ce863ce6c15ecb817c39222e43357a90",
"size": "106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frequencia/calendario/apps.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28546"
},
{
"name": "HTML",
"bytes": "182716"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "94322"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
# cffi build definitions for a minimal binding to the macOS Security
# framework's SecItem API (keychain item attribute constants).

# C headers the generated binding source must include.
INCLUDES = """
#include <Security/SecItem.h>
"""

# Constant declarations exposed to Python.
TYPES = """
const CFTypeRef kSecAttrKeyType;
const CFTypeRef kSecAttrKeySizeInBits;
const CFTypeRef kSecAttrIsPermanent;
const CFTypeRef kSecAttrKeyTypeRSA;
const CFTypeRef kSecAttrKeyTypeDSA;
const CFTypeRef kSecUseKeychain;
"""

# No functions, macros, or customizations are bound from this header.
FUNCTIONS = """
"""

MACROS = """
"""

CUSTOMIZATIONS = """
"""

# Names that only exist under certain build conditions (none here).
CONDITIONAL_NAMES = {}
| {
"content_hash": "903ce0ac5604672f85c5c9a3c1699a4e",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 64,
"avg_line_length": 17.4,
"alnum_prop": 0.7402298850574712,
"repo_name": "viraptor/cryptography",
"id": "ac3dad3ffadb3c5ee920e61a01955bae0c72e12b",
"size": "981",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cryptography/hazmat/bindings/commoncrypto/secitem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from test.parser.pattern.matching.base import PatternMatcherBaseClass
class PatternMatcherISetTests(PatternMatcherBaseClass):
    """Tests matching of <iset> (inline set) elements in AIML patterns."""

    def _assert_matches_template_1(self, sentence):
        # Match ``sentence`` against the graph and verify it resolved to the
        # template "1". Extracted to remove the duplicated assertion block.
        context = self.match_sentence(sentence, topic="*", that="*")
        self.assertIsNotNone(context)
        self.assertIsNotNone(context.template_node())
        self.assertEqual("1", context.template_node().template.word)

    def test_basic_iset_match(self):
        # A single pattern containing an <iset> must match each word listed
        # in the set.
        self.add_pattern_to_graph(pattern="I AM A <iset>MAN, WOMAN</iset>", topic="*", that="*", template="1")
        self._assert_matches_template_1("I AM A MAN")
        self._assert_matches_template_1("I AM A WOMAN")
| {
"content_hash": "6fc21a88f366a34d9d2d31dbbe502715",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 110,
"avg_line_length": 44.05882352941177,
"alnum_prop": 0.678237650200267,
"repo_name": "CHT5/program-y",
"id": "72bd7d0edde0c85f72b736e2d0a177f7950bd5bb",
"size": "750",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/test/parser/pattern/matching/test_iset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "937"
},
{
"name": "HTML",
"bytes": "1580"
},
{
"name": "Python",
"bytes": "1027605"
},
{
"name": "Shell",
"bytes": "2835"
}
],
"symlink_target": ""
} |
import logging
import urllib3, socket, json
import time, math
# Parse config
from six.moves import configparser
# Load the bot configuration; ``wallet`` is the account configured for the
# bot (read here for module consumers even though this script does not use
# it directly).
config = configparser.ConfigParser()
config.read('bot.cfg')
wallet = config.get('main', 'wallet')
# MySQL requests
from common_mysql import mysql_select_accounts_list, mysql_select_accounts_list_extra
# Request to node
from common_rpc import *
# balances check
def _check_accounts(accounts):
    """Compare node balances against stored balances for one account list.

    Prints each mismatching account and its two balances, and returns a
    ``(inconsistent_count, summed_difference)`` tuple.
    """
    # NOTE(review): account rows appear to be tuples where index 1 is the
    # account id and index 3 the stored raw balance -- confirm against
    # common_mysql's SELECT column order.
    count = 0
    diff_summ = 0
    for account in accounts:
        balance = int(account_balance(account[1]))
        mysql_balance = int(account[3])
        diff = abs(balance - mysql_balance)
        diff_summ = diff_summ + diff
        if diff > 0:
            count = count + 1
            print(account[1])
            print('{0} {1}'.format(mysql_balance, balance))
    return count, diff_summ


def balance_check():
    """Audit wallet balances: node (RPC) vs MySQL records.

    Walks both the regular and the "extra" account lists, prints every
    account whose node balance differs from the stored one, then prints the
    elapsed time, the number of inconsistent accounts and the summed
    discrepancy. The original version re-queried the regular account list
    between the two loops and discarded the result; that redundant query
    and the duplicated loop body have been removed.
    """
    time_start = int(time.time())

    accounts_list = mysql_select_accounts_list()
    accounts_list_extra = mysql_select_accounts_list_extra()

    count_a, diff_a = _check_accounts(accounts_list)
    count_b, diff_b = _check_accounts(accounts_list_extra)
    count = count_a + count_b
    diff_summ = diff_a + diff_b

    time_end = int(time.time())
    total_time = time_end - time_start
    print (total_time)
    print ("{} inconsistent accounts\n{} inconsistent sum (xrb)".format(count, diff_summ))
balance_check() | {
"content_hash": "17fb4371020de952744240af7b2cf5d7",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 87,
"avg_line_length": 25.178571428571427,
"alnum_prop": 0.6971631205673758,
"repo_name": "SergiySW/RaiWalletBot",
"id": "5dc7c4a983169895c84f8e71df9d1509f4209abc",
"size": "1633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "balance_check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "138250"
}
],
"symlink_target": ""
} |
"""
Resources needed by pkg providers
"""
import copy
import fnmatch
import logging
import os
import pprint
import salt.utils.data
import salt.utils.versions
import salt.utils.yaml
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
__SUFFIX_NOT_NEEDED = ("x86_64", "noarch")
def _repack_pkgs(pkgs, normalize=True):
    """
    Repack packages specified using "pkgs" argument to pkg states into a single
    dictionary
    """
    # Use the provider's normalizer when available (and requested);
    # otherwise leave package names untouched.
    if normalize and "pkg.normalize_name" in __salt__:
        normalizer = __salt__["pkg.normalize_name"]
    else:
        def normalizer(pkgname):
            return pkgname

    repacked_pkgs = {}
    for pkg_name, pkg_version in salt.utils.data.repack_dictlist(pkgs).items():
        repacked_pkgs[normalizer(str(pkg_name))] = (
            str(pkg_version) if pkg_version is not None else pkg_version
        )

    # Check if there were collisions in names: a shrunken dict means two
    # entries collapsed onto the same (normalized) package name.
    if len(pkgs) != len(repacked_pkgs):
        raise SaltInvocationError(
            "You are passing a list of packages that contains duplicated packages names: {}. This cannot be processed. In case you are targeting different versions of the same package, please target them individually".format(
                pkgs
            )
        )
    return repacked_pkgs
def pack_sources(sources, normalize=True):
    """
    Accepts list of dicts (or a string representing a list of dicts) and packs
    the key/value pairs into a single dict.

    ``'[{"foo": "salt://foo.rpm"}, {"bar": "salt://bar.rpm"}]'`` would become
    ``{"foo": "salt://foo.rpm", "bar": "salt://bar.rpm"}``

    normalize : True
        Normalize the package name by removing the architecture, if the
        architecture of the package is different from the architecture of the
        operating system. The ability to disable this behavior is useful for
        poorly-created packages which include the architecture as an actual
        part of the name, such as kernel modules which match a specific kernel
        version.

        .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' pkg_resource.pack_sources '[{"foo": "salt://foo.rpm"}, {"bar": "salt://bar.rpm"}]'
    """
    if normalize and "pkg.normalize_name" in __salt__:
        normalizer = __salt__["pkg.normalize_name"]
    else:
        def normalizer(pkgname):
            return pkgname

    # A string argument is treated as YAML describing the list of dicts.
    if isinstance(sources, str):
        try:
            sources = salt.utils.yaml.safe_load(sources)
        except salt.utils.yaml.parser.ParserError as err:
            log.error(err)
            return {}

    packed = {}
    for entry in sources:
        # Each element must be a single-key dict; abort on malformed input.
        if (not isinstance(entry, dict)) or len(entry) != 1:
            log.error("Invalid input: %s", pprint.pformat(sources))
            log.error("Input must be a list of 1-element dicts")
            return {}
        key = next(iter(entry))
        packed[normalizer(key)] = entry[key]
    return packed
def parse_targets(
    name=None, pkgs=None, sources=None, saltenv="base", normalize=True, **kwargs
):
    """
    Parses the input to pkg.install and returns back the package(s) to be
    installed. Returns a list of packages, as well as a string noting whether
    the packages are to come from a repository or a binary package.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg_resource.parse_targets
    """
    if "__env__" in kwargs:
        # "env" is not supported; Use "saltenv".
        kwargs.pop("__env__")

    if __grains__["os"] == "MacOS" and sources:
        log.warning('Parameter "sources" ignored on MacOS hosts.')

    version = kwargs.get("version")

    # Exactly one targeting style is honored per call: pkgs, sources,
    # advisory_ids, or a comma-separated ``name``. Errors are reported by
    # returning (None, None) rather than raising.
    if pkgs and sources:
        log.error('Only one of "pkgs" and "sources" can be used.')
        return None, None

    elif "advisory_ids" in kwargs:
        if pkgs:
            log.error('Cannot use "advisory_ids" and "pkgs" at the same time')
            return None, None
        elif kwargs["advisory_ids"]:
            return kwargs["advisory_ids"], "advisory"
        else:
            # Empty advisory list: fall back to treating ``name`` as the id.
            return [name], "advisory"

    elif pkgs:
        if version is not None:
            log.warning(
                "'version' argument will be ignored for multiple package targets"
            )
        pkgs = _repack_pkgs(pkgs, normalize=normalize)
        if not pkgs:
            return None, None
        else:
            return pkgs, "repository"

    elif sources and __grains__["os"] != "MacOS":
        if version is not None:
            log.warning(
                "'version' argument will be ignored for multiple package targets"
            )
        sources = pack_sources(sources, normalize=normalize)
        if not sources:
            return None, None

        srcinfo = []
        for pkg_name, pkg_src in sources.items():
            if __salt__["config.valid_fileproto"](pkg_src):
                # Cache package from remote source (salt master, HTTP, FTP) and
                # append the cached path.
                srcinfo.append(__salt__["cp.cache_file"](pkg_src, saltenv))
            else:
                # Package file local to the minion, just append the path to the
                # package file.
                if not os.path.isabs(pkg_src):
                    raise SaltInvocationError(
                        "Path {} for package {} is either not absolute or "
                        "an invalid protocol".format(pkg_src, pkg_name)
                    )
                srcinfo.append(pkg_src)

        return srcinfo, "file"

    elif name:
        # Comma-separated package names; all targets share the single
        # ``version`` value (which may be None).
        if normalize:
            _normalize_name = __salt__.get(
                "pkg.normalize_name", lambda pkgname: pkgname
            )
            packed = {_normalize_name(x): version for x in name.split(",")}
        else:
            packed = {x: version for x in name.split(",")}
        return packed, "repository"

    else:
        log.error("No package sources provided")
        return None, None
def version(*names, **kwargs):
    """
    Common interface for obtaining the version of installed packages.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg_resource.version vim
        salt '*' pkg_resource.version foo bar baz
        salt '*' pkg_resource.version 'python*'
    """
    ret = {}
    versions_as_list = salt.utils.data.is_true(kwargs.pop("versions_as_list", False))
    pkg_glob = False
    if names:
        installed = __salt__["pkg.list_pkgs"](versions_as_list=True, **kwargs)
        for requested in names:
            if "*" not in requested:
                ret[requested] = installed.get(requested, [])
                continue
            # Glob pattern: expand against every installed package name.
            pkg_glob = True
            for hit in fnmatch.filter(installed, requested):
                ret[hit] = installed.get(hit, [])
    if not versions_as_list:
        __salt__["pkg_resource.stringify"](ret)
    # With a single non-glob result, return the bare version value rather
    # than a one-item dict.
    if len(ret) == 1 and not pkg_glob:
        try:
            return next(iter(ret.values()))
        except StopIteration:
            return ""
    return ret
def add_pkg(pkgs, name, pkgver):
    """
    Add a package to a dict of installed packages.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg_resource.add_pkg '{}' bind 9
    """
    try:
        versions = pkgs.setdefault(name, [])
        versions.append(pkgver)
    except AttributeError as exc:
        # ``pkgs`` was not dict-like; log the problem and carry on.
        log.exception(exc)
def sort_pkglist(pkgs):
    """
    Accepts a dict obtained from pkg.list_pkgs() and sorts in place the list of
    versions for any packages that have multiple versions installed, so that
    two package lists can be compared to one another.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg_resource.sort_pkglist '["3.45", "2.13"]'
    """
    # It doesn't matter that ['4.9','4.10'] would be sorted to ['4.10','4.9'],
    # so long as the sorting is consistent.
    try:
        for pkgname in pkgs:
            # set() drops duplicate version strings before sorting.
            pkgs[pkgname] = sorted(set(pkgs[pkgname]))
    except AttributeError as exc:
        log.exception(exc)
def stringify(pkgs):
    """
    Takes a dict of package name/version information and joins each list of
    installed versions into a string.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg_resource.stringify 'vim: 7.127'
    """
    try:
        for pkgname in pkgs:
            # Join the version list into one comma-separated string, in place.
            pkgs[pkgname] = ",".join(pkgs[pkgname])
    except AttributeError as exc:
        log.exception(exc)
def version_clean(verstr):
    """
    Clean the version string removing extra data.
    This function will simply try to call ``pkg.version_clean``.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg_resource.version_clean <version_string>
    """
    # Delegate to the platform provider when one is loaded; otherwise the
    # version string is returned untouched. Falsy input short-circuits
    # before __salt__ is consulted.
    can_delegate = bool(verstr) and "pkg.version_clean" in __salt__
    if can_delegate:
        return __salt__["pkg.version_clean"](verstr)
    return verstr
def version_compare(ver1, oper, ver2, ignore_epoch=False):
    """
    .. versionadded:: 3001

    Perform a version comparison, using (where available) platform-specific
    version comparison tools to make the comparison.

    ver1
        The first version to be compared
    oper
        One of `==`, `!=`, `>=`, `<=`, `>`, `<`
    ver2
        The second version to be compared

    .. note::
        To avoid shell interpretation, each of the above values should be
        quoted when this function is used on the CLI.

    ignore_epoch : False
        If ``True``, both package versions will have their epoch prefix
        stripped before comparison.

    This function is useful in Jinja templates, to perform specific actions
    when a package's version meets certain criteria. For example:

    .. code-block:: jinja

        {%- set postfix_version = salt.pkg.version('postfix') %}
        {%- if postfix_version and salt.pkg_resource.version_compare(postfix_version, '>=', '3.3', ignore_epoch=True) %}
        {#- do stuff #}
        {%- endif %}

    CLI Examples:

    .. code-block:: bash

        salt myminion pkg_resource.version_compare '3.5' '<=' '2.4'
        salt myminion pkg_resource.version_compare '3.5' '<=' '2.4' ignore_epoch=True
    """
    # Prefer the loaded provider's version_cmp (rpm/dpkg aware) when
    # available; salt.utils.versions.compare handles the None fallback.
    cmp_func = __salt__.get("version_cmp")
    return salt.utils.versions.compare(
        ver1, oper, ver2, ignore_epoch=ignore_epoch, cmp_func=cmp_func
    )
def check_extra_requirements(pkgname, pkgver):
    """
    Check if the installed package already has the given requirements.
    This function will return the result of ``pkg.check_extra_requirements`` if
    this function exists for the minion, otherwise it will return True.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg_resource.check_extra_requirements <pkgname> <extra_requirements>
    """
    # Guard clauses: nothing to check, or the provider does not implement
    # the check -- both cases are treated as "requirements satisfied".
    if not pkgver:
        return True
    if "pkg.check_extra_requirements" not in __salt__:
        return True
    return __salt__["pkg.check_extra_requirements"](pkgname, pkgver)
def format_pkg_list(packages, versions_as_list, attr):
    """
    Formats packages according to parameters for list_pkgs.

    Returns a new mapping; *packages* itself is never mutated. When *attr*
    is truthy each version entry stays a dict restricted to the requested
    attribute names; otherwise entries are collapsed to version strings
    (and then to comma-joined strings unless *versions_as_list* is set).
    """
    ret = copy.deepcopy(packages)
    if attr:
        requested_attr = {
            "epoch",
            "version",
            "release",
            "arch",
            "install_date",
            "install_date_time_t",
        }
        if attr != "all":
            # "version" and "arch" are always required to build the result.
            requested_attr &= set(attr + ["version"] + ["arch"])
        ret_attr = {}
        for name in ret:
            # Split "name.arch" style package names when the provider
            # supports it; otherwise keep the name as-is.
            if "pkg.parse_arch" in __salt__:
                parsed = __salt__["pkg.parse_arch"](name)
            else:
                parsed = {"name": name, "arch": None}
            real_name = parsed["name"]
            real_arch = parsed["arch"]
            versions = []
            pkgname = None
            for all_attr in ret[name]:
                filtered_attr = {
                    key: all_attr[key] for key in requested_attr if key in all_attr
                }
                versions.append(filtered_attr)
                # Prefer the arch-stripped name when this entry's arch
                # matches the one parsed out of the package name.
                if real_name and filtered_attr.get("arch", None) == real_arch:
                    pkgname = real_name
            ret_attr.setdefault(pkgname or name, []).extend(versions)
        return ret_attr
    for name in ret:
        ret[name] = [
            format_version(d["epoch"], d["version"], d["release"]) for d in ret[name]
        ]
    if not versions_as_list:
        stringify(ret)
    return ret
def format_version(epoch, version, release):
    """
    Formats a version string for list_pkgs.

    Builds ``[epoch:]version[-release]``: the epoch prefix and release
    suffix are each included only when truthy.
    """
    if epoch:
        formatted = "{}:{}".format(epoch, version)
    else:
        formatted = version
    if release:
        formatted += "-{}".format(release)
    return formatted
| {
"content_hash": "35208b8a2a54d59ff2c18f966f226867",
"timestamp": "",
"source": "github",
"line_count": 416,
"max_line_length": 225,
"avg_line_length": 30.276442307692307,
"alnum_prop": 0.5797538705835649,
"repo_name": "saltstack/salt",
"id": "a6fa5eecd3fa34da9f36b0e661decf3dec4dc57a",
"size": "12595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/modules/pkg_resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
} |
"""Module for some node classes. More nodes in scoped_nodes.py
"""
import abc
import builtins as builtins_mod
import itertools
import pprint
from functools import lru_cache
from functools import singledispatch as _singledispatch
from astroid import as_string
from astroid import bases
from astroid import context as contextmod
from astroid import decorators
from astroid import exceptions
from astroid import manager
from astroid import mixins
from astroid import util
# Name of the builtins module (kept as a constant for lookups elsewhere).
BUILTINS = builtins_mod.__name__
# Manager instance used below (e.g. NodeNG.infer reads
# MANAGER.max_inferable_values to cap inference results).
MANAGER = manager.AstroidManager()
@decorators.raise_if_nothing_inferred
def unpack_infer(stmt, context=None):
    """recursively generate nodes inferred by the given statement.
    If the inferred value is a list or a tuple, recurse on the elements

    Each ``return dict(node=..., context=...)`` below sets the generator's
    ``StopIteration`` value; presumably this is consumed by the
    ``raise_if_nothing_inferred`` decorator -- confirm in astroid.decorators.
    """
    # Containers: recurse element-wise instead of inferring the container.
    if isinstance(stmt, (List, Tuple)):
        for elt in stmt.elts:
            if elt is util.Uninferable:
                yield elt
                continue
            yield from unpack_infer(elt, context)
        return dict(node=stmt, context=context)
    # if inferred is a final node, return it and stop
    inferred = next(stmt.infer(context))
    if inferred is stmt:
        yield inferred
        return dict(node=stmt, context=context)
    # else, infer recursively, except Uninferable object that should be returned as is
    for inferred in stmt.infer(context):
        if inferred is util.Uninferable:
            yield inferred
        else:
            yield from unpack_infer(inferred, context)
    return dict(node=stmt, context=context)
def are_exclusive(
    stmt1, stmt2, exceptions=None
):  # pylint: disable=redefined-outer-name
    """return true if the two given statements are mutually exclusive
    `exceptions` may be a list of exception names. If specified, discard If
    branches and check one of the statement is in an exception handler catching
    one of the given exceptions.
    algorithm :
    1) index stmt1's parents
    2) climb among stmt2's parents until we find a common parent
    3) if the common parent is a If or TryExcept statement, look if nodes are
    in exclusive branches
    """
    # index stmt1's parents
    stmt1_parents = {}
    # children[p] remembers which direct child of parent p leads to stmt1
    # (and later to stmt2), so we can compare branches at the common parent.
    children = {}
    node = stmt1.parent
    previous = stmt1
    while node:
        stmt1_parents[node] = 1
        children[node] = previous
        previous = node
        node = node.parent
    # climb among stmt2's parents until we find a common parent
    node = stmt2.parent
    previous = stmt2
    while node:
        if node in stmt1_parents:
            # if the common parent is a If or TryExcept statement, look if
            # nodes are in exclusive branches
            if isinstance(node, If) and exceptions is None:
                # Different children of the If (e.g. body vs orelse) are
                # mutually exclusive; identity compare of the child sequences.
                if (
                    node.locate_child(previous)[1]
                    is not node.locate_child(children[node])[1]
                ):
                    return True
            elif isinstance(node, TryExcept):
                c2attr, c2node = node.locate_child(previous)
                c1attr, c1node = node.locate_child(children[node])
                if c1node is not c2node:
                    # One statement in the try body and the other in a
                    # handler that catches one of `exceptions`, or one in a
                    # handler and the other in the else clause -> exclusive.
                    first_in_body_caught_by_handlers = (
                        c2attr == "handlers"
                        and c1attr == "body"
                        and previous.catch(exceptions)
                    )
                    second_in_body_caught_by_handlers = (
                        c2attr == "body"
                        and c1attr == "handlers"
                        and children[node].catch(exceptions)
                    )
                    first_in_else_other_in_handlers = (
                        c2attr == "handlers" and c1attr == "orelse"
                    )
                    second_in_else_other_in_handlers = (
                        c2attr == "orelse" and c1attr == "handlers"
                    )
                    if any(
                        (
                            first_in_body_caught_by_handlers,
                            second_in_body_caught_by_handlers,
                            first_in_else_other_in_handlers,
                            second_in_else_other_in_handlers,
                        )
                    ):
                        return True
                elif c2attr == "handlers" and c1attr == "handlers":
                    # Two distinct handlers of the same try never both run.
                    return previous is not children[node]
            return False
        previous = node
        node = node.parent
    return False
# getitem() helpers.
# Sentinel distinguishing "bound could not be inferred" from a legitimate
# None slice bound (None is a valid value for an unspecified bound).
_SLICE_SENTINEL = object()
def _slice_value(index, context=None):
    """Get the value of the given slice index.

    Returns an int, None (an unspecified bound), or ``_SLICE_SENTINEL``
    when the bound cannot be determined.
    """
    if index is None:
        # An absent bound is a valid value, not an inference failure.
        return None
    if isinstance(index, Const):
        if isinstance(index.value, (int, type(None))):
            return index.value
        return _SLICE_SENTINEL
    # Not a constant: try to infer what the index actually is. Since we
    # can't return all the possible values, stop at the first one.
    try:
        inferred = next(index.infer(context=context))
    except exceptions.InferenceError:
        return _SLICE_SENTINEL
    if isinstance(inferred, Const) and isinstance(
        inferred.value, (int, type(None))
    ):
        return inferred.value
    # Sentinel, because None can be a valid return value (see above).
    return _SLICE_SENTINEL
def _infer_slice(node, context=None):
    """Build a concrete ``slice`` object from a Slice node.

    :raises AstroidTypeError: if any of the three bounds cannot be
        resolved by :func:`_slice_value`.
    """
    bounds = [
        _slice_value(part, context) for part in (node.lower, node.upper, node.step)
    ]
    if all(bound is not _SLICE_SENTINEL for bound in bounds):
        return slice(*bounds)
    raise exceptions.AstroidTypeError(
        message="Could not infer slice used in subscript",
        node=node,
        index=node.parent,
        context=context,
    )
def _container_getitem(instance, elts, index, context=None):
    """Get a slice or an item, using the given *index*, for the given sequence."""
    try:
        # Constant index: a plain element lookup.
        if isinstance(index, Const):
            return elts[index.value]
        # Slice index: produce a fresh node of the same container type
        # holding the sliced elements.
        if isinstance(index, Slice):
            index_slice = _infer_slice(index, context=context)
            sliced = instance.__class__()
            sliced.elts = elts[index_slice]
            sliced.parent = instance.parent
            return sliced
    except IndexError as exc:
        raise exceptions.AstroidIndexError(
            message="Index {index!s} out of range",
            node=instance,
            index=index,
            context=context,
        ) from exc
    except TypeError as exc:
        raise exceptions.AstroidTypeError(
            message="Type error {error!r}", node=instance, index=index, context=context
        ) from exc
    # Neither a Const nor a Slice: not a usable subscript.
    raise exceptions.AstroidTypeError("Could not use %s as subscript index" % index)
# Operator precedence table mirroring Python's grammar, lowest-binding
# first. Maps operator symbol (or node class name for operators without a
# symbol) -> precedence rank; used to decide where parentheses are needed
# when regenerating source text.
OP_PRECEDENCE = {
    op: precedence
    for precedence, ops in enumerate(
        [
            ["Lambda"],  # lambda x: x + 1
            ["IfExp"],  # 1 if True else 2
            ["or"],
            ["and"],
            ["not"],
            ["Compare"],  # in, not in, is, is not, <, <=, >, >=, !=, ==
            ["|"],
            ["^"],
            ["&"],
            ["<<", ">>"],
            ["+", "-"],
            ["*", "@", "/", "//", "%"],
            ["UnaryOp"],  # +, -, ~
            ["**"],
            ["Await"],
        ]
    )
    for op in ops
}
class NodeNG:
    """ A node of the new Abstract Syntax Tree (AST).
    This is the base class for all Astroid node classes.
    """
    # Class-level defaults; concrete node classes override as appropriate.
    is_statement = False
    """Whether this node indicates a statement.
    :type: bool
    """
    optional_assign = False  # True for For (and for Comprehension if py <3.0)
    """Whether this node optionally assigns a variable.
    This is for loop assignments because loop won't necessarily perform an
    assignment if the loop has no iterations.
    This is also the case from comprehensions in Python 2.
    :type: bool
    """
    is_function = False  # True for FunctionDef nodes
    """Whether this node indicates a function.
    :type: bool
    """
    is_lambda = False
    # Attributes below are set by the builder module or by raw factories
    lineno = None
    """The line that this node appears on in the source code.
    :type: int or None
    """
    col_offset = None
    """The column that this node appears on in the source code.
    :type: int or None
    """
    parent = None
    """The parent node in the syntax tree.
    :type: NodeNG or None
    """
    _astroid_fields = ()
    """Node attributes that contain child nodes.
    This is redefined in most concrete classes.
    :type: tuple(str)
    """
    _other_fields = ()
    """Node attributes that do not contain child nodes.
    :type: tuple(str)
    """
    _other_other_fields = ()
    """Attributes that contain AST-dependent fields.
    :type: tuple(str)
    """
    # instance specific inference function infer(node, context)
    _explicit_inference = None
    def __init__(self, lineno=None, col_offset=None, parent=None):
        """
        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None
        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None
        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.lineno = lineno
        self.col_offset = col_offset
        self.parent = parent
    def infer(self, context=None, **kwargs):
        """Get a generator of the inferred values.
        This is the main entry point to the inference system.
        .. seealso:: :ref:`inference`
        If the instance has some explicit inference function set, it will be
        called instead of the default interface.
        :returns: The inferred values.
        :rtype: iterable
        """
        if context is not None:
            # A parent node may have recorded a more specific context for
            # this child; prefer it when present.
            context = context.extra_context.get(self, context)
        if self._explicit_inference is not None:
            # explicit_inference is not bound, give it self explicitly
            try:
                # pylint: disable=not-callable
                return self._explicit_inference(self, context, **kwargs)
            except exceptions.UseInferenceDefault:
                pass
        if not context:
            return self._infer(context, **kwargs)
        # Memoize inference results on the context, keyed by everything
        # that can change the outcome for this node.
        key = (self, context.lookupname, context.callcontext, context.boundnode)
        if key in context.inferred:
            return iter(context.inferred[key])
        gen = context.cache_generator(key, self._infer(context, **kwargs))
        # Cap the number of results so pathological inference stays bounded.
        return util.limit_inference(gen, MANAGER.max_inferable_values)
    def _repr_name(self):
        """Get a name for nice representation.
        This is either :attr:`name`, :attr:`attrname`, or the empty string.
        :returns: The nice name.
        :rtype: str
        """
        names = {"name", "attrname"}
        # Only use name/attrname when they are plain strings, i.e. not
        # themselves child nodes listed in _astroid_fields.
        if all(name not in self._astroid_fields for name in names):
            return getattr(self, "name", getattr(self, "attrname", ""))
        return ""
    def __str__(self):
        rname = self._repr_name()
        cname = type(self).__name__
        if rname:
            string = "%(cname)s.%(rname)s(%(fields)s)"
            alignment = len(cname) + len(rname) + 2
        else:
            string = "%(cname)s(%(fields)s)"
            alignment = len(cname) + 1
        result = []
        for field in self._other_fields + self._astroid_fields:
            value = getattr(self, field)
            # Indent pprint continuation lines so every field's value lines
            # up under the opening parenthesis.
            width = 80 - len(field) - alignment
            lines = pprint.pformat(value, indent=2, width=width).splitlines(True)
            inner = [lines[0]]
            for line in lines[1:]:
                inner.append(" " * alignment + line)
            result.append("%s=%s" % (field, "".join(inner)))
        return string % {
            "cname": cname,
            "rname": rname,
            "fields": (",\n" + " " * alignment).join(result),
        }
    def __repr__(self):
        rname = self._repr_name()
        if rname:
            string = "<%(cname)s.%(rname)s l.%(lineno)s at 0x%(id)x>"
        else:
            string = "<%(cname)s l.%(lineno)s at 0x%(id)x>"
        return string % {
            "cname": type(self).__name__,
            "rname": rname,
            "lineno": self.fromlineno,
            "id": id(self),
        }
    def accept(self, visitor):
        """Visit this node using the given visitor."""
        # Dispatch by class name, e.g. visitor.visit_functiondef(self).
        func = getattr(visitor, "visit_" + self.__class__.__name__.lower())
        return func(self)
    def get_children(self):
        """Get the child nodes below this node.
        :returns: The children.
        :rtype: iterable(NodeNG)
        """
        for field in self._astroid_fields:
            attr = getattr(self, field)
            if attr is None:
                continue
            if isinstance(attr, (list, tuple)):
                yield from attr
            else:
                yield attr
    def last_child(self):
        """An optimized version of list(get_children())[-1]
        :returns: The last child, or None if no children exist.
        :rtype: NodeNG or None
        """
        # Walk the fields backwards; the first non-empty one holds the
        # last child.
        for field in self._astroid_fields[::-1]:
            attr = getattr(self, field)
            if not attr:  # None or empty list / tuple
                continue
            if isinstance(attr, (list, tuple)):
                return attr[-1]
            return attr
        return None
    def parent_of(self, node):
        """Check if this node is the parent of the given node.
        :param node: The node to check if it is the child.
        :type node: NodeNG
        :returns: True if this node is the parent of the given node,
            False otherwise.
        :rtype: bool
        """
        parent = node.parent
        while parent is not None:
            if self is parent:
                return True
            parent = parent.parent
        return False
    def statement(self):
        """The first parent node, including self, marked as statement node.
        :returns: The first parent statement.
        :rtype: NodeNG
        """
        if self.is_statement:
            return self
        return self.parent.statement()
    def frame(self):
        """The first parent frame node.
        A frame node is a :class:`Module`, :class:`FunctionDef`,
        or :class:`ClassDef`.
        :returns: The first parent frame node.
        :rtype: Module or FunctionDef or ClassDef
        """
        return self.parent.frame()
    def scope(self):
        """The first parent node defining a new scope.
        :returns: The first parent scope node.
        :rtype: Module or FunctionDef or ClassDef or Lambda or GenExpr
        """
        return self.parent.scope()
    def root(self):
        """Return the root node of the syntax tree.
        :returns: The root node.
        :rtype: Module
        """
        if self.parent:
            return self.parent.root()
        return self
    def child_sequence(self, child):
        """Search for the sequence that contains this child.
        :param child: The child node to search sequences for.
        :type child: NodeNG
        :returns: The sequence containing the given child node.
        :rtype: iterable(NodeNG)
        :raises AstroidError: If no sequence could be found that contains
            the given child.
        """
        for field in self._astroid_fields:
            node_or_sequence = getattr(self, field)
            if node_or_sequence is child:
                return [node_or_sequence]
            # /!\ compiler.ast Nodes have an __iter__ walking over child nodes
            if (
                isinstance(node_or_sequence, (tuple, list))
                and child in node_or_sequence
            ):
                return node_or_sequence
        msg = "Could not find %s in %s's children"
        raise exceptions.AstroidError(msg % (repr(child), repr(self)))
    def locate_child(self, child):
        """Find the field of this node that contains the given child.
        :param child: The child node to search fields for.
        :type child: NodeNG
        :returns: A tuple of the name of the field that contains the child,
            and the sequence or node that contains the child node.
        :rtype: tuple(str, iterable(NodeNG) or NodeNG)
        :raises AstroidError: If no field could be found that contains
            the given child.
        """
        for field in self._astroid_fields:
            node_or_sequence = getattr(self, field)
            # /!\ compiler.ast Nodes have an __iter__ walking over child nodes
            if child is node_or_sequence:
                return field, child
            if (
                isinstance(node_or_sequence, (tuple, list))
                and child in node_or_sequence
            ):
                return field, node_or_sequence
        msg = "Could not find %s in %s's children"
        raise exceptions.AstroidError(msg % (repr(child), repr(self)))
    # FIXME : should we merge child_sequence and locate_child ? locate_child
    # is only used in are_exclusive, child_sequence one time in pylint.
    def next_sibling(self):
        """The next sibling statement node.
        :returns: The next sibling statement node.
        :rtype: NodeNG or None
        """
        return self.parent.next_sibling()
    def previous_sibling(self):
        """The previous sibling statement.
        :returns: The previous sibling statement node.
        :rtype: NodeNG or None
        """
        return self.parent.previous_sibling()
    def nearest(self, nodes):
        """Get the node closest to this one from the given list of nodes.
        :param nodes: The list of nodes to search. All of these nodes must
            belong to the same module as this one. The list should be
            sorted by the line number of the nodes, smallest first.
        :type nodes: iterable(NodeNG)
        :returns: The node closest to this one in the source code,
            or None if one could not be found.
        :rtype: NodeNG or None
        """
        myroot = self.root()
        mylineno = self.fromlineno
        nearest = None, 0
        for node in nodes:
            assert node.root() is myroot, (
                "nodes %s and %s are not from the same module" % (self, node)
            )
            lineno = node.fromlineno
            # `nodes` is assumed sorted by line, so the first node past our
            # own line ends the search.
            if node.fromlineno > mylineno:
                break
            if lineno > nearest[1]:
                nearest = node, lineno
        # FIXME: raise an exception if nearest is None ?
        return nearest[0]
    # these are lazy because they're relatively expensive to compute for every
    # single node, and they rarely get looked at
    @decorators.cachedproperty
    def fromlineno(self):
        """The first line that this node appears on in the source code.
        :type: int or None
        """
        if self.lineno is None:
            return self._fixed_source_line()
        return self.lineno
    @decorators.cachedproperty
    def tolineno(self):
        """The last line that this node appears on in the source code.
        :type: int or None
        """
        if not self._astroid_fields:
            # can't have children
            lastchild = None
        else:
            lastchild = self.last_child()
        if lastchild is None:
            return self.fromlineno
        return lastchild.tolineno
    def _fixed_source_line(self):
        """Attempt to find the line that this node appears on.
        We need this method since not all nodes have :attr:`lineno` set.
        :returns: The line number of this node,
            or None if this could not be determined.
        :rtype: int or None
        """
        line = self.lineno
        _node = self
        # First descend into first children looking for a lineno ...
        try:
            while line is None:
                _node = next(_node.get_children())
                line = _node.lineno
        except StopIteration:
            # ... then fall back to climbing the parents.
            _node = self.parent
            while _node and line is None:
                line = _node.lineno
                _node = _node.parent
        return line
    def block_range(self, lineno):
        """Get a range from the given line number to where this node ends.
        :param lineno: The line number to start the range at.
        :type lineno: int
        :returns: The range of line numbers that this node belongs to,
            starting at the given line number.
        :rtype: tuple(int, int or None)
        """
        return lineno, self.tolineno
    def set_local(self, name, stmt):
        """Define that the given name is declared in the given statement node.
        This definition is stored on the parent scope node.
        .. seealso:: :meth:`scope`
        :param name: The name that is being defined.
        :type name: str
        :param stmt: The statement that defines the given name.
        :type stmt: NodeNG
        """
        self.parent.set_local(name, stmt)
    def nodes_of_class(self, klass, skip_klass=None):
        """Get the nodes (including this one or below) of the given type.
        :param klass: The type of node to search for.
        :type klass: builtins.type
        :param skip_klass: A type of node to ignore. This is useful to ignore
            subclasses of :attr:`klass`.
        :type skip_klass: builtins.type
        :returns: The node of the given type.
        :rtype: iterable(NodeNG)
        """
        if isinstance(self, klass):
            yield self
        if skip_klass is None:
            for child_node in self.get_children():
                yield from child_node.nodes_of_class(klass, skip_klass)
            return
        for child_node in self.get_children():
            if isinstance(child_node, skip_klass):
                continue
            yield from child_node.nodes_of_class(klass, skip_klass)
    @decorators.cached
    def _get_assign_nodes(self):
        return []
    def _get_name_nodes(self):
        for child_node in self.get_children():
            yield from child_node._get_name_nodes()
    def _get_return_nodes_skip_functions(self):
        yield from ()
    def _get_yield_nodes_skip_lambdas(self):
        yield from ()
    def _infer_name(self, frame, name):
        # overridden for ImportFrom, Import, Global, TryExcept and Arguments
        pass
    def _infer(self, context=None):
        """we don't know how to resolve a statement by default"""
        # this method is overridden by most concrete classes
        raise exceptions.InferenceError(
            "No inference function for {node!r}.", node=self, context=context
        )
    def inferred(self):
        """Get a list of the inferred values.
        .. seealso:: :ref:`inference`
        :returns: The inferred values.
        :rtype: list
        """
        return list(self.infer())
    def instantiate_class(self):
        """Instantiate an instance of the defined class.
        .. note::
            On anything other than a :class:`ClassDef` this will return self.
        :returns: An instance of the defined class.
        :rtype: object
        """
        return self
    def has_base(self, node):
        """Check if this node inherits from the given type.
        :param node: The node defining the base to look for.
            Usually this is a :class:`Name` node.
        :type node: NodeNG
        """
        return False
    def callable(self):
        """Whether this node defines something that is callable.
        :returns: True if this defines something that is callable,
            False otherwise.
        :rtype: bool
        """
        return False
    def eq(self, value):
        # NOTE(review): base implementation; node classes holding literal
        # values presumably override this -- confirm in subclasses.
        return False
    def as_string(self):
        """Get the source code that this node represents.
        :returns: The source code.
        :rtype: str
        """
        return as_string.to_code(self)
    def repr_tree(
        self,
        ids=False,
        include_linenos=False,
        ast_state=False,
        indent="   ",
        max_depth=0,
        max_width=80,
    ):
        """Get a string representation of the AST from this node.
        :param ids: If true, includes the ids with the node type names.
        :type ids: bool
        :param include_linenos: If true, includes the line numbers and
            column offsets.
        :type include_linenos: bool
        :param ast_state: If true, includes information derived from
            the whole AST like local and global variables.
        :type ast_state: bool
        :param indent: A string to use to indent the output string.
        :type indent: str
        :param max_depth: If set to a positive integer, won't return
            nodes deeper than max_depth in the string.
        :type max_depth: int
        :param max_width: Attempt to format the output string to stay
            within this number of characters, but can exceed it under some
            circumstances. Only positive integer values are valid, the default is 80.
        :type max_width: int
        :returns: The string representation of the AST.
        :rtype: str
        """
        # pylint: disable=too-many-statements
        # The three nested functions below form a singledispatch over what
        # can appear in the tree: scalars, sequences, and nodes. Each
        # returns True when its output spanned multiple lines ("broken").
        @_singledispatch
        def _repr_tree(node, result, done, cur_indent="", depth=1):
            """Outputs a representation of a non-tuple/list, non-node that's
            contained within an AST, including strings.
            """
            lines = pprint.pformat(
                node, width=max(max_width - len(cur_indent), 1)
            ).splitlines(True)
            result.append(lines[0])
            result.extend([cur_indent + line for line in lines[1:]])
            return len(lines) != 1
        # pylint: disable=unused-variable; doesn't understand singledispatch
        @_repr_tree.register(tuple)
        @_repr_tree.register(list)
        def _repr_seq(node, result, done, cur_indent="", depth=1):
            """Outputs a representation of a sequence that's contained within an AST."""
            cur_indent += indent
            result.append("[")
            if not node:
                broken = False
            elif len(node) == 1:
                broken = _repr_tree(node[0], result, done, cur_indent, depth)
            elif len(node) == 2:
                broken = _repr_tree(node[0], result, done, cur_indent, depth)
                if not broken:
                    result.append(", ")
                else:
                    result.append(",\n")
                    result.append(cur_indent)
                broken = _repr_tree(node[1], result, done, cur_indent, depth) or broken
            else:
                result.append("\n")
                result.append(cur_indent)
                for child in node[:-1]:
                    _repr_tree(child, result, done, cur_indent, depth)
                    result.append(",\n")
                    result.append(cur_indent)
                _repr_tree(node[-1], result, done, cur_indent, depth)
                broken = True
            result.append("]")
            return broken
        # pylint: disable=unused-variable; doesn't understand singledispatch
        @_repr_tree.register(NodeNG)
        def _repr_node(node, result, done, cur_indent="", depth=1):
            """Outputs a strings representation of an astroid node."""
            # `done` guards against cycles in the node graph.
            if node in done:
                result.append(
                    indent
                    + "<Recursion on %s with id=%s" % (type(node).__name__, id(node))
                )
                return False
            done.add(node)
            if max_depth and depth > max_depth:
                result.append("...")
                return False
            depth += 1
            cur_indent += indent
            if ids:
                result.append("%s<0x%x>(\n" % (type(node).__name__, id(node)))
            else:
                result.append("%s(" % type(node).__name__)
            fields = []
            if include_linenos:
                fields.extend(("lineno", "col_offset"))
            fields.extend(node._other_fields)
            fields.extend(node._astroid_fields)
            if ast_state:
                fields.extend(node._other_other_fields)
            if not fields:
                broken = False
            elif len(fields) == 1:
                result.append("%s=" % fields[0])
                broken = _repr_tree(
                    getattr(node, fields[0]), result, done, cur_indent, depth
                )
            else:
                result.append("\n")
                result.append(cur_indent)
                for field in fields[:-1]:
                    result.append("%s=" % field)
                    _repr_tree(getattr(node, field), result, done, cur_indent, depth)
                    result.append(",\n")
                    result.append(cur_indent)
                result.append("%s=" % fields[-1])
                _repr_tree(getattr(node, fields[-1]), result, done, cur_indent, depth)
                broken = True
            result.append(")")
            return broken
        result = []
        _repr_tree(self, result, set())
        return "".join(result)
    def bool_value(self):
        """Determine the boolean value of this node.
        The boolean value of a node can have three
        possible values:
            * False: For instance, empty data structures,
              False, empty strings, instances which return
              explicitly False from the __nonzero__ / __bool__
              method.
            * True: Most of constructs are True by default:
              classes, functions, modules etc
            * Uninferable: The inference engine is uncertain of the
              node's value.
        :returns: The boolean value of this node.
        :rtype: bool or Uninferable
        """
        return util.Uninferable
    def op_precedence(self):
        # Look up by class name or default to highest precedence
        return OP_PRECEDENCE.get(self.__class__.__name__, len(OP_PRECEDENCE))
    def op_left_associative(self):
        # Everything is left associative except `**` and IfExp
        return True
class Statement(NodeNG):
    """Statement node adding a few attributes"""
    is_statement = True
    """Whether this node indicates a statement.
    :type: bool
    """
    def next_sibling(self):
        """The next sibling statement node.

        :returns: The next sibling statement node, or None if this is the
            last statement in its containing sequence.
        :rtype: NodeNG or None
        """
        siblings = self.parent.child_sequence(self)
        position = siblings.index(self)
        if position + 1 < len(siblings):
            return siblings[position + 1]
        return None
    def previous_sibling(self):
        """The previous sibling statement.

        :returns: The previous sibling statement node, or None if this is
            the first statement in its containing sequence.
        :rtype: NodeNG or None
        """
        siblings = self.parent.child_sequence(self)
        position = siblings.index(self)
        if position == 0:
            return None
        return siblings[position - 1]
class _BaseContainer(
    mixins.ParentAssignTypeMixin, NodeNG, bases.Instance, metaclass=abc.ABCMeta
):
    """Base class for Set, FrozenSet, Tuple and List."""
    _astroid_fields = ("elts",)
    def __init__(self, lineno=None, col_offset=None, parent=None):
        """
        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None
        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None
        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        # The element nodes contained in this container node.
        self.elts = []
        super().__init__(lineno, col_offset, parent)
    def postinit(self, elts):
        """Do some setup after initialisation.

        :param elts: The list of elements that the node contains.
        :type elts: list(NodeNG)
        """
        self.elts = elts
    @classmethod
    def from_constants(cls, elts=None):
        """Create a node of this type from the given list of elements.

        :param elts: The list of elements that the node should contain.
        :type elts: list(NodeNG)
        :returns: A new node containing the given elements.
        :rtype: NodeNG
        """
        node = cls()
        node.elts = [] if elts is None else [const_factory(e) for e in elts]
        return node
    def itered(self):
        """An iterator over the elements this node contains.

        :returns: The contents of this node.
        :rtype: iterable(NodeNG)
        """
        return self.elts
    def bool_value(self):
        """Determine the boolean value of this node.

        :returns: The boolean value of this node.
        :rtype: bool or Uninferable
        """
        # A container is truthy exactly when it holds elements.
        return bool(self.elts)
    @abc.abstractmethod
    def pytype(self):
        """Get the name of the type that this node represents.

        :returns: The name of the type.
        :rtype: str
        """
    def get_children(self):
        yield from self.elts
class LookupMixIn:
    """Mixin to look up a name in the right scope."""

    # NOTE(review): ``lru_cache`` on an instance method keys the cache on
    # ``self``, so every node that ever calls ``lookup`` is kept alive by the
    # unbounded cache -- presumably the surrounding project manages or
    # tolerates this; confirm before changing.
    @lru_cache(maxsize=None)
    def lookup(self, name):
        """Lookup where the given variable is assigned.
        The lookup starts from self's scope. If self is not a frame itself
        and the name is found in the inner frame locals, statements will be
        filtered to remove ignorable statements according to self's location.
        :param name: The name of the variable to find assignments for.
        :type name: str
        :returns: The scope node and the list of assignments associated to the
            given name according to the scope where it has been found (locals,
            globals or builtin).
        :rtype: tuple(str, list(NodeNG))
        """
        return self.scope().scope_lookup(self, name)

    def ilookup(self, name):
        """Lookup the inferred values of the given variable.
        :param name: The variable name to find values for.
        :type name: str
        :returns: The inferred values of the statements returned from
            :meth:`lookup`.
        :rtype: iterable
        """
        frame, stmts = self.lookup(name)
        context = contextmod.InferenceContext()
        return bases._infer_stmts(stmts, context, frame)

    def _get_filtered_node_statements(self, nodes):
        # Pair every candidate node with its enclosing statement.
        statements = [(node, node.statement()) for node in nodes]
        # Next we check if we have ExceptHandlers that are parent
        # of the underlying variable, in which case the last one survives
        if len(statements) > 1 and all(
            isinstance(stmt, ExceptHandler) for _, stmt in statements
        ):
            statements = [
                (node, stmt) for node, stmt in statements if stmt.parent_of(self)
            ]
        return statements

    def _filter_stmts(self, stmts, frame, offset):
        """Filter the given list of statements to remove ignorable statements.
        If self is not a frame itself and the name is found in the inner
        frame locals, statements will be filtered to remove ignorable
        statements according to self's location.
        :param stmts: The statements to filter.
        :type stmts: list(NodeNG)
        :param frame: The frame that all of the given statements belong to.
        :type frame: NodeNG
        :param offset: The line offset to filter statements up to.
        :type offset: int
        :returns: The filtered statements.
        :rtype: list(NodeNG)
        """
        # if offset == -1, my actual frame is not the inner frame but its parent
        #
        # class A(B): pass
        #
        # we need this to resolve B correctly
        if offset == -1:
            myframe = self.frame().parent.frame()
        else:
            myframe = self.frame()
            # If the frame of this node is the same as the statement
            # of this node, then the node is part of a class or
            # a function definition and the frame of this node should be the
            # the upper frame, not the frame of the definition.
            # For more information why this is important,
            # see Pylint issue #295.
            # For example, for 'b', the statement is the same
            # as the frame / scope:
            #
            # def test(b=1):
            #     ...
            if self.statement() is myframe and myframe.parent:
                myframe = myframe.parent.frame()
        mystmt = self.statement()
        # line filtering if we are in the same frame
        #
        # take care node may be missing lineno information (this is the case for
        # nodes inserted for living objects)
        if myframe is frame and mystmt.fromlineno is not None:
            assert mystmt.fromlineno is not None, mystmt
            mylineno = mystmt.fromlineno + offset
        else:
            # disabling lineno filtering
            mylineno = 0
        _stmts = []
        _stmt_parents = []
        statements = self._get_filtered_node_statements(stmts)
        for node, stmt in statements:
            # line filtering is on and we have reached our location, break
            if stmt.fromlineno > mylineno > 0:
                break
            # Ignore decorators with the same name as the
            # decorated function
            # Fixes issue #375
            if mystmt is stmt and is_from_decorator(self):
                continue
            assert hasattr(node, "assign_type"), (
                node,
                node.scope(),
                node.scope().locals,
            )
            assign_type = node.assign_type()
            # Stop at a base class of the class we are looking up from.
            if node.has_base(self):
                break
            _stmts, done = assign_type._get_filtered_stmts(self, node, _stmts, mystmt)
            if done:
                break
            optional_assign = assign_type.optional_assign
            if optional_assign and assign_type.parent_of(self):
                # we are inside a loop, loop var assignment is hiding previous
                # assignment
                _stmts = [node]
                _stmt_parents = [stmt.parent]
                continue
            # XXX comment various branches below!!!
            try:
                pindex = _stmt_parents.index(stmt.parent)
            except ValueError:
                pass
            else:
                # we got a parent index, this means the currently visited node
                # is at the same block level as a previously visited node
                if _stmts[pindex].assign_type().parent_of(assign_type):
                    # both statements are not at the same block level
                    continue
                # if currently visited node is following previously considered
                # assignment and both are not exclusive, we can drop the
                # previous one. For instance in the following code ::
                #
                #   if a:
                #     x = 1
                #   else:
                #     x = 2
                #   print x
                #
                # we can't remove neither x = 1 nor x = 2 when looking for 'x'
                # of 'print x'; while in the following ::
                #
                #   x = 1
                #   x = 2
                #   print x
                #
                # we can remove x = 1 when we see x = 2
                #
                # moreover, on loop assignment types, assignment won't
                # necessarily be done if the loop has no iteration, so we don't
                # want to clear previous assignments if any (hence the test on
                # optional_assign)
                if not (optional_assign or are_exclusive(_stmts[pindex], node)):
                    if (
                        # In case of partial function node, if the statement is different
                        # from the origin function then it can be deleted otherwise it should
                        # remain to be able to correctly infer the call to origin function.
                        not node.is_function
                        or node.qname() != "PartialFunction"
                        or node.name != _stmts[pindex].name
                    ):
                        del _stmt_parents[pindex]
                        del _stmts[pindex]
            # An unconditional assignment at the same block level shadows
            # everything seen so far; a ``del`` clears the candidates entirely.
            if isinstance(node, AssignName):
                if not optional_assign and stmt.parent is mystmt.parent:
                    _stmts = []
                    _stmt_parents = []
            elif isinstance(node, DelName):
                _stmts = []
                _stmt_parents = []
                continue
            if not are_exclusive(self, node):
                _stmts.append(node)
                _stmt_parents.append(stmt.parent)
        return _stmts
# Name classes
class AssignName(
    mixins.NoChildrenMixin, LookupMixIn, mixins.ParentAssignTypeMixin, NodeNG
):
    """Variation of :class:`ast.Assign` representing assignment to a name.

    An :class:`AssignName` is the name of something that is assigned to.
    This includes variables defined in a function signature or in a loop.

    >>> node = astroid.extract_node('variable = range(10)')
    >>> node
    <Assign l.1 at 0x7effe1db8550>
    >>> list(node.get_children())
    [<AssignName.variable l.1 at 0x7effe1db8748>, <Call l.1 at 0x7effe1db8630>]
    >>> list(node.get_children())[0].as_string()
    'variable'
    """

    _other_fields = ("name",)

    def __init__(self, name=None, lineno=None, col_offset=None, parent=None):
        """
        :param name: The name that is assigned to.
        :type name: str or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.name = name
        """The name that is assigned to.

        :type: str or None
        """

        # Zero-argument super() is the Python 3 idiom; behavior is identical
        # to the former ``super(AssignName, self).__init__(...)`` spelling.
        super().__init__(lineno, col_offset, parent)
class DelName(
    mixins.NoChildrenMixin, LookupMixIn, mixins.ParentAssignTypeMixin, NodeNG
):
    """Variation of :class:`ast.Delete` representing deletion of a name.

    A :class:`DelName` is the name of something that is deleted.

    >>> node = astroid.extract_node("del variable #@")
    >>> list(node.get_children())
    [<DelName.variable l.1 at 0x7effe1da4d30>]
    >>> list(node.get_children())[0].as_string()
    'variable'
    """

    _other_fields = ("name",)

    def __init__(self, name=None, lineno=None, col_offset=None, parent=None):
        """
        :param name: The name that is being deleted.
        :type name: str or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.name = name
        """The name that is being deleted.

        :type: str or None
        """

        # Zero-argument super() is the Python 3 idiom; behavior is identical.
        super().__init__(lineno, col_offset, parent)
class Name(mixins.NoChildrenMixin, LookupMixIn, NodeNG):
    """Class representing an :class:`ast.Name` node.

    A :class:`Name` node is something that is named, but not covered by
    :class:`AssignName` or :class:`DelName`.

    >>> node = astroid.extract_node('range(10)')
    >>> node
    <Call l.1 at 0x7effe1db8710>
    >>> list(node.get_children())
    [<Name.range l.1 at 0x7effe1db86a0>, <Const.int l.1 at 0x7effe1db8518>]
    >>> list(node.get_children())[0].as_string()
    'range'
    """

    _other_fields = ("name",)

    def __init__(self, name=None, lineno=None, col_offset=None, parent=None):
        """
        :param name: The name that this node refers to.
        :type name: str or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.name = name
        """The name that this node refers to.

        :type: str or None
        """

        # Zero-argument super() is the Python 3 idiom; behavior is identical.
        super().__init__(lineno, col_offset, parent)

    def _get_name_nodes(self):
        # A Name yields itself first, then any Name nodes below it.
        yield self

        for child_node in self.get_children():
            yield from child_node._get_name_nodes()
class Arguments(mixins.AssignTypeMixin, NodeNG):
    """Class representing an :class:`ast.arguments` node.

    An :class:`Arguments` node represents that arguments in a
    function definition.

    >>> node = astroid.extract_node('def foo(bar): pass')
    >>> node
    <FunctionDef.foo l.1 at 0x7effe1db8198>
    >>> node.args
    <Arguments l.1 at 0x7effe1db82e8>
    """

    # Python 3.4+ uses a different approach regarding annotations,
    # each argument is a new class, _ast.arg, which exposes an
    # 'annotation' attribute. In astroid though, arguments are exposed
    # as is in the Arguments node and the only way to expose annotations
    # is by using something similar with Python 3.3:
    #  - we expose 'varargannotation' and 'kwargannotation' of annotations
    #    of varargs and kwargs.
    #  - we expose 'annotation', a list with annotations for
    #    for each normal argument. If an argument doesn't have an
    #    annotation, its value will be None.
    _astroid_fields = (
        "args",
        "defaults",
        "kwonlyargs",
        "kw_defaults",
        "annotations",
        "varargannotation",
        "kwargannotation",
        "kwonlyargs_annotations",
    )
    varargannotation = None
    """The type annotation for the variable length arguments.

    :type: NodeNG
    """
    kwargannotation = None
    """The type annotation for the variable length keyword arguments.

    :type: NodeNG
    """

    _other_fields = ("vararg", "kwarg")

    def __init__(self, vararg=None, kwarg=None, parent=None):
        """
        :param vararg: The name of the variable length arguments.
        :type vararg: str or None

        :param kwarg: The name of the variable length keyword arguments.
        :type kwarg: str or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        # Zero-argument super() is the Python 3 idiom; behavior is identical.
        super().__init__(parent=parent)
        self.vararg = vararg
        """The name of the variable length arguments.

        :type: str or None
        """

        self.kwarg = kwarg
        """The name of the variable length keyword arguments.

        :type: str or None
        """

        self.args = []
        """The names of the required arguments.

        :type: list(AssignName)
        """

        self.defaults = []
        """The default values for arguments that can be passed positionally.

        :type: list(NodeNG)
        """

        self.kwonlyargs = []
        """The keyword arguments that cannot be passed positionally.

        :type: list(AssignName)
        """

        self.kw_defaults = []
        """The default values for keyword arguments that cannot be passed positionally.

        :type: list(NodeNG)
        """

        self.annotations = []
        """The type annotations of arguments that can be passed positionally.

        :type: list(NodeNG)
        """

        self.kwonlyargs_annotations = []
        """The type annotations of arguments that cannot be passed positionally.

        :type: list(NodeNG)
        """

    def postinit(
        self,
        args,
        defaults,
        kwonlyargs,
        kw_defaults,
        annotations,
        kwonlyargs_annotations=None,
        varargannotation=None,
        kwargannotation=None,
    ):
        """Do some setup after initialisation.

        :param args: The names of the required arguments.
        :type args: list(AssignName)

        :param defaults: The default values for arguments that can be passed
            positionally.
        :type defaults: list(NodeNG)

        :param kwonlyargs: The keyword arguments that cannot be passed
            positionally.
        :type kwonlyargs: list(AssignName)

        :param kw_defaults: The default values for keyword arguments that
            cannot be passed positionally.
        :type kw_defaults: list(NodeNG)

        :param annotations: The type annotations of arguments that can be
            passed positionally.
        :type annotations: list(NodeNG)

        :param kwonlyargs_annotations: The type annotations of arguments that
            cannot be passed positionally. This should always be passed in
            Python 3.
        :type kwonlyargs_annotations: list(NodeNG)

        :param varargannotation: The type annotation for the variable length
            arguments.
        :type varargannotation: NodeNG

        :param kwargannotation: The type annotation for the variable length
            keyword arguments.
        :type kwargannotation: NodeNG
        """
        self.args = args
        self.defaults = defaults
        self.kwonlyargs = kwonlyargs
        self.kw_defaults = kw_defaults
        self.annotations = annotations
        self.kwonlyargs_annotations = kwonlyargs_annotations
        self.varargannotation = varargannotation
        self.kwargannotation = kwargannotation

    def _infer_name(self, frame, name):
        # Argument names are only inferable as names when looked up
        # directly from the owning function's frame.
        if self.parent is frame:
            return name
        return None

    @decorators.cachedproperty
    def fromlineno(self):
        """The first line that this node appears on in the source code.

        :type: int or None
        """
        lineno = super().fromlineno
        return max(lineno, self.parent.fromlineno or 0)

    def format_args(self):
        """Get the arguments formatted as string.

        :returns: The formatted arguments.
        :rtype: str
        """
        result = []
        if self.args:
            result.append(
                _format_args(
                    self.args, self.defaults, getattr(self, "annotations", None)
                )
            )
        if self.vararg:
            result.append("*%s" % self.vararg)
        if self.kwonlyargs:
            # A bare "*" separator is required when there is no *args.
            if not self.vararg:
                result.append("*")
            result.append(
                _format_args(
                    self.kwonlyargs, self.kw_defaults, self.kwonlyargs_annotations
                )
            )
        if self.kwarg:
            result.append("**%s" % self.kwarg)
        return ", ".join(result)

    def default_value(self, argname):
        """Get the default value for an argument.

        :param argname: The name of the argument to get the default value for.
        :type argname: str

        :raises NoDefault: If there is no default value defined for the
            given argument.
        """
        i = _find_arg(argname, self.args)[0]
        if i is not None:
            # Defaults align with the *tail* of the positional arguments.
            idx = i - (len(self.args) - len(self.defaults))
            if idx >= 0:
                return self.defaults[idx]
        i = _find_arg(argname, self.kwonlyargs)[0]
        if i is not None and self.kw_defaults[i] is not None:
            return self.kw_defaults[i]
        raise exceptions.NoDefault(func=self.parent, name=argname)

    def is_argument(self, name):
        """Check if the given name is defined in the arguments.

        :param name: The name to check for.
        :type name: str

        :returns: True if the given name is defined in the arguments,
            False otherwise.
        :rtype: bool
        """
        if name == self.vararg:
            return True
        if name == self.kwarg:
            return True
        # bool() guards the documented return type: without it the
        # expression could evaluate to the empty ``kwonlyargs`` list
        # (falsy, but not ``False``) when no positional match is found.
        return bool(
            self.find_argname(name, True)[1] is not None
            or self.kwonlyargs
            and _find_arg(name, self.kwonlyargs, True)[1] is not None
        )

    def find_argname(self, argname, rec=False):
        """Get the index and :class:`AssignName` node for given name.

        :param argname: The name of the argument to search for.
        :type argname: str

        :param rec: Whether or not to include arguments in unpacked tuples
            in the search.
        :type rec: bool

        :returns: The index and node for the argument.
        :rtype: tuple(str or None, AssignName or None)
        """
        if self.args:  # self.args may be None in some cases (builtin function)
            return _find_arg(argname, self.args, rec)
        return None, None

    def get_children(self):
        yield from self.args or ()

        yield from self.defaults
        yield from self.kwonlyargs

        for elt in self.kw_defaults:
            if elt is not None:
                yield elt

        for elt in self.annotations:
            if elt is not None:
                yield elt

        if self.varargannotation is not None:
            yield self.varargannotation

        if self.kwargannotation is not None:
            yield self.kwargannotation

        for elt in self.kwonlyargs_annotations:
            if elt is not None:
                yield elt
def _find_arg(argname, args, rec=False):
    """Locate *argname* in *args*.

    :returns: ``(index, node)`` of the first match, or ``(None, None)``.
    """
    for index, argument in enumerate(args):
        if isinstance(argument, Tuple):
            # Unpacked-tuple parameters are only searched when rec is set.
            if not rec:
                continue
            result = _find_arg(argname, argument.elts)
            if result[0] is not None:
                return result
        elif argument.name == argname:
            return index, argument
    return None, None
def _format_args(args, defaults=None, annotations=None):
    """Render *args* as a comma-separated string, attaching any
    annotations and default values to the matching argument."""
    if args is None:
        return ""
    if annotations is None:
        annotations = []
    if defaults is not None:
        # Defaults align with the tail of the argument list.
        default_offset = len(args) - len(defaults)

    rendered = []
    pairs = itertools.zip_longest(args, annotations)
    for index, (arg, annotation) in enumerate(pairs):
        if isinstance(arg, Tuple):
            rendered.append("(%s)" % _format_args(arg.elts))
        else:
            part = arg.name
            if annotation is not None:
                part += ":" + annotation.as_string()
            rendered.append(part)
        if defaults is not None and index >= default_offset:
            default = defaults[index - default_offset]
            if default is not None:
                rendered[-1] += "=" + default.as_string()
    return ", ".join(rendered)
class AssignAttr(mixins.ParentAssignTypeMixin, NodeNG):
    """Variation of :class:`ast.Assign` representing assignment to an attribute.

    >>> node = astroid.extract_node('self.attribute = range(10)')
    >>> node
    <Assign l.1 at 0x7effe1d521d0>
    >>> list(node.get_children())
    [<AssignAttr.attribute l.1 at 0x7effe1d52320>, <Call l.1 at 0x7effe1d522e8>]
    >>> list(node.get_children())[0].as_string()
    'self.attribute'
    """

    _astroid_fields = ("expr",)
    _other_fields = ("attrname",)
    expr = None
    """What has the attribute that is being assigned to.

    :type: NodeNG or None
    """

    def __init__(self, attrname=None, lineno=None, col_offset=None, parent=None):
        """
        :param attrname: The name of the attribute being assigned to.
        :type attrname: str or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.attrname = attrname
        """The name of the attribute being assigned to.

        :type: str or None
        """

        # Zero-argument super() is the Python 3 idiom; behavior is identical.
        super().__init__(lineno, col_offset, parent)

    def postinit(self, expr=None):
        """Do some setup after initialisation.

        :param expr: What has the attribute that is being assigned to.
        :type expr: NodeNG or None
        """
        self.expr = expr

    def get_children(self):
        yield self.expr
class Assert(Statement):
    """Class representing an :class:`ast.Assert` node.

    An :class:`Assert` node represents an assert statement.

    >>> node = astroid.extract_node('assert len(things) == 10, "Not enough things"')
    >>> node
    <Assert l.1 at 0x7effe1d527b8>
    """

    _astroid_fields = ("test", "fail")

    test = None
    """The test that passes or fails the assertion.

    :type: NodeNG or None
    """

    fail = None
    """The message shown when the assertion fails.

    :type: NodeNG or None
    """

    def postinit(self, test=None, fail=None):
        """Do some setup after initialisation.

        :param test: The test that passes or fails the assertion.
        :type test: NodeNG or None

        :param fail: The message shown when the assertion fails.
        :type fail: NodeNG or None
        """
        self.test = test
        self.fail = fail

    def get_children(self):
        yield self.test

        if self.fail is not None:
            yield self.fail
class Assign(mixins.AssignTypeMixin, Statement):
    """Class representing an :class:`ast.Assign` node.

    An :class:`Assign` is a statement where something is explicitly
    assigned to.

    >>> node = astroid.extract_node('variable = range(10)')
    >>> node
    <Assign l.1 at 0x7effe1db8550>
    """

    _astroid_fields = ("targets", "value")
    _other_other_fields = ("type_annotation",)

    targets = None
    """What is being assigned to.

    :type: list(NodeNG) or None
    """

    value = None
    """The value being assigned to the variables.

    :type: NodeNG or None
    """

    type_annotation = None
    """If present, this will contain the type annotation passed by a type comment

    :type: NodeNG or None
    """

    def postinit(self, targets=None, value=None, type_annotation=None):
        """Do some setup after initialisation.

        :param targets: What is being assigned to.
        :type targets: list(NodeNG) or None

        :param value: The value being assigned to the variables.
        :type: NodeNG or None

        :param type_annotation: The annotation from an associated type comment.
        :type: NodeNG or None
        """
        self.targets = targets
        self.value = value
        self.type_annotation = type_annotation

    def get_children(self):
        for target in self.targets:
            yield target

        yield self.value

    @decorators.cached
    def _get_assign_nodes(self):
        # This assignment plus any assignments nested in the value expression.
        assign_nodes = [self]
        assign_nodes.extend(self.value._get_assign_nodes())
        return assign_nodes

    def _get_yield_nodes_skip_lambdas(self):
        yield from self.value._get_yield_nodes_skip_lambdas()
class AnnAssign(mixins.AssignTypeMixin, Statement):
    """Class representing an :class:`ast.AnnAssign` node.

    An :class:`AnnAssign` is an assignment with a type annotation.

    >>> node = astroid.extract_node('variable: List[int] = range(10)')
    >>> node
    <AnnAssign l.1 at 0x7effe1d4c630>
    """

    _astroid_fields = ("target", "annotation", "value")
    _other_fields = ("simple",)

    target = None
    """What is being assigned to.

    :type: NodeNG or None
    """

    annotation = None
    """The type annotation of what is being assigned to.

    :type: NodeNG
    """

    value = None
    """The value being assigned to the variables.

    :type: NodeNG or None
    """

    simple = None
    """Whether :attr:`target` is a pure name or a complex statement.

    :type: int
    """

    def postinit(self, target, annotation, simple, value=None):
        """Do some setup after initialisation.

        :param target: What is being assigned to.
        :type target: NodeNG

        :param annotation: The type annotation of what is being assigned to.
        :type: NodeNG

        :param simple: Whether :attr:`target` is a pure name
            or a complex statement.
        :type simple: int

        :param value: The value being assigned to the variables.
        :type: NodeNG or None
        """
        self.target = target
        self.annotation = annotation
        self.simple = simple
        self.value = value

    def get_children(self):
        yield self.target
        yield self.annotation

        if self.value is not None:
            yield self.value
class AugAssign(mixins.AssignTypeMixin, Statement):
    """Class representing an :class:`ast.AugAssign` node.

    An :class:`AugAssign` is an assignment paired with an operator.

    >>> node = astroid.extract_node('variable += 1')
    >>> node
    <AugAssign l.1 at 0x7effe1db4d68>
    """

    _astroid_fields = ("target", "value")
    _other_fields = ("op",)

    target = None
    """What is being assigned to.

    :type: NodeNG or None
    """

    value = None
    """The value being assigned to the variable.

    :type: NodeNG or None
    """

    def __init__(self, op=None, lineno=None, col_offset=None, parent=None):
        """
        :param op: The operator that is being combined with the assignment.
            This includes the equals sign.
        :type op: str or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.op = op
        """The operator that is being combined with the assignment.

        This includes the equals sign.

        :type: str or None
        """

        # Zero-argument super() is the Python 3 idiom; behavior is identical.
        super().__init__(lineno, col_offset, parent)

    def postinit(self, target=None, value=None):
        """Do some setup after initialisation.

        :param target: What is being assigned to.
        :type target: NodeNG or None

        :param value: The value being assigned to the variable.
        :type: NodeNG or None
        """
        self.target = target
        self.value = value

    # This is set by inference.py
    def _infer_augassign(self, context=None):
        raise NotImplementedError

    def type_errors(self, context=None):
        """Get a list of type errors which can occur during inference.

        Each TypeError is represented by a :class:`BadBinaryOperationMessage` ,
        which holds the original exception.

        :returns: The list of possible type errors.
        :rtype: list(BadBinaryOperationMessage)
        """
        try:
            results = self._infer_augassign(context=context)
            return [
                result
                for result in results
                if isinstance(result, util.BadBinaryOperationMessage)
            ]
        except exceptions.InferenceError:
            # No inference possible means no type errors to report.
            return []

    def get_children(self):
        yield self.target
        yield self.value
class Repr(NodeNG):
    """Class representing an :class:`ast.Repr` node.

    A :class:`Repr` node represents the backtick syntax,
    which is a deprecated alias for :func:`repr` removed in Python 3.

    >>> node = astroid.extract_node('`variable`')
    >>> node
    <Repr l.1 at 0x7fa0951d75d0>
    """

    _astroid_fields = ("value",)

    value = None
    """What is having :func:`repr` called on it.

    :type: NodeNG or None
    """

    def postinit(self, value=None):
        """Do some setup after initialisation.

        :param value: What is having :func:`repr` called on it.
        :type value: NodeNG or None
        """
        self.value = value
class BinOp(NodeNG):
    """Class representing an :class:`ast.BinOp` node.

    A :class:`BinOp` node is an application of a binary operator.

    >>> node = astroid.extract_node('a + b')
    >>> node
    <BinOp l.1 at 0x7f23b2e8cfd0>
    """

    _astroid_fields = ("left", "right")
    _other_fields = ("op",)

    left = None
    """What is being applied to the operator on the left side.

    :type: NodeNG or None
    """

    right = None
    """What is being applied to the operator on the right side.

    :type: NodeNG or None
    """

    def __init__(self, op=None, lineno=None, col_offset=None, parent=None):
        """
        :param op: The operator.
        :type: str or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.op = op
        """The operator.

        :type: str or None
        """

        # Zero-argument super() is the Python 3 idiom; behavior is identical.
        super().__init__(lineno, col_offset, parent)

    def postinit(self, left=None, right=None):
        """Do some setup after initialisation.

        :param left: What is being applied to the operator on the left side.
        :type left: NodeNG or None

        :param right: What is being applied to the operator on the right side.
        :type right: NodeNG or None
        """
        self.left = left
        self.right = right

    # This is set by inference.py
    def _infer_binop(self, context=None):
        raise NotImplementedError

    def type_errors(self, context=None):
        """Get a list of type errors which can occur during inference.

        Each TypeError is represented by a :class:`BadBinaryOperationMessage`,
        which holds the original exception.

        :returns: The list of possible type errors.
        :rtype: list(BadBinaryOperationMessage)
        """
        try:
            results = self._infer_binop(context=context)
            return [
                result
                for result in results
                if isinstance(result, util.BadBinaryOperationMessage)
            ]
        except exceptions.InferenceError:
            # No inference possible means no type errors to report.
            return []

    def get_children(self):
        yield self.left
        yield self.right

    def op_precedence(self):
        return OP_PRECEDENCE[self.op]

    def op_left_associative(self):
        # 2**3**4 == 2**(3**4) -- exponentiation is the only
        # right-associative binary operator.
        return self.op != "**"
class BoolOp(NodeNG):
    """Class representing an :class:`ast.BoolOp` node.

    A :class:`BoolOp` is an application of a boolean operator.

    >>> node = astroid.extract_node('a and b')
    >>> node
    <BinOp l.1 at 0x7f23b2e71c50>
    """

    _astroid_fields = ("values",)
    _other_fields = ("op",)

    values = None
    """The values being applied to the operator.

    :type: list(NodeNG) or None
    """

    def __init__(self, op=None, lineno=None, col_offset=None, parent=None):
        """
        :param op: The operator.
        :type: str or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.op = op
        """The operator.

        :type: str or None
        """

        # Zero-argument super() is the Python 3 idiom; behavior is identical.
        super().__init__(lineno, col_offset, parent)

    def postinit(self, values=None):
        """Do some setup after initialisation.

        :param values: The values being applied to the operator.
        :type values: list(NodeNG) or None
        """
        self.values = values

    def get_children(self):
        yield from self.values

    def op_precedence(self):
        return OP_PRECEDENCE[self.op]
class Break(mixins.NoChildrenMixin, Statement):
    """Class representing an :class:`ast.Break` node.

    A :class:`Break` node is a ``break`` statement; it has no children.

    >>> node = astroid.extract_node('break')
    >>> node
    <Break l.1 at 0x7f23b2e9e5c0>
    """
class Call(NodeNG):
    """Class representing an :class:`ast.Call` node.

    A :class:`Call` node is a call to a function, method, etc.

    >>> node = astroid.extract_node('function()')
    >>> node
    <Call l.1 at 0x7f23b2e71eb8>
    """

    _astroid_fields = ("func", "args", "keywords")

    func = None
    """What is being called.

    :type: NodeNG or None
    """

    args = None
    """The positional arguments being given to the call.

    :type: list(NodeNG) or None
    """

    keywords = None
    """The keyword arguments being given to the call.

    :type: list(NodeNG) or None
    """

    def postinit(self, func=None, args=None, keywords=None):
        """Do some setup after initialisation.

        :param func: What is being called.
        :type func: NodeNG or None

        :param args: The positional arguments being given to the call.
        :type args: list(NodeNG) or None

        :param keywords: The keyword arguments being given to the call.
        :type keywords: list(NodeNG) or None
        """
        self.func = func
        self.args = args
        self.keywords = keywords

    @property
    def starargs(self):
        """The positional arguments that unpack something.

        :type: list(Starred)
        """
        return [arg for arg in self.args or [] if isinstance(arg, Starred)]

    @property
    def kwargs(self):
        """The keyword arguments that unpack something.

        :type: list(Keyword)
        """
        return [keyword for keyword in self.keywords or [] if keyword.arg is None]

    def get_children(self):
        yield self.func

        yield from self.args
        yield from self.keywords or ()
class Compare(NodeNG):
    """Class representing an :class:`ast.Compare` node.

    A :class:`Compare` node indicates a comparison.

    >>> node = astroid.extract_node('a <= b <= c')
    >>> node
    <Compare l.1 at 0x7f23b2e9e6d8>
    >>> node.ops
    [('<=', <Name.b l.1 at 0x7f23b2e9e2b0>), ('<=', <Name.c l.1 at 0x7f23b2e9e390>)]
    """

    _astroid_fields = ("left", "ops")

    left = None
    """The value at the left being applied to a comparison operator.

    :type: NodeNG or None
    """

    ops = None
    """The remainder of the operators and their relevant right hand value.

    :type: list(tuple(str, NodeNG)) or None
    """

    def postinit(self, left=None, ops=None):
        """Do some setup after initialisation.

        :param left: The value at the left being applied to a comparison
            operator.
        :type left: NodeNG or None

        :param ops: The remainder of the operators
            and their relevant right hand value.
        :type ops: list(tuple(str, NodeNG)) or None
        """
        self.left = left
        self.ops = ops

    def get_children(self):
        """Get the child nodes below this node.

        Overridden to handle the tuple fields and skip returning the operator
        strings.

        :returns: The children.
        :rtype: iterable(NodeNG)
        """
        yield self.left
        for operator_operand in self.ops:
            # Only the right-hand operand is a node; the operator is a string.
            yield operator_operand[1]

    def last_child(self):
        """An optimized version of list(get_children())[-1]

        :returns: The last child.
        :rtype: NodeNG
        """
        # XXX maybe if self.ops:
        return self.ops[-1][1]
        # return self.left
class Comprehension(NodeNG):
    """Class representing an :class:`ast.comprehension` node.

    A :class:`Comprehension` indicates the loop inside any type of
    comprehension including generator expressions.

    >>> node = astroid.extract_node('[x for x in some_values]')
    >>> list(node.get_children())
    [<Name.x l.1 at 0x7f23b2e352b0>, <Comprehension l.1 at 0x7f23b2e35320>]
    >>> list(node.get_children())[1].as_string()
    'for x in some_values'
    """

    _astroid_fields = ("target", "iter", "ifs")
    _other_fields = ("is_async",)

    target = None
    """What is assigned to by the comprehension.

    :type: NodeNG or None
    """

    iter = None
    """What is iterated over by the comprehension.

    :type: NodeNG or None
    """

    ifs = None
    """The contents of any if statements that filter the comprehension.

    :type: list(NodeNG) or None
    """

    is_async = None
    """Whether this is an asynchronous comprehension or not.

    :type: bool or None
    """

    def __init__(self, parent=None):
        """
        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        # Zero-argument super() is the Python 3 idiom; behavior is identical.
        # The base initialiser is deliberately called without arguments and
        # the parent link is assigned directly afterwards.
        super().__init__()
        self.parent = parent

    # pylint: disable=redefined-builtin; same name as builtin ast module.
    def postinit(self, target=None, iter=None, ifs=None, is_async=None):
        """Do some setup after initialisation.

        :param target: What is assigned to by the comprehension.
        :type target: NodeNG or None

        :param iter: What is iterated over by the comprehension.
        :type iter: NodeNG or None

        :param ifs: The contents of any if statements that filter
            the comprehension.
        :type ifs: list(NodeNG) or None

        :param is_async: Whether this is an asynchronous comprehension or not.
        :type: bool or None
        """
        self.target = target
        self.iter = iter
        self.ifs = ifs
        self.is_async = is_async

    optional_assign = True
    """Whether this node optionally assigns a variable.

    :type: bool
    """

    def assign_type(self):
        """The type of assignment that this node performs.

        :returns: The assignment type.
        :rtype: NodeNG
        """
        return self

    def _get_filtered_stmts(self, lookup_node, node, stmts, mystmt):
        """method used in filter_stmts"""
        if self is mystmt:
            if isinstance(lookup_node, (Const, Name)):
                return [lookup_node], True

        elif self.statement() is mystmt:
            # original node's statement is the assignment, only keeps
            # current node (gen exp, list comp)

            return [node], True

        return stmts, False

    def get_children(self):
        yield self.target
        yield self.iter

        yield from self.ifs
class Const(mixins.NoChildrenMixin, NodeNG, bases.Instance):
    """Class representing any constant including num, str, bool, None, bytes.

    >>> node = astroid.extract_node('(5, "This is a string.", True, None, b"bytes")')
    >>> node
    <Tuple.tuple l.1 at 0x7f23b2e358d0>
    >>> list(node.get_children())
    [<Const.int l.1 at 0x7f23b2e35940>,
    <Const.str l.1 at 0x7f23b2e35978>,
    <Const.bool l.1 at 0x7f23b2e359b0>,
    <Const.NoneType l.1 at 0x7f23b2e359e8>,
    <Const.bytes l.1 at 0x7f23b2e35a20>]
    """

    _other_fields = ("value",)

    def __init__(self, value, lineno=None, col_offset=None, parent=None):
        """
        :param value: The value that the constant represents.
        :type value: object

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.value = value
        """The value that the constant represents.
        :type: object
        """
        super(Const, self).__init__(lineno, col_offset, parent)

    def __getattr__(self, name):
        # This is needed because of Proxy's __getattr__ method.
        # Calling object.__new__ on this class without calling
        # __init__ would result in an infinite loop otherwise
        # since __getattr__ is called when an attribute doesn't
        # exist and self._proxied indirectly calls self.value
        # and Proxy __getattr__ calls self.value
        if name == "value":
            raise AttributeError
        return super().__getattr__(name)

    def getitem(self, index, context=None):
        """Get an item from this node if subscriptable.

        :param index: The node to use as a subscript index.
        :type index: Const or Slice

        :raises AstroidTypeError: When the given index cannot be used as a
            subscript index, or if this node is not subscriptable.
        """
        # Resolve the subscript to a concrete Python value first: a Const
        # contributes its wrapped value, a Slice is converted to a slice().
        if isinstance(index, Const):
            index_value = index.value
        elif isinstance(index, Slice):
            index_value = _infer_slice(index, context=context)
        else:
            raise exceptions.AstroidTypeError(
                "Could not use type {} as subscript index".format(type(index))
            )
        # Only str and bytes constants are subscriptable; any other value
        # falls through to the AstroidTypeError at the end.
        try:
            if isinstance(self.value, (str, bytes)):
                return Const(self.value[index_value])
        except IndexError as exc:
            raise exceptions.AstroidIndexError(
                message="Index {index!r} out of range",
                node=self,
                index=index,
                context=context,
            ) from exc
        except TypeError as exc:
            raise exceptions.AstroidTypeError(
                message="Type error {error!r}", node=self, index=index, context=context
            ) from exc
        raise exceptions.AstroidTypeError("%r (value=%s)" % (self, self.value))

    def has_dynamic_getattr(self):
        """Check if the node has a custom __getattr__ or __getattribute__.

        :returns: True if the class has a custom
            __getattr__ or __getattribute__, False otherwise.
            For a :class:`Const` this is always ``False``.
        :rtype: bool
        """
        return False

    def itered(self):
        """An iterator over the elements this node contains.

        :returns: The contents of this node.
        :rtype: iterable(str)

        :raises TypeError: If this node does not represent something that is
            iterable (only str constants are handled here; bytes and all
            other values raise).
        """
        if isinstance(self.value, str):
            return self.value
        raise TypeError()

    def pytype(self):
        """Get the name of the type that this node represents.

        :returns: The name of the type.
        :rtype: str
        """
        return self._proxied.qname()

    def bool_value(self):
        """Determine the boolean value of this node.

        :returns: The boolean value of this node.
        :rtype: bool
        """
        return bool(self.value)
class Continue(mixins.NoChildrenMixin, Statement):
    """Class representing an :class:`ast.Continue` node.

    A ``continue`` statement inside a loop body.

    >>> node = astroid.extract_node('continue')
    >>> node
    <Continue l.1 at 0x7f23b2e35588>
    """
class Decorators(NodeNG):
    """A node representing the list of decorators applied to a function
    or method.

    >>> node = astroid.extract_node('''
    @property
    def my_property(self):
        return 3
    ''')
    >>> node
    <FunctionDef.my_property l.2 at 0x7f23b2e35d30>
    >>> list(node.get_children())[0]
    <Decorators l.1 at 0x7f23b2e35d68>
    """

    _astroid_fields = ("nodes",)
    nodes = None  # list(Name or Call) or None: the decorators, set by postinit

    def postinit(self, nodes):
        """Do some setup after initialisation.

        :param nodes: The decorators that this node contains.
        :type nodes: list(Name or Call)
        """
        self.nodes = nodes

    def scope(self):
        """The first parent node defining a new scope.

        :returns: The first parent scope node.
        :rtype: Module or FunctionDef or ClassDef or Lambda or GenExpr
        """
        # Decorator expressions are evaluated in the scope enclosing the
        # decorated function, so skip over the function node itself.
        return self.parent.parent.scope()

    def get_children(self):
        yield from self.nodes
class DelAttr(mixins.ParentAssignTypeMixin, NodeNG):
    """Variation of :class:`ast.Delete` representing deletion of an attribute.

    >>> node = astroid.extract_node('del self.attr')
    >>> node
    <Delete l.1 at 0x7f23b2e35f60>
    >>> list(node.get_children())[0]
    <DelAttr.attr l.1 at 0x7f23b2e411d0>
    """

    _astroid_fields = ("expr",)
    _other_fields = ("attrname",)
    expr = None  # Name or None: the object whose attribute is deleted

    def __init__(self, attrname=None, lineno=None, col_offset=None, parent=None):
        """
        :param attrname: The name of the attribute that is being deleted.
        :type attrname: str or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        # str or None: the attribute being removed from ``expr``
        self.attrname = attrname
        super(DelAttr, self).__init__(lineno, col_offset, parent)

    def postinit(self, expr=None):
        """Do some setup after initialisation.

        :param expr: The name that this node represents.
        :type expr: Name or None
        """
        self.expr = expr

    def get_children(self):
        yield self.expr
class Delete(mixins.AssignTypeMixin, Statement):
    """Class representing an :class:`ast.Delete` node.

    A :class:`Delete` is a ``del`` statement that deletes something.

    >>> node = astroid.extract_node('del self.attr')
    >>> node
    <Delete l.1 at 0x7f23b2e35f60>
    """

    _astroid_fields = ("targets",)
    targets = None  # list(NodeNG) or None: what is being deleted

    def postinit(self, targets=None):
        """Do some setup after initialisation.

        :param targets: What is being deleted.
        :type targets: list(NodeNG) or None
        """
        self.targets = targets

    def get_children(self):
        yield from self.targets
class Dict(NodeNG, bases.Instance):
    """Class representing an :class:`ast.Dict` node.

    A :class:`Dict` is a dictionary that is created with ``{}`` syntax.

    >>> node = astroid.extract_node('{1: "1"}')
    >>> node
    <Dict.dict l.1 at 0x7f23b2e35cc0>
    """

    _astroid_fields = ("items",)

    def __init__(self, lineno=None, col_offset=None, parent=None):
        """
        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.items = []
        """The key-value pairs contained in the dictionary.
        :type: list(tuple(NodeNG, NodeNG))
        """
        super(Dict, self).__init__(lineno, col_offset, parent)

    def postinit(self, items):
        """Do some setup after initialisation.

        :param items: The key-value pairs contained in the dictionary.
        :type items: list(tuple(NodeNG, NodeNG))
        """
        self.items = items

    @classmethod
    def from_constants(cls, items=None):
        """Create a :class:`Dict` of constants from a live dictionary.

        :param items: The items to store in the node.
        :type items: dict

        :returns: The created dictionary node.
        :rtype: Dict
        """
        node = cls()
        if items is None:
            node.items = []
        else:
            # Each live key/value is wrapped in a Const (or similar) node.
            node.items = [
                (const_factory(k), const_factory(v)) for k, v in items.items()
            ]
        return node

    def pytype(self):
        """Get the name of the type that this node represents.

        :returns: The name of the type.
        :rtype: str
        """
        return "%s.dict" % BUILTINS

    def get_children(self):
        """Get the key and value nodes below this node.

        Children are returned in the order that they are defined in the source
        code, key first then the value.

        :returns: The children.
        :rtype: iterable(NodeNG)
        """
        for key, value in self.items:
            yield key
            yield value

    def last_child(self):
        """An optimized version of list(get_children())[-1]

        :returns: The last child, or None if no children exist.
        :rtype: NodeNG or None
        """
        if self.items:
            # The value of the final key-value pair.
            return self.items[-1][1]
        return None

    def itered(self):
        """An iterator over the keys this node contains.

        :returns: The keys of this node.
        :rtype: iterable(NodeNG)
        """
        return [key for (key, _) in self.items]

    def getitem(self, index, context=None):
        """Get an item from this node.

        :param index: The node to use as a subscript index.
        :type index: Const or Slice

        :raises AstroidTypeError: When the given index cannot be used as a
            subscript index, or if this node is not subscriptable.

        :raises AstroidIndexError: If the given index does not exist in the
            dictionary.
        """
        for key, value in self.items:
            # TODO(cpopa): no support for overriding yet, {1:2, **{1: 3}}.
            if isinstance(key, DictUnpack):
                # A ``**mapping`` entry: delegate the lookup to the unpacked
                # mapping, and keep scanning the remaining pairs if it fails.
                try:
                    return value.getitem(index, context)
                except (exceptions.AstroidTypeError, exceptions.AstroidIndexError):
                    continue
            # Keys are compared by inferred constant value, skipping any key
            # whose value cannot be inferred.
            for inferredkey in key.infer(context):
                if inferredkey is util.Uninferable:
                    continue
                if isinstance(inferredkey, Const) and isinstance(index, Const):
                    if inferredkey.value == index.value:
                        return value
        raise exceptions.AstroidIndexError(index)

    def bool_value(self):
        """Determine the boolean value of this node.

        :returns: The boolean value of this node.
        :rtype: bool
        """
        return bool(self.items)
class Expr(Statement):
    """Class representing an :class:`ast.Expr` node.

    An :class:`Expr` is a statement consisting of a bare expression whose
    value is neither used nor stored.

    >>> node = astroid.extract_node('method()')
    >>> node
    <Call l.1 at 0x7f23b2e352b0>
    >>> node.parent
    <Expr l.1 at 0x7f23b2e35278>
    """

    _astroid_fields = ("value",)
    value = None  # NodeNG or None: the wrapped expression, set by postinit

    def postinit(self, value=None):
        """Do some setup after initialisation.

        :param value: What the expression does.
        :type value: NodeNG or None
        """
        self.value = value

    def get_children(self):
        yield self.value

    def _get_yield_nodes_skip_lambdas(self):
        # A lambda body keeps its own yields; do not surface them here.
        if not self.value.is_lambda:
            yield from self.value._get_yield_nodes_skip_lambdas()
class Ellipsis(mixins.NoChildrenMixin, NodeNG):  # pylint: disable=redefined-builtin
    """Class representing an :class:`ast.Ellipsis` node.

    An :class:`Ellipsis` is the literal ``...`` syntax.

    >>> node = astroid.extract_node('...')
    >>> node
    <Ellipsis l.1 at 0x7f23b2e35160>
    """

    def bool_value(self):
        """Determine the boolean value of this node.

        :returns: The boolean value of this node.
            For an :class:`Ellipsis` this is always ``True``.
        :rtype: bool
        """
        return True
class EmptyNode(mixins.NoChildrenMixin, NodeNG):
    """Holds an arbitrary object in the :attr:`LocalsDictNodeNG.locals`."""

    object = None  # the wrapped live object, if any
class ExceptHandler(mixins.MultiLineBlockMixin, mixins.AssignTypeMixin, Statement):
    """Class representing an :class:`ast.ExceptHandler`. node.

    An :class:`ExceptHandler` is a single ``except`` block of a try-except.

    >>> node = astroid.extract_node('''
    try:
        do_something()
    except Exception as error:
        print("Error!")
    ''')
    >>> node
    <TryExcept l.2 at 0x7f23b2e9d908>
    >>> node.handlers
    [<ExceptHandler l.4 at 0x7f23b2e9e860>]
    """

    _astroid_fields = ("type", "name", "body")
    _multi_line_block_fields = ("body",)
    type = None  # Tuple or NodeNG or None: the exception types handled
    name = None  # AssignName or None: the ``as`` binding, if any
    body = None  # list(NodeNG) or None: the handler's statements

    def get_children(self):
        if self.type is not None:
            yield self.type
        if self.name is not None:
            yield self.name
        yield from self.body

    # pylint: disable=redefined-builtin; had to use the same name as builtin ast module.
    def postinit(self, type=None, name=None, body=None):
        """Do some setup after initialisation.

        :param type: The types that the block handles.
        :type type: Tuple or NodeNG or None

        :param name: The name that the caught exception is assigned to.
        :type name: AssignName or None

        :param body: The contents of the block.
        :type body: list(NodeNG) or None
        """
        self.type = type
        self.name = name
        self.body = body

    @decorators.cachedproperty
    def blockstart_tolineno(self):
        """The line on which the beginning of this block ends.

        :type: int
        """
        # Prefer the ``as name`` binding, then the exception type, and fall
        # back to the line of the ``except`` keyword itself.
        node = self.name or self.type
        return node.tolineno if node else self.lineno

    def catch(self, exceptions):  # pylint: disable=redefined-outer-name
        """Check if this node handles any of the given exceptions.

        If ``exceptions`` is empty, this will default to ``True``.

        :param exceptions: The name of the exceptions to check for.
        :type exceptions: list(str)
        """
        if self.type is None or exceptions is None:
            return True
        return any(
            node.name in exceptions for node in self.type._get_name_nodes()
        )
class Exec(Statement):
    """Class representing the Python 2 ``exec`` statement.

    >>> node = astroid.extract_node('exec "True"')
    >>> node
    <Exec l.1 at 0x7f0e8106c6d0>
    """

    _astroid_fields = ("expr", "globals", "locals")
    expr = None  # NodeNG or None: the code to execute
    globals = None  # NodeNG or None: the globals mapping
    locals = None  # NodeNG or None: the locals mapping

    # pylint: disable=redefined-builtin; had to use the same name as builtin ast module.
    def postinit(self, expr=None, globals=None, locals=None):
        """Do some setup after initialisation.

        :param expr: The expression to be executed.
        :type expr: NodeNG or None

        :param globals: The globals dictionary to execute with.
        :type globals: NodeNG or None

        :param locals: The locals dictionary to execute with.
        :type locals: NodeNG or None
        """
        self.expr = expr
        self.globals = globals
        self.locals = locals
class ExtSlice(NodeNG):
    """Class representing an :class:`ast.ExtSlice` node.

    An :class:`ExtSlice` is a multi-dimensional slice expression.

    >>> node = astroid.extract_node('l[1:3, 5]')
    >>> node
    <Subscript l.1 at 0x7f23b2e9e550>
    >>> node.slice
    <ExtSlice l.1 at 0x7f23b7b05ef0>
    """

    _astroid_fields = ("dims",)
    dims = None  # list(NodeNG) or None: the individual slice dimensions

    def postinit(self, dims=None):
        """Do some setup after initialisation.

        :param dims: The simple dimensions that form the complete slice.
        :type dims: list(NodeNG) or None
        """
        self.dims = dims
class For(
    mixins.MultiLineBlockMixin,
    mixins.BlockRangeMixIn,
    mixins.AssignTypeMixin,
    Statement,
):
    """Class representing an :class:`ast.For` node.

    >>> node = astroid.extract_node('for thing in things: print(thing)')
    >>> node
    <For l.1 at 0x7f23b2e8cf28>
    """

    _astroid_fields = ("target", "iter", "body", "orelse")
    _other_other_fields = ("type_annotation",)
    _multi_line_block_fields = ("body", "orelse")

    target = None  # NodeNG or None: what the loop assigns to
    iter = None  # NodeNG or None: what the loop iterates over
    body = None  # list(NodeNG) or None: the loop body
    orelse = None  # list(NodeNG) or None: the ``else`` block
    type_annotation = None  # NodeNG or None: annotation from a type comment

    optional_assign = True
    """Whether this node optionally assigns a variable.

    This is always ``True`` for :class:`For` nodes.

    :type: bool
    """

    # pylint: disable=redefined-builtin; had to use the same name as builtin ast module.
    def postinit(
        self, target=None, iter=None, body=None, orelse=None, type_annotation=None
    ):
        """Do some setup after initialisation.

        :param target: What the loop assigns to.
        :type target: NodeNG or None

        :param iter: What the loop iterates over.
        :type iter: NodeNG or None

        :param body: The contents of the body of the loop.
        :type body: list(NodeNG) or None

        :param orelse: The contents of the ``else`` block of the loop.
        :type orelse: list(NodeNG) or None

        :param type_annotation: The annotation from a type comment, if any.
        :type type_annotation: NodeNG or None
        """
        self.target = target
        self.iter = iter
        self.body = body
        self.orelse = orelse
        self.type_annotation = type_annotation

    @decorators.cachedproperty
    def blockstart_tolineno(self):
        """The line on which the beginning of this block ends.

        :type: int
        """
        return self.iter.tolineno

    def get_children(self):
        yield from (self.target, self.iter)
        yield from self.body
        yield from self.orelse
class AsyncFor(For):
    """Class representing an :class:`ast.AsyncFor` node.

    An :class:`AsyncFor` is an asynchronous :class:`For` loop, built with
    the ``async`` keyword.

    >>> node = astroid.extract_node('''
    async def func(things):
        async for thing in things:
            print(thing)
    ''')
    >>> node
    <AsyncFunctionDef.func l.2 at 0x7f23b2e416d8>
    >>> node.body[0]
    <AsyncFor l.3 at 0x7f23b2e417b8>
    """
class Await(NodeNG):
    """Class representing an :class:`ast.Await` node.

    An :class:`Await` is the ``await`` keyword applied to an expression.

    >>> node = astroid.extract_node('''
    async def func(things):
        await other_func()
    ''')
    >>> node
    <AsyncFunctionDef.func l.2 at 0x7f23b2e41748>
    >>> node.body[0]
    <Expr l.3 at 0x7f23b2e419e8>
    >>> list(node.body[0].get_children())[0]
    <Await l.3 at 0x7f23b2e41a20>
    """

    _astroid_fields = ("value",)
    value = None  # NodeNG or None: what to wait for

    def postinit(self, value=None):
        """Do some setup after initialisation.

        :param value: What to wait for.
        :type value: NodeNG or None
        """
        self.value = value

    def get_children(self):
        yield self.value
class ImportFrom(mixins.NoChildrenMixin, mixins.ImportFromMixin, Statement):
    """Class representing an :class:`ast.ImportFrom` node.

    >>> node = astroid.extract_node('from my_package import my_module')
    >>> node
    <ImportFrom l.1 at 0x7f23b2e415c0>
    """

    _other_fields = ("modname", "names", "level")

    def __init__(
        self, fromname, names, level=0, lineno=None, col_offset=None, parent=None
    ):
        """
        :param fromname: The module that is being imported from.
        :type fromname: str or None

        :param names: What is being imported from the module.
        :type names: list(tuple(str, str or None))

        :param level: The level of relative import.
        :type level: int

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        # str or None: the source module; None for a purely relative import.
        self.modname = fromname

        # list(tuple(str, str or None)): each entry is the imported name and
        # the alias it is bound to (or None when there is no ``as`` clause).
        self.names = names

        # int: number of leading dots in the import; 0 for absolute imports.
        self.level = level

        super(ImportFrom, self).__init__(lineno, col_offset, parent)
class Attribute(NodeNG):
    """Class representing an :class:`ast.Attribute` node (``obj.attr``)."""

    _astroid_fields = ("expr",)
    _other_fields = ("attrname",)
    expr = None  # Name or None: the object the attribute is looked up on

    def __init__(self, attrname=None, lineno=None, col_offset=None, parent=None):
        """
        :param attrname: The name of the attribute.
        :type attrname: str or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        # str or None: the attribute being accessed
        self.attrname = attrname
        super(Attribute, self).__init__(lineno, col_offset, parent)

    def postinit(self, expr=None):
        """Do some setup after initialisation.

        :param expr: The name that this node represents.
        :type expr: Name or None
        """
        self.expr = expr

    def get_children(self):
        yield self.expr
class Global(mixins.NoChildrenMixin, Statement):
    """Class representing an :class:`ast.Global` node.

    >>> node = astroid.extract_node('global a_global')
    >>> node
    <Global l.1 at 0x7f23b2e9de10>
    """

    _other_fields = ("names",)

    def __init__(self, names, lineno=None, col_offset=None, parent=None):
        """
        :param names: The names being declared as global.
        :type names: list(str)

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        # list(str): the identifiers declared global
        self.names = names
        super(Global, self).__init__(lineno, col_offset, parent)

    def _infer_name(self, frame, name):
        return name
class If(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement):
    """Class representing an :class:`ast.If` node.

    >>> node = astroid.extract_node('if condition: print(True)')
    >>> node
    <If l.1 at 0x7f23b2e9dd30>
    """

    _astroid_fields = ("test", "body", "orelse")
    _multi_line_block_fields = ("body", "orelse")

    test = None  # NodeNG or None: the tested condition
    body = None  # list(NodeNG) or None: the ``if`` branch
    orelse = None  # list(NodeNG) or None: the ``else`` branch

    def postinit(self, test=None, body=None, orelse=None):
        """Do some setup after initialisation.

        :param test: The condition that the statement tests.
        :type test: NodeNG or None

        :param body: The contents of the block.
        :type body: list(NodeNG) or None

        :param orelse: The contents of the ``else`` block.
        :type orelse: list(NodeNG) or None
        """
        self.test = test
        self.body = body
        self.orelse = orelse

    @decorators.cachedproperty
    def blockstart_tolineno(self):
        """The line on which the beginning of this block ends.

        :type: int
        """
        return self.test.tolineno

    def block_range(self, lineno):
        """Get a range from the given line number to where this node ends.

        :param lineno: The line number to start the range at.
        :type lineno: int

        :returns: The range of line numbers that this node belongs to,
            starting at the given line number.
        :rtype: tuple(int, int)
        """
        first = self.body[0]
        last = self.body[-1]
        if lineno == first.fromlineno:
            return lineno, lineno
        if lineno <= last.tolineno:
            return lineno, last.tolineno
        # Past the ``if`` branch: delegate to the else-block range helper.
        return self._elsed_block_range(lineno, self.orelse, first.fromlineno - 1)

    def get_children(self):
        yield self.test
        yield from self.body
        yield from self.orelse

    def has_elif_block(self):
        """Whether the ``else`` branch is exactly one nested :class:`If`
        (i.e. an ``elif``)."""
        orelse = self.orelse
        return len(orelse) == 1 and isinstance(orelse[0], If)
class IfExp(NodeNG):
    """Class representing an :class:`ast.IfExp` node (a ternary expression).

    >>> node = astroid.extract_node('value if condition else other')
    >>> node
    <IfExp l.1 at 0x7f23b2e9dbe0>
    """

    _astroid_fields = ("test", "body", "orelse")

    test = None  # NodeNG or None: the tested condition
    body = None  # NodeNG or None: the value when the test is true
    orelse = None  # NodeNG or None: the value when the test is false

    def postinit(self, test=None, body=None, orelse=None):
        """Do some setup after initialisation.

        :param test: The condition that the statement tests.
        :type test: NodeNG or None

        :param body: The contents of the block.
        :type body: NodeNG or None

        :param orelse: The contents of the ``else`` block.
        :type orelse: NodeNG or None
        """
        self.test = test
        self.body = body
        self.orelse = orelse

    def get_children(self):
        yield from (self.test, self.body, self.orelse)

    def op_left_associative(self):
        # `1 if True else 2 if False else 3` is parsed as
        # `1 if True else (2 if False else 3)`, so the conditional
        # expression is right-associative.
        return False
class Import(mixins.NoChildrenMixin, mixins.ImportFromMixin, Statement):
    """Class representing an :class:`ast.Import` node.

    >>> node = astroid.extract_node('import astroid')
    >>> node
    <Import l.1 at 0x7f23b2e4e5c0>
    """

    _other_fields = ("names",)

    def __init__(self, names=None, lineno=None, col_offset=None, parent=None):
        """
        :param names: The names being imported.
        :type names: list(tuple(str, str or None)) or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        # list(tuple(str, str or None)) or None: each entry is the imported
        # name and the alias it is bound to (or None when there is none).
        self.names = names
        super(Import, self).__init__(lineno, col_offset, parent)
class Index(NodeNG):
    """Class representing an :class:`ast.Index` node.

    An :class:`Index` is a simple (non-slice) subscript.

    >>> node = astroid.extract_node('things[1]')
    >>> node
    <Subscript l.1 at 0x7f23b2e9e2b0>
    >>> node.slice
    <Index l.1 at 0x7f23b2e9e6a0>
    """

    _astroid_fields = ("value",)
    value = None  # NodeNG or None: the subscript value

    def postinit(self, value=None):
        """Do some setup after initialisation.

        :param value: The value to subscript with.
        :type value: NodeNG or None
        """
        self.value = value

    def get_children(self):
        yield self.value
class Keyword(NodeNG):
    """Class representing an :class:`ast.keyword` node.

    >>> node = astroid.extract_node('function(a_kwarg=True)')
    >>> node
    <Call l.1 at 0x7f23b2e9e320>
    >>> node.keywords
    [<Keyword l.1 at 0x7f23b2e9e9b0>]
    """

    _astroid_fields = ("value",)
    _other_fields = ("arg",)
    value = None  # NodeNG or None: the value passed for the keyword argument

    def __init__(self, arg=None, lineno=None, col_offset=None, parent=None):
        """
        :param arg: The argument being assigned to.
        :type arg: Name or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        # Name or None: the parameter the value is assigned to
        self.arg = arg
        super(Keyword, self).__init__(lineno, col_offset, parent)

    def postinit(self, value=None):
        """Do some setup after initialisation.

        :param value: The value being assigned to the keyword argument.
        :type value: NodeNG or None
        """
        self.value = value

    def get_children(self):
        yield self.value
class List(_BaseContainer):
    """Class representing an :class:`ast.List` node.

    >>> node = astroid.extract_node('[1, 2, 3]')
    >>> node
    <List.list l.1 at 0x7f23b2e9e128>
    """

    _other_fields = ("ctx",)

    def __init__(self, ctx=None, lineno=None, col_offset=None, parent=None):
        """
        :param ctx: Whether the list is assigned to or loaded from.
        :type ctx: Context or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        # Context or None: load vs. store context of the list literal
        self.ctx = ctx
        super(List, self).__init__(lineno, col_offset, parent)

    def pytype(self):
        """Get the name of the type that this node represents.

        :returns: The name of the type.
        :rtype: str
        """
        return "%s.list" % BUILTINS

    def getitem(self, index, context=None):
        """Get an item from this node.

        :param index: The node to use as a subscript index.
        :type index: Const or Slice
        """
        return _container_getitem(self, self.elts, index, context=context)
class Nonlocal(mixins.NoChildrenMixin, Statement):
    """Class representing an :class:`ast.Nonlocal` node.

    >>> node = astroid.extract_node('''
    def function():
        nonlocal var
    ''')
    >>> node
    <FunctionDef.function l.2 at 0x7f23b2e9e208>
    >>> node.body[0]
    <Nonlocal l.3 at 0x7f23b2e9e908>
    """

    _other_fields = ("names",)

    def __init__(self, names, lineno=None, col_offset=None, parent=None):
        """
        :param names: The names being declared as not local.
        :type names: list(str)

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        # list(str): the identifiers declared nonlocal
        self.names = names
        super(Nonlocal, self).__init__(lineno, col_offset, parent)

    def _infer_name(self, frame, name):
        return name
class Pass(mixins.NoChildrenMixin, Statement):
    """Class representing an :class:`ast.Pass` node (the ``pass`` statement).

    >>> node = astroid.extract_node('pass')
    >>> node
    <Pass l.1 at 0x7f23b2e9e748>
    """
class Print(Statement):
    """Class representing the Python 2 :class:`ast.Print` statement.

    >>> node = astroid.extract_node('print "A message"')
    >>> node
    <Print l.1 at 0x7f0e8101d290>
    """

    _astroid_fields = ("dest", "values")
    dest = None  # NodeNG or None: the ``>>`` destination, if any
    values = None  # list(NodeNG) or None: the values being printed

    def __init__(self, nl=None, lineno=None, col_offset=None, parent=None):
        """
        :param nl: Whether to print a new line.
        :type nl: bool or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        # bool or None: whether a trailing newline is printed
        self.nl = nl
        super(Print, self).__init__(lineno, col_offset, parent)

    def postinit(self, dest=None, values=None):
        """Do some setup after initialisation.

        :param dest: Where to print to.
        :type dest: NodeNG or None

        :param values: What to print.
        :type values: list(NodeNG) or None
        """
        self.dest = dest
        self.values = values
class Raise(Statement):
    """Class representing an :class:`ast.Raise` node.

    >>> node = astroid.extract_node('raise RuntimeError("Something bad happened!")')
    >>> node
    <Raise l.1 at 0x7f23b2e9e828>
    """

    _astroid_fields = ("exc", "cause")
    exc = None  # NodeNG or None: the exception being raised
    cause = None  # NodeNG or None: the ``from`` cause, if any

    def postinit(self, exc=None, cause=None):
        """Do some setup after initialisation.

        :param exc: What is being raised.
        :type exc: NodeNG or None

        :param cause: The exception being used to raise this one.
        :type cause: NodeNG or None
        """
        self.exc = exc
        self.cause = cause

    def raises_not_implemented(self):
        """Check if this node raises a :class:`NotImplementedError`.

        :returns: True if this node raises a :class:`NotImplementedError`,
            False otherwise.
        :rtype: bool
        """
        if not self.exc:
            return False
        return any(
            name.name == "NotImplementedError"
            for name in self.exc._get_name_nodes()
        )

    def get_children(self):
        if self.exc is not None:
            yield self.exc
        if self.cause is not None:
            yield self.cause
class Return(Statement):
    """Class representing an :class:`ast.Return` node.

    >>> node = astroid.extract_node('return True')
    >>> node
    <Return l.1 at 0x7f23b8211908>
    """

    _astroid_fields = ("value",)
    value = None  # NodeNG or None: the returned expression; None for bare ``return``

    def postinit(self, value=None):
        """Do some setup after initialisation.

        :param value: The value being returned.
        :type value: NodeNG or None
        """
        self.value = value

    def get_children(self):
        if self.value is not None:
            yield self.value

    def is_tuple_return(self):
        """Whether the returned expression is a tuple literal."""
        return isinstance(self.value, Tuple)

    def _get_return_nodes_skip_functions(self):
        yield self
class Set(_BaseContainer):
    """Class representing an :class:`ast.Set` node (a set literal).

    >>> node = astroid.extract_node('{1, 2, 3}')
    >>> node
    <Set.set l.1 at 0x7f23b2e71d68>
    """

    def pytype(self):
        """Get the name of the type that this node represents.

        :returns: The name of the type.
        :rtype: str
        """
        return "%s.set" % BUILTINS
class Slice(NodeNG):
    """Class representing an :class:`ast.Slice` node.

    >>> node = astroid.extract_node('things[1:3]')
    >>> node
    <Subscript l.1 at 0x7f23b2e71f60>
    >>> node.slice
    <Slice l.1 at 0x7f23b2e71e80>
    """

    _astroid_fields = ("lower", "upper", "step")
    lower = None  # NodeNG or None: the lower bound of the slice
    upper = None  # NodeNG or None: the upper bound of the slice
    step = None  # NodeNG or None: the step between indexes

    def postinit(self, lower=None, upper=None, step=None):
        """Do some setup after initialisation.

        :param lower: The lower index in the slice.
        :type lower: NodeNG or None

        :param upper: The upper index in the slice.
        :type upper: NodeNG or None

        :param step: The step to take between indexes.
        :type step: NodeNG or None
        """
        self.lower = lower
        self.upper = upper
        self.step = step

    def _wrap_attribute(self, attr):
        """Wrap the empty attributes of the Slice in a Const node."""
        if attr:
            return attr
        const = const_factory(attr)
        const.parent = self
        return const

    @decorators.cachedproperty
    def _proxied(self):
        # Proxy attribute lookups to the builtin ``slice`` class.
        builtins = MANAGER.builtins_module
        return builtins.getattr("slice")[0]

    def pytype(self):
        """Get the name of the type that this node represents.

        :returns: The name of the type.
        :rtype: str
        """
        return "%s.slice" % BUILTINS

    def igetattr(self, attrname, context=None):
        """Infer the possible values of the given attribute on the slice.

        :param attrname: The name of the attribute to infer.
        :type attrname: str

        :returns: The inferred possible values.
        :rtype: iterable(NodeNG)
        """
        bounds = {"start": self.lower, "stop": self.upper, "step": self.step}
        if attrname in bounds:
            yield self._wrap_attribute(bounds[attrname])
        else:
            yield from self.getattr(attrname, context=context)

    def getattr(self, attrname, context=None):
        return self._proxied.getattr(attrname, context)

    def get_children(self):
        for child in (self.lower, self.upper, self.step):
            if child is not None:
                yield child
class Starred(mixins.ParentAssignTypeMixin, NodeNG):
    """Class representing an :class:`ast.Starred` node.

    >>> node = astroid.extract_node('*args')
    >>> node
    <Starred l.1 at 0x7f23b2e41978>
    """

    _astroid_fields = ("value",)
    _other_fields = ("ctx",)

    value = None
    """What is being unpacked.

    :type: NodeNG or None
    """

    def __init__(self, ctx=None, lineno=None, col_offset=None, parent=None):
        """
        :param ctx: Whether the list is assigned to or loaded from.
        :type ctx: Context or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.ctx = ctx
        """Whether the starred item is assigned to or loaded from.

        :type: Context or None
        """

        super(Starred, self).__init__(
            lineno=lineno, col_offset=col_offset, parent=parent
        )

    def postinit(self, value=None):
        """Do some setup after initialisation.

        :param value: What is being unpacked.
        :type value: NodeNG or None
        """
        self.value = value

    def get_children(self):
        yield self.value
class Subscript(NodeNG):
    """Class representing an :class:`ast.Subscript` node.

    >>> node = astroid.extract_node('things[1:3]')
    >>> node
    <Subscript l.1 at 0x7f23b2e71f60>
    """

    _astroid_fields = ("value", "slice")
    _other_fields = ("ctx",)

    value = None
    """What is being indexed.

    :type: NodeNG or None
    """
    slice = None
    """The slice being used to lookup.

    :type: NodeNG or None
    """

    def __init__(self, ctx=None, lineno=None, col_offset=None, parent=None):
        """
        :param ctx: Whether the subscripted item is assigned to or loaded from.
        :type ctx: Context or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.ctx = ctx
        """Whether the subscripted item is assigned to or loaded from.

        :type: Context or None
        """

        super(Subscript, self).__init__(
            lineno=lineno, col_offset=col_offset, parent=parent
        )

    # pylint: disable=redefined-builtin; had to use the same name as builtin ast module.
    def postinit(self, value=None, slice=None):
        """Do some setup after initialisation.

        :param value: What is being indexed.
        :type value: NodeNG or None

        :param slice: The slice being used to lookup.
        :type slice: NodeNG or None
        """
        self.value = value
        self.slice = slice

    def get_children(self):
        yield self.value
        yield self.slice
class TryExcept(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement):
    """Class representing an :class:`ast.TryExcept` node.

    >>> node = astroid.extract_node('''
    try:
        do_something()
    except Exception as error:
        print("Error!")
    ''')
    >>> node
    <TryExcept l.2 at 0x7f23b2e9d908>
    """

    _astroid_fields = ("body", "handlers", "orelse")
    _multi_line_block_fields = ("body", "handlers", "orelse")

    body = None
    """The contents of the block to catch exceptions from.

    :type: list(NodeNG) or None
    """
    handlers = None
    """The exception handlers.

    :type: list(ExceptHandler) or None
    """
    orelse = None
    """The contents of the ``else`` block.

    :type: list(NodeNG) or None
    """

    def postinit(self, body=None, handlers=None, orelse=None):
        """Do some setup after initialisation.

        :param body: The contents of the block to catch exceptions from.
        :type body: list(NodeNG) or None

        :param handlers: The exception handlers.
        :type handlers: list(ExceptHandler) or None

        :param orelse: The contents of the ``else`` block.
        :type orelse: list(NodeNG) or None
        """
        self.body = body
        self.handlers = handlers
        self.orelse = orelse

    def _infer_name(self, frame, name):
        # Names bound inside a try/except (e.g. the exception target) infer
        # to themselves; no renaming takes place at this level.
        return name

    def block_range(self, lineno):
        """Get a range from the given line number to where this node ends.

        :param lineno: The line number to start the range at.
        :type lineno: int

        :returns: The range of line numbers that this node belongs to,
            starting at the given line number.
        :rtype: tuple(int, int)
        """
        last = None
        for exhandler in self.handlers:
            # A line on an ``except <type>:`` clause is a one-line block.
            if exhandler.type and lineno == exhandler.type.fromlineno:
                return lineno, lineno
            # A line inside a handler body extends to the end of that body.
            if exhandler.body[0].fromlineno <= lineno <= exhandler.body[-1].tolineno:
                return lineno, exhandler.body[-1].tolineno
            # Remember where the first handler starts so the try body's
            # range can stop just before it.
            if last is None:
                last = exhandler.body[0].fromlineno - 1
        return self._elsed_block_range(lineno, self.orelse, last)

    def get_children(self):
        yield from self.body

        yield from self.handlers or ()
        yield from self.orelse or ()
class TryFinally(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement):
    """Class representing an :class:`ast.TryFinally` node.

    >>> node = astroid.extract_node('''
    try:
        do_something()
    except Exception as error:
        print("Error!")
    finally:
        print("Cleanup!")
    ''')
    >>> node
    <TryFinally l.2 at 0x7f23b2e41d68>
    """

    _astroid_fields = ("body", "finalbody")
    _multi_line_block_fields = ("body", "finalbody")

    body = None
    """The try-except that the finally is attached to.

    :type: list(TryExcept) or None
    """
    finalbody = None
    """The contents of the ``finally`` block.

    :type: list(NodeNG) or None
    """

    def postinit(self, body=None, finalbody=None):
        """Do some setup after initialisation.

        :param body: The try-except that the finally is attached to.
        :type body: list(TryExcept) or None

        :param finalbody: The contents of the ``finally`` block.
        :type finalbody: list(NodeNG) or None
        """
        self.body = body
        self.finalbody = finalbody

    def block_range(self, lineno):
        """Get a range from the given line number to where this node ends.

        :param lineno: The line number to start the range at.
        :type lineno: int

        :returns: The range of line numbers that this node belongs to,
            starting at the given line number.
        :rtype: tuple(int, int)
        """
        child = self.body[0]
        # py2.5 try: except: finally:
        # When the try/except starts on the same line as this node and the
        # requested line falls inside it, delegate to the nested TryExcept.
        if (
            isinstance(child, TryExcept)
            and child.fromlineno == self.fromlineno
            and child.tolineno >= lineno > self.fromlineno
        ):
            return child.block_range(lineno)
        return self._elsed_block_range(lineno, self.finalbody)

    def get_children(self):
        yield from self.body
        yield from self.finalbody
class Tuple(_BaseContainer):
    """Class representing an :class:`ast.Tuple` node.

    >>> node = astroid.extract_node('(1, 2, 3)')
    >>> node
    <Tuple.tuple l.1 at 0x7f23b2e41780>
    """

    _other_fields = ("ctx",)

    def __init__(self, ctx=None, lineno=None, col_offset=None, parent=None):
        """
        :param ctx: Whether the tuple is assigned to or loaded from.
        :type ctx: Context or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.ctx = ctx
        """Whether the tuple is assigned to or loaded from.

        :type: Context or None
        """

        super(Tuple, self).__init__(lineno, col_offset, parent)

    def pytype(self):
        """Get the name of the type that this node represents.

        :returns: The name of the type.
        :rtype: str
        """
        return "%s.tuple" % BUILTINS

    def getitem(self, index, context=None):
        """Get an item from this node.

        :param index: The node to use as a subscript index.
        :type index: Const or Slice
        """
        return _container_getitem(self, self.elts, index, context=context)
class UnaryOp(NodeNG):
    """Class representing an :class:`ast.UnaryOp` node.

    >>> node = astroid.extract_node('-5')
    >>> node
    <UnaryOp l.1 at 0x7f23b2e4e198>
    """

    _astroid_fields = ("operand",)
    _other_fields = ("op",)

    operand = None
    """What the unary operator is applied to.

    :type: NodeNG or None
    """

    def __init__(self, op=None, lineno=None, col_offset=None, parent=None):
        """
        :param op: The operator.
        :type: str or None

        :param lineno: The line that this node appears on in the source code.
        :type lineno: int or None

        :param col_offset: The column that this node appears on in the
            source code.
        :type col_offset: int or None

        :param parent: The parent node in the syntax tree.
        :type parent: NodeNG or None
        """
        self.op = op
        """The operator.

        :type: str or None
        """

        super(UnaryOp, self).__init__(lineno, col_offset, parent)

    def postinit(self, operand=None):
        """Do some setup after initialisation.

        :param operand: What the unary operator is applied to.
        :type operand: NodeNG or None
        """
        self.operand = operand

    # This is set by inference.py
    def _infer_unaryop(self, context=None):
        raise NotImplementedError

    def type_errors(self, context=None):
        """Get a list of type errors which can occur during inference.

        Each TypeError is represented by a :class:`BadUnaryOperationMessage`,
        which holds the original exception.

        :returns: The list of possible type errors.
        :rtype: list(BadUnaryOperationMessage)
        """
        try:
            results = self._infer_unaryop(context=context)
            return [
                result
                for result in results
                if isinstance(result, util.BadUnaryOperationMessage)
            ]
        except exceptions.InferenceError:
            return []

    def get_children(self):
        yield self.operand

    def op_precedence(self):
        # ``not`` has its own precedence entry; other unary operators use
        # the generic lookup from the base class.
        if self.op == "not":
            return OP_PRECEDENCE[self.op]

        return super().op_precedence()
class While(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement):
    """Class representing an :class:`ast.While` node.

    >>> node = astroid.extract_node('''
    while condition():
        print("True")
    ''')
    >>> node
    <While l.2 at 0x7f23b2e4e390>
    """

    _astroid_fields = ("test", "body", "orelse")
    _multi_line_block_fields = ("body", "orelse")

    test = None
    """The condition that the loop tests.

    :type: NodeNG or None
    """
    body = None
    """The contents of the loop.

    :type: list(NodeNG) or None
    """
    orelse = None
    """The contents of the ``else`` block.

    :type: list(NodeNG) or None
    """

    def postinit(self, test=None, body=None, orelse=None):
        """Do some setup after initialisation.

        :param test: The condition that the loop tests.
        :type test: NodeNG or None

        :param body: The contents of the loop.
        :type body: list(NodeNG) or None

        :param orelse: The contents of the ``else`` block.
        :type orelse: list(NodeNG) or None
        """
        self.test = test
        self.body = body
        self.orelse = orelse

    @decorators.cachedproperty
    def blockstart_tolineno(self):
        """The line on which the beginning of this block ends.

        :type: int
        """
        return self.test.tolineno

    def block_range(self, lineno):
        """Get a range from the given line number to where this node ends.

        :param lineno: The line number to start the range at.
        :type lineno: int

        :returns: The range of line numbers that this node belongs to,
            starting at the given line number.
        :rtype: tuple(int, int)
        """
        return self._elsed_block_range(lineno, self.orelse)

    def get_children(self):
        yield self.test

        yield from self.body
        yield from self.orelse
class With(
    mixins.MultiLineBlockMixin,
    mixins.BlockRangeMixIn,
    mixins.AssignTypeMixin,
    Statement,
):
    """Class representing an :class:`ast.With` node.

    >>> node = astroid.extract_node('''
    with open(file_path) as file_:
        print(file_.read())
    ''')
    >>> node
    <With l.2 at 0x7f23b2e4e710>
    """

    _astroid_fields = ("items", "body")
    _other_other_fields = ("type_annotation",)
    _multi_line_block_fields = ("body",)

    items = None
    """The pairs of context managers and the names they are assigned to.

    :type: list(tuple(NodeNG, AssignName or None)) or None
    """
    body = None
    """The contents of the ``with`` block.

    :type: list(NodeNG) or None
    """
    type_annotation = None
    """If present, this will contain the type annotation passed by a type comment

    :type: NodeNG or None
    """

    def postinit(self, items=None, body=None, type_annotation=None):
        """Do some setup after initialisation.

        :param items: The pairs of context managers and the names
            they are assigned to.
        :type items: list(tuple(NodeNG, AssignName or None)) or None

        :param body: The contents of the ``with`` block.
        :type body: list(NodeNG) or None
        """
        self.items = items
        self.body = body
        self.type_annotation = type_annotation

    @decorators.cachedproperty
    def blockstart_tolineno(self):
        """The line on which the beginning of this block ends.

        :type: int
        """
        return self.items[-1][0].tolineno

    def get_children(self):
        """Get the child nodes below this node.

        :returns: The children.
        :rtype: iterable(NodeNG)
        """
        for manager, target in self.items:
            yield manager
            if target:
                yield target
        yield from self.body
class AsyncWith(With):
    """Asynchronous ``with`` built with the ``async`` keyword."""
class Yield(NodeNG):
    """Class representing an :class:`ast.Yield` node.

    >>> node = astroid.extract_node('yield True')
    >>> node
    <Yield l.1 at 0x7f23b2e4e5f8>
    """

    _astroid_fields = ("value",)

    value = None
    """The value to yield.

    :type: NodeNG or None
    """

    def postinit(self, value=None):
        """Do some setup after initialisation.

        :param value: The value to yield.
        :type value: NodeNG or None
        """
        self.value = value

    def get_children(self):
        # A bare ``yield`` has no value node.
        if self.value is not None:
            yield self.value

    def _get_yield_nodes_skip_lambdas(self):
        # This node is itself a yield point.
        yield self
class YieldFrom(Yield):
    """Class representing an :class:`ast.YieldFrom` node."""
class DictUnpack(mixins.NoChildrenMixin, NodeNG):
    """Represents the unpacking of dicts into dicts using :pep:`448`."""
class FormattedValue(NodeNG):
    """Class representing an :class:`ast.FormattedValue` node.

    Represents a :pep:`498` format string.

    >>> node = astroid.extract_node('f"Format {type_}"')
    >>> node
    <JoinedStr l.1 at 0x7f23b2e4ed30>
    >>> node.values
    [<Const.str l.1 at 0x7f23b2e4eda0>, <FormattedValue l.1 at 0x7f23b2e4edd8>]
    """

    _astroid_fields = ("value", "format_spec")

    value = None
    """The value to be formatted into the string.

    :type: NodeNG or None
    """
    conversion = None
    """The type of formatting to be applied to the value.

    .. seealso::
        :class:`ast.FormattedValue`

    :type: int or None
    """
    format_spec = None
    """The formatting to be applied to the value.

    .. seealso::
        :class:`ast.FormattedValue`

    :type: JoinedStr or None
    """

    def postinit(self, value, conversion=None, format_spec=None):
        """Do some setup after initialisation.

        :param value: The value to be formatted into the string.
        :type value: NodeNG

        :param conversion: The type of formatting to be applied to the value.
        :type conversion: int or None

        :param format_spec: The formatting to be applied to the value.
        :type format_spec: JoinedStr or None
        """
        self.value = value
        self.conversion = conversion
        self.format_spec = format_spec

    def get_children(self):
        yield self.value

        if self.format_spec is not None:
            yield self.format_spec
class JoinedStr(NodeNG):
    """Represents a list of string expressions to be joined.

    >>> node = astroid.extract_node('f"Format {type_}"')
    >>> node
    <JoinedStr l.1 at 0x7f23b2e4ed30>
    """

    _astroid_fields = ("values",)

    values = None
    """The string expressions to be joined.

    :type: list(FormattedValue or Const) or None
    """

    def postinit(self, values=None):
        """Do some setup after initialisation.

        :param values: The string expressions to be joined.
        :type values: list(FormattedValue or Const) or None
        """
        self.values = values

    def get_children(self):
        yield from self.values
class Unknown(mixins.AssignTypeMixin, NodeNG):
    """This node represents a node in a constructed AST where
    introspection is not possible.  At the moment, it's only used in
    the args attribute of FunctionDef nodes where function signature
    introspection failed.
    """

    name = "Unknown"

    def qname(self):
        # An Unknown node has no meaningful qualified name.
        return "Unknown"

    def infer(self, context=None, **kwargs):
        """Inference on an Unknown node immediately terminates."""
        yield util.Uninferable
# constants ##############################################################

# Maps Python value types to the astroid node class used to represent
# them.  Scalar types (bool, int, float, complex, str, bytes) are added
# later by _update_const_classes().
CONST_CLS = {
    list: List,
    tuple: Tuple,
    dict: Dict,
    set: Set,
    type(None): Const,
    type(NotImplemented): Const,
}
def _update_const_classes():
    """update constant classes, so the keys of CONST_CLS can be reused"""
    # Every scalar constant type maps to the generic Const node.
    CONST_CLS.update(
        dict.fromkeys((bool, int, float, complex, str, bytes), Const)
    )


_update_const_classes()
def _two_step_initialization(cls, value):
instance = cls()
instance.postinit(value)
return instance
def _dict_initialization(cls, value):
    """Like _two_step_initialization, but mappings are flattened to item tuples."""
    items = tuple(value.items()) if isinstance(value, dict) else value
    return _two_step_initialization(cls, items)
# How to build each node class from a raw Python value: containers go
# through the two-step (init + postinit) protocol, Const takes the value
# directly in its constructor.
_CONST_CLS_CONSTRUCTORS = {
    List: _two_step_initialization,
    Tuple: _two_step_initialization,
    Dict: _dict_initialization,
    Set: _two_step_initialization,
    Const: lambda cls, value: cls(value),
}
def const_factory(value):
    """return an astroid node for a python value"""
    # XXX we should probably be stricter here and only consider stuff in
    # CONST_CLS or do better treatment: in case where value is not in CONST_CLS,
    # we should rather recall the builder on this value than returning an empty
    # node (another option being that const_factory shouldn't be called with something
    # not in CONST_CLS)
    assert not isinstance(value, NodeNG)

    # Hack for ignoring elements of a sequence
    # or a mapping, in order to avoid transforming
    # each element to an AST. This is fixed in 2.0
    # and this approach is a temporary hack.
    if isinstance(value, (list, set, tuple, dict)):
        elts = []
    else:
        elts = value

    try:
        initializer_cls = CONST_CLS[value.__class__]
        initializer = _CONST_CLS_CONSTRUCTORS[initializer_cls]
        return initializer(initializer_cls, elts)
    except (KeyError, AttributeError):
        # Unknown value type: fall back to an opaque EmptyNode carrying
        # the raw object.
        node = EmptyNode()
        node.object = value
        return node
def is_from_decorator(node):
    """Return True if the given node is the child of a decorator"""

    def _ancestors(start):
        ancestor = start.parent
        while ancestor is not None:
            yield ancestor
            ancestor = ancestor.parent

    return any(isinstance(ancestor, Decorators) for ancestor in _ancestors(node))
| {
"content_hash": "f3780731b2c74696f67dd63cab287358",
"timestamp": "",
"source": "github",
"line_count": 4671,
"max_line_length": 93,
"avg_line_length": 29.190323271248126,
"alnum_prop": 0.5829715140669464,
"repo_name": "ekwoodrich/python-dvrip",
"id": "204d80233a117dd5241d1d358ffa4bb5dddaeee5",
"size": "137726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/lib/python3.5/site-packages/astroid/node_classes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5706"
}
],
"symlink_target": ""
} |
from google.cloud import artifactregistry_v1beta2
def sample_delete_repository():
    """Delete an Artifact Registry repository and wait for the LRO result."""
    client = artifactregistry_v1beta2.ArtifactRegistryClient()

    request = artifactregistry_v1beta2.DeleteRepositoryRequest(name="name_value")

    # delete_repository returns a long-running operation; block until done.
    operation = client.delete_repository(request=request)

    print("Waiting for operation to complete...")

    response = operation.result()
    print(response)

# [END artifactregistry_v1beta2_generated_ArtifactRegistry_DeleteRepository_sync]
| {
"content_hash": "db980e008dc5b38c0a71bedcf9e3fd59",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 81,
"avg_line_length": 26.17391304347826,
"alnum_prop": 0.7342192691029901,
"repo_name": "googleapis/python-artifact-registry",
"id": "62a1456b4c6bf602e8434632998ad52c65ab33ee",
"size": "2020",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/artifactregistry_v1beta2_generated_artifact_registry_delete_repository_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1577437"
},
{
"name": "Shell",
"bytes": "30693"
}
],
"symlink_target": ""
} |
import json
import os
import re
import base64
import datetime
import mistune
import dbconn
import dbutil
from contextlib import suppress
from hashlib import md5
from steem.blockchain import Blockchain
from steem.utils import block_num_from_hash
from bs4 import BeautifulSoup
from langdetect import detect
class Article:
    """A single Steem post prepared for insertion into the database.

    Attributes are populated field-by-field by the crawler (see ``run``)
    rather than passed to the constructor.
    """

    def __init__(self):
        # These were previously *class* attributes; in particular the
        # mutable ``tag = []`` list was shared by every instance, so one
        # article's tags could leak into another's if mutated in place.
        self.subject = ""
        self.content = ""       # raw markdown body
        self.text = ""          # plain text extracted from the body
        self.author = ""
        self.preview = ""       # first 100 chars of the plain text
        self.url = ""
        self.tag = []           # tag strings from the post metadata
        self.img = None         # first image URL, if any
        self.hashId = ""        # dedup key derived from the preview's md5
        self.created = None     # datetime parsed from the block timestamp
def run():
    """Stream new blocks from the Steem blockchain and index Korean posts.

    Resumes from the last processed block id stored in the DB, extracts
    top-level posts (comment operations with no parent author), and inserts
    the ones detected as Korean into the articles table.  After each block
    the processed block id is recorded so the crawler can resume.
    """
    conn = dbconn.get_connection()
    last_id = dbutil.get_last_blockId(conn)
    chain = Blockchain()
    for block in chain.stream_from(start_block=last_id + 1, full_blocks=True):
        print(block['block_id'])
        for tx in (t['operations'][0] for t in block['transactions']):
            # Only top-level posts: replies have a non-empty parent_author.
            if tx[0] != 'comment' or tx[1]['parent_author'] != '':
                continue
            try:
                meta = json.loads(tx[1]['json_metadata'])
                # Bodies starting with "@@ " are diff-style edits, not posts.
                if not tx[1]['body'].startswith("@@ "):
                    article = _build_article(block, tx, meta)
                    # Best-effort: langdetect raises on short/ambiguous
                    # text; such posts are deliberately skipped.
                    with suppress(Exception):
                        if detect(article.text) == "ko":
                            print(meta['tags'])
                            dbutil.insert_article(conn, article)
            except Exception:
                # Malformed json_metadata or unexpected operation shape --
                # skip this transaction and keep streaming.  (Previously a
                # bare ``except:`` which also swallowed KeyboardInterrupt.)
                pass
        dbutil.insert_blockId(conn, block_num_from_hash(block['block_id']), block['timestamp'])


def _build_article(block, tx, meta):
    """Construct an Article from a top-level 'comment' operation."""
    article = Article()
    article.author = tx[1]['author']
    article.subject = tx[1]['title']
    article.content = tx[1]['body']
    article.tag = meta['tags']
    html = mistune.markdown(article.content)
    article.text = BeautifulSoup(html, "html.parser").get_text().strip()
    article.preview = article.text[0:100]
    # Dedup key: first 32 chars of the base64-encoded md5 of the preview.
    article.hashId = base64.b64encode(md5(article.preview.encode("UTF-8")).digest())[0:32]
    article.created = datetime.datetime.strptime(block['timestamp'], '%Y-%m-%dT%H:%M:%S')
    article.url = "https://steemkr.com/@" + article.author + "/" + tx[1]['permlink']
    if "image" in meta and len(meta['image']) > 0:
        article.img = meta['image'][0]
    return article
if __name__ == '__main__':
    # Script entry point: resume streaming from the last stored block id.
    run()
| {
"content_hash": "8ec166774a72f117803187cdceb65aac",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 110,
"avg_line_length": 37.47692307692308,
"alnum_prop": 0.4967159277504105,
"repo_name": "taeminlee/steemtrend.crawler",
"id": "e2df2476032ec233da4e1220202a1e7be604b490",
"size": "2436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crawl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3705"
}
],
"symlink_target": ""
} |
import logging
from django.conf import settings
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.contrib.auth import logout, login, get_user_model
from admin_helper.utils import get_clean_url
from admin_helper.settings import SU_KEY, SU_BACKEND, IGNORE_LIST, GET_USER_FUNC
logger = logging.getLogger(__name__)
class CheckSuMiddleware(object):
    """
    While an admin is impersonating another user (the substitute-user key
    is present in the session), keep a persistent warning message shown on
    every page with a link back to the original account.
    """
    def process_view(self, request, view_func, view_args, view_kwargs):
        # Skip AJAX requests and explicitly ignored paths.
        need_check = not (request.is_ajax() or request.path in IGNORE_LIST)
        if need_check and SU_KEY in request.session:
            # URL that switches back to the stored original username.
            url = get_clean_url(request, replace={
                SU_KEY: request.session.get(SU_KEY)
            })
            msg = _('You work under someone else\'s account. Be extremely careful!')
            full_msg = mark_safe(u'{message} <a href="{url}">{title}</a>'.format(
                message=msg,
                url=url,
                title=_('Return to your account')
            ))
            # Update an already-queued warning in place so duplicates do not
            # stack up on every request.
            # NOTE(review): relies on the private ``_loaded_messages``
            # attribute of the messages storage -- fragile across Django
            # versions; confirm before upgrading.
            replaced, storage = False, messages.get_messages(request)
            for msg_obj in storage._loaded_messages:
                if msg_obj.message.startswith(msg):
                    msg_obj.message = full_msg
                    replaced = True
            if not replaced:
                messages.add_message(request, messages.WARNING, full_msg)
        return None
class SuMiddleware(object):
    """Let superusers switch to ("su") another account via ``?su=<username>``.

    The original username is remembered in the session under ``SU_KEY`` so
    the admin can later return to their own account (see CheckSuMiddleware).
    """
    def process_request(self, request):
        result = None
        if not request.user.is_authenticated():
            return result

        current_username = request.user.get_username()
        # NOTE(review): despite its name, ``is_key_exist`` holds the stored
        # original username (a truthy string) or False -- not a boolean.
        is_key_exist = SU_KEY in request.session and request.session[SU_KEY]
        original_username = is_key_exist or current_username
        new_username = request.GET.get('su')

        need_su = new_username and new_username != current_username
        # Allowed for superusers, in DEBUG mode, or when returning to the
        # original account.
        can_su = request.user.is_superuser or settings.DEBUG or original_username == new_username
        if need_su and can_su:
            logger.info(
                'Authorize %s "%s" as "%s"',
                'superuser' if request.user.is_superuser else 'user',
                original_username,
                new_username,
            )

            UserModel = get_user_model()
            try:
                user = GET_USER_FUNC(new_username)

                # Remove su_key from user session
                if SU_KEY in request.session:
                    logger.debug('Delete original "%s"', original_username)
                    del request.session[SU_KEY]

                UserModel.backend = SU_BACKEND
                logout(request)
                login(request, user)

                # Set session key for new user
                if new_username != original_username:
                    logger.debug('Set user "%s" as original', current_username)
                    request.session[SU_KEY] = original_username
            except UserModel.DoesNotExist:
                logger.warning('No such user in system "%s"', new_username)

            # Redirect to the same URL with the su parameter removed.
            url = get_clean_url(request, remove_keys=[SU_KEY])
            result = HttpResponseRedirect(url)

        return result
| {
"content_hash": "273169465ec5b8fc7fc11ab7c94dbb9b",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 97,
"avg_line_length": 34.40625,
"alnum_prop": 0.587344838026037,
"repo_name": "truetug/django-admin-helper",
"id": "5e63b92c95667b2e81c1444c48628820770a9bb6",
"size": "3321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin_helper/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2135"
},
{
"name": "HTML",
"bytes": "5119"
},
{
"name": "JavaScript",
"bytes": "6123"
},
{
"name": "Python",
"bytes": "20549"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models
class Musician(models.Model):
    # A performer; ``no`` is an externally assigned primary key.
    no=models.IntegerField(primary_key=True)
    name=models.CharField('原名',max_length=40)  # real name
    stagename=models.CharField('艺名',max_length=40,null=True,blank=True)  # stage name (optional)
    sex=models.CharField('性别',choices=(('M', '男'), ('F', '女')),max_length=1)  # gender
    birthday=models.DateTimeField('出生日期')  # date of birth
class RecordCompany(models.Model):
    # A record label; ``no`` is an externally assigned primary key.
    no=models.IntegerField(primary_key=True)
    company=models.CharField(max_length=40)  # company name
class Album(models.Model):
    # An album released by a record company.
    no=models.IntegerField(primary_key=True)
    name=models.CharField(max_length=40)
    company=models.ForeignKey(RecordCompany,related_name='RecordCompany_company')
    date=models.DateTimeField()  # release date
class Music(models.Model):
    # A song linked to its lyricist, composer, singer and album.
    no=models.IntegerField(primary_key=True)
    name=models.CharField(max_length=40)
    lyricist=models.ForeignKey(Musician,related_name='Musician_lyricist')
    composer=models.ForeignKey(Musician,related_name='Musician_composer')
    singer=models.ForeignKey(Musician,related_name='Musician_singer')
    # BUG FIX: this foreign key previously pointed at Musician; the field
    # name and its related_name ('Album_album') show it must reference
    # Album.  Requires a schema migration.
    album=models.ForeignKey(Album,related_name='Album_album')
    duration=models.DurationField()
class User(models.Model):
    # A site account.
    # NOTE(review): the password is stored as an 8-char plain CharField --
    # consider Django's auth framework / hashed passwords.
    no=models.IntegerField(primary_key=True)
    user=models.CharField(max_length=8)    # login name
    passwd=models.CharField(max_length=8)  # password
class PersonalRecord(models.Model):
    # Per-user listening statistics for one song.
    no=models.IntegerField(primary_key=True)
    user=models.ForeignKey(User,related_name='User_user')
    song=models.ForeignKey(Music,related_name='Music_song')
    plays=models.IntegerField()    # play count
    like=models.IntegerField()     # like count
    dislike=models.IntegerField()  # dislike count
| {
"content_hash": "f3fcbf37bd5d76a152db47b4e8f95075",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 81,
"avg_line_length": 39.48780487804878,
"alnum_prop": 0.7492279184681903,
"repo_name": "tea321000/django-project",
"id": "20dc211d607ae7d284c588020ecf01f8f6617b4f",
"size": "1657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "musicsite/music/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "153"
},
{
"name": "HTML",
"bytes": "3864"
},
{
"name": "Python",
"bytes": "30302"
}
],
"symlink_target": ""
} |
import configparser
# Configuring bot: parse config.ini and close the file handle promptly
# (the previous open() call leaked the handle).
config = configparser.ConfigParser()
with open('config.ini') as config_file:
    config.read_file(config_file)

# Chat id of the support group; must be an integer in the [DEFAULT] section.
support_chat_id = int(config['DEFAULT']['support_chat_id'])
| {
"content_hash": "5deda3f9b5b83f999cc8dd538e816c82",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 59,
"avg_line_length": 24.857142857142858,
"alnum_prop": 0.7528735632183908,
"repo_name": "TheCoreMan/Irgunomator",
"id": "48412ae9325b4178b5421b906315a94566e12022",
"size": "174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12862"
}
],
"symlink_target": ""
} |
import catoclient.catocommand
from catoclient.param import Param
class UpdateCloud(catoclient.catocommand.CatoCommand):
    """CLI command that calls the ``update_cloud`` API to change the
    properties of an existing Cloud endpoint."""

    Description = 'Updates the properties of a Cloud Endpoint.'
    API = 'update_cloud'
    Examples = '''
    _Update the address of a vCloud cloud endpoint_

    cato-update-cloud -n "vcloud-test" -u "iad2.vcloudservice.vmware.com"
    '''

    # Command-line options; only ``name`` is required.
    Options = [Param(name='name', short_name='n', long_name='name',
                     optional=False, ptype='string',
                     doc='The ID or Name of a Cloud.'),
               Param(name='apiurl', short_name='u', long_name='apiurl',
                     optional=True, ptype='string',
                     doc='URL of the Cloud API endpoint.'),
               Param(name='apiprotocol', short_name='p', long_name='apiprotocol',
                     optional=True, ptype='string',
                     doc='Cloud API endpoint protocol.'),
               Param(name='default_account', short_name='d', long_name='default_account',
                     optional=True, ptype='string',
                     doc='A default Account to be associated with this Cloud.')
               ]

    def main(self):
        # Forward the parsed options to the API and print the raw response.
        results = self.call_api(self.API, ['name', 'apiurl', 'apiprotocol', 'default_account'])
        print(results)
| {
"content_hash": "f5de7e638e5aed540664f4caf4eac374",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 95,
"avg_line_length": 44.44827586206897,
"alnum_prop": 0.5795190069821567,
"repo_name": "cloudsidekick/catoclient",
"id": "091c38bf6669e5ecf71aeee9a78d9fa745f9247e",
"size": "2015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catoclient/commands/updatecloud.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "227800"
},
{
"name": "Ruby",
"bytes": "1000"
},
{
"name": "Tcl",
"bytes": "3573"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import pickle
from time import time
from twisted.internet.defer import (
FirstError, DeferredList, inlineCallbacks, returnValue)
from twisted.web.http_headers import Headers
from twisted.python.log import msg
from twisted.web.http import NO_CONTENT, NOT_FOUND
from stats import Duration
from httpclient import StringProducer, readBody
class CalDAVAccount(object):
    """Thin CalDAV client bound to one server, user and principal."""

    def __init__(self, agent, netloc, user, password, root, principal):
        self.agent = agent
        self.netloc = netloc
        self.user = user
        self.password = password
        self.root = root
        self.principal = principal

    def _makeURL(self, path):
        """Build an absolute http URL for *path* on this account's server."""
        if not path.startswith('/'):
            raise ValueError("Pass a relative URL with an absolute path")
        return 'http://%s%s' % (self.netloc, path)

    def deleteResource(self, path):
        """DELETE *path*; 404 (already gone) and 204 are both acceptable."""
        url = self._makeURL(path)
        deferred = self.agent.request('DELETE', url)

        def checkStatus(response):
            if response.code in (NO_CONTENT, NOT_FOUND):
                return None
            raise Exception(
                "Unexpected response to DELETE %s: %d" % (
                    url, response.code))

        return deferred.addCallback(checkStatus)

    def makeCalendar(self, path):
        """Issue MKCALENDAR for *path*."""
        return self.agent.request('MKCALENDAR', self._makeURL(path))

    def writeData(self, path, data, contentType):
        """PUT *data* to *path* with the given Content-Type."""
        headers = Headers({'content-type': [contentType]})
        body = StringProducer(data)
        return self.agent.request('PUT', self._makeURL(path), headers, body)
@inlineCallbacks
def _serial(fs):
    # Run (callable, args) pairs one at a time, waiting for the Deferred
    # returned by each before starting the next.
    for (f, args) in fs:
        yield f(*args)
    returnValue(None)
def initialize(agent, host, port, user, password, root, principal, calendar):
    """
    If the specified calendar exists, delete it. Then re-create it empty.

    Returns a Deferred that fires with the configured CalDAVAccount once
    the calendar has been (re)created.
    """
    account = CalDAVAccount(
        agent,
        "%s:%d" % (host, port),
        user=user, password=password,
        root=root, principal=principal)
    cal = "/calendars/users/%s/%s/" % (user, calendar)
    # Delete (tolerating 404) and then re-create, strictly in order.
    d = _serial([
        (account.deleteResource, (cal,)),
        (account.makeCalendar, (cal,))])
    d.addCallback(lambda ignored: account)
    return d
def firstResult(deferreds):
    """
    Return a L{Deferred} which fires when the first L{Deferred} from
    C{deferreds} fires.

    @param deferreds: A sequence of Deferreds to wait on.
    """
    # NOTE(review): no body -- this currently returns None, not a Deferred.
    # The equivalent behaviour is inlined in ``sample`` via DeferredList
    # with fireOnOneCallback/fireOnOneErrback; confirm whether this stub
    # is dead code before relying on it.
@inlineCallbacks
def sample(dtrace, sampleTime, agent, paramgen, responseCode, concurrency=1):
    """Issue HTTP requests from *paramgen* for roughly *sampleTime* seconds,
    keeping *concurrency* requests in flight, and collect per-request
    timings plus dtrace statistics.

    Fires with a dict mapping each statistic (including the HTTP duration)
    to a list of samples.  A response with a code other than *responseCode*
    aborts the run.
    """
    urlopen = Duration('HTTP')
    data = {urlopen: []}

    def once():
        # Issue a single request and, when its body has been read, record
        # the elapsed wall-clock time and the dtrace stats for it.
        msg('emitting request')
        before = time()
        params = paramgen()
        d = agent.request(*params)

        def cbResponse(response):
            if response.code != responseCode:
                raise Exception(
                    "Request %r received unexpected response code: %d" % (
                        params, response.code))

            d = readBody(response)

            def cbBody(ignored):
                after = time()

                msg('response received')

                # Give things a moment to settle down.  This is a hack
                # to try to collect the last of the dtrace output
                # which may still be sitting in the write buffer of
                # the dtrace process.  It would be nice if there were
                # a more reliable way to know when we had it all, but
                # no luck on that front so far.  The implementation of
                # mark is supposed to take care of that, but the
                # assumption it makes about ordering of events appears
                # to be invalid.

                # XXX Disabled until I get a chance to seriously
                # measure what affect, if any, it has.
                # d = deferLater(reactor, 0.5, dtrace.mark)
                d = dtrace.mark()

                def cbStats(stats):
                    msg('stats collected')
                    for k, v in stats.iteritems():
                        data.setdefault(k, []).append(v)
                    data[urlopen].append(after - before)
                d.addCallback(cbStats)
                return d
            d.addCallback(cbBody)
            return d
        d.addCallback(cbResponse)
        return d

    msg('starting dtrace')
    yield dtrace.start()
    msg('dtrace started')

    start = time()
    requests = []
    for _ignore_i in range(concurrency):
        requests.append(once())

    while requests:
        # Wait until any one in-flight request completes (or fails).
        try:
            _ignore_result, index = yield DeferredList(requests, fireOnOneCallback=True, fireOnOneErrback=True)
        except FirstError, e:
            e.subFailure.raiseException()

        # Get rid of the completed Deferred
        del requests[index]

        if time() > start + sampleTime:
            # Wait for the rest of the outstanding requests to keep things tidy
            yield DeferredList(requests)
            # And then move on
            break
        else:
            # And start a new operation to replace it
            try:
                requests.append(once())
            except StopIteration:
                # Ran out of work to do, so paramgen raised a
                # StopIteration.  This is pretty sad.  Catch it or it
                # will demolish inlineCallbacks.
                if len(requests) == concurrency - 1:
                    msg('exhausted parameter generator')

    msg('stopping dtrace')
    leftOver = yield dtrace.stop()
    msg('dtrace stopped')
    for (k, v) in leftOver.items():
        if v:
            print('Extra', k, ':', v)
    returnValue(data)
def select(statistics, benchmark, parameter, statistic):
    """
    Pick one statistic's samples out of a nested results structure.

    @param statistics: A mapping of benchmark name to a mapping of
        (integer) parameter value to a mapping of statistic objects
        (each having a C{name} attribute) to sample lists.
    @param benchmark: The benchmark name to look in.
    @param parameter: The parameter value; coerced with C{int()} so a
        string from the command line works too.
    @param statistic: The name of the statistic object to find.
    @return: A (statistic, samples) tuple.
    @raise ValueError: If no statistic with the given name exists.
    """
    # items() instead of the Python 2-only iteritems() keeps this working
    # on Python 3 as well.
    for stat, samples in statistics[benchmark][int(parameter)].items():
        if stat.name == statistic:
            return (stat, samples)
    raise ValueError("Unknown statistic %r" % (statistic,))
def load_stats(statfiles):
    """
    Load samples from a collection of pickled statistics files.

    @param statfiles: An iterable of "filename,benchmark,parameter,statistic"
        strings identifying which samples to extract from each file.
    @return: A list of (statistic, samples) tuples, one per entry; every
        entry is asserted to have the same number of samples as the first.
    """
    data = []
    for fname in statfiles:
        fname, bench, param, stat = fname.split(',')
        # Open in binary mode and close deterministically; the Python 2-only
        # file() builtin used previously also leaked the file object.
        with open(fname, 'rb') as statFile:
            stats, samples = select(
                pickle.load(statFile), bench, param, stat)
        data.append((stats, samples))
        if data:
            assert len(samples) == len(data[0][1])
    return data
| {
"content_hash": "27c9023c5000488cf68033d034ab21e0",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 111,
"avg_line_length": 31,
"alnum_prop": 0.5737783455764931,
"repo_name": "trevor/calendarserver",
"id": "34dba6be54d45449d756497bb457d3ce73bff644",
"size": "6872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/performance/benchlib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4214"
},
{
"name": "D",
"bytes": "13143"
},
{
"name": "JavaScript",
"bytes": "76566"
},
{
"name": "Python",
"bytes": "9260291"
},
{
"name": "Shell",
"bytes": "78964"
}
],
"symlink_target": ""
} |
"""Abstract adapter class."""
from __future__ import absolute_import
import logging
from pprint import pformat
from functools import wraps
from six import Iterator
from pyasn1.type.univ import Null
from snmp_orm.config import DEBUG
from snmp_orm.settings import SnmpV2Settings, SnmpV3Settings
from snmp_orm.utils import str_to_oid
logger = logging.getLogger(__name__)
class AbstractException(Exception):
    # Base exception type for adapter implementations in this package.
    pass
def log(f):
    """Decorator that traces a method's call and return value.

    Only active when snmp_orm's DEBUG flag is set; otherwise the method
    is returned untouched so there is zero runtime overhead.
    """
    if not DEBUG:
        return f

    @wraps(f)
    def traced(self, *args):
        logger.debug("[%s] Call %s%s" % (self.host, f.__name__, pformat(args)))
        result = f(self, *args)
        logger.debug("[%s] %s return %s" % (self.host, f.__name__, pformat(result)))
        return result
    return traced
class Walker(Iterator):
    """Iterator yielding successive chunks of an SNMP subtree walk.

    Each iteration asks the agent for the rows following the last OID
    seen (via GETBULK when ``use_bulk`` is set, otherwise GETNEXT) and
    yields the resulting (oid, value) pairs; iteration stops once a
    fetched row falls outside ``baseoid``.
    """

    def __init__(self, agent, baseoid, use_bulk=True, bulk_rows=None):
        self.baseoid = baseoid
        self.baseoid_len = len(baseoid)
        # Walk cursor: the last OID fetched; start at the subtree root.
        self.lastoid = baseoid
        self.agent = agent
        self.use_bulk = use_bulk
        self.bulk_rows = bulk_rows
        # Set once a fetched row leaves the baseoid subtree; the next
        # call to __next__ then terminates the iteration.
        self.raise_stop = False

    def __iter__(self):
        return self

    def __next__(self):
        if self.raise_stop:
            raise StopIteration()
        if self.use_bulk:
            rows = self.agent.getbulk(self.bulk_rows, self.lastoid)
        else:
            rows = self.agent.getnext(self.lastoid)
        if not rows:
            raise StopIteration()
        if self.use_bulk:
            # GETBULK may overshoot the subtree: count (from the end) how
            # many trailing rows no longer belong under baseoid, trim them
            # off, and flag the walk as finished.
            # (Renamed from "slice", which shadowed the builtin.)
            trailing = 0
            for oid, _ in reversed(rows):
                diff = self.baseoid_len - len(oid)
                if (diff == 0 and oid[:-1] == self.baseoid[:-1]) or \
                        (diff != 0 and oid[:diff] == self.baseoid):
                    break
                else:
                    trailing += 1
            if trailing > 0:
                rows = rows[:0 - trailing]
                self.raise_stop = True
                if not rows:
                    raise StopIteration()
        self.lastoid = rows[-1][0]
        return rows
class AbstractAdapter(object):
    """Base class for concrete SNMP transport adapters.

    Holds separate read and write sessions built from the supplied
    settings objects; subclasses provide the actual session factories
    via get_snmp_v2_session / get_snmp_v3_session.
    """

    def __init__(self, settings_read, settings_write=None):
        # Write settings default to a blank instance of the same settings
        # class as the read settings; the two must be of the same class.
        settings_write = settings_write or settings_read.__class__()
        assert settings_write.__class__ == settings_read.__class__
        # Effective write settings: start from the read settings, then
        # apply any explicitly supplied write overrides (order matters).
        _settings_write = settings_write.__class__()
        _settings_write.update(settings_read)
        _settings_write.update(settings_write)
        # Create session for host
        if isinstance(settings_read, SnmpV3Settings):
            session_getter = self.get_snmp_v3_session
        elif isinstance(settings_read, SnmpV2Settings):
            session_getter = self.get_snmp_v2_session
        else:
            raise TypeError
        self.host = settings_read["host"]
        self.settings_read = settings_read
        self.settings_write = _settings_write
        self.session_read = session_getter(**settings_read.prepare_kwargs())
        # Reuse the read session when the effective write settings are
        # identical, avoiding a second connection.
        if settings_read == _settings_write:
            self.session_write = self.session_read
        else:
            self.session_write = session_getter(**_settings_write.prepare_kwargs())

    def get_snmp_v2_session(self, host, port, version, community, **kwargs):
        # Subclasses must return a session object implementing
        # get/getnext/getbulk/set.
        raise NotImplementedError()

    def get_snmp_v3_session(self, host, port, version, sec_name=None, sec_level=None,
                            auth_protocol=None, auth_passphrase=None,
                            priv_protocol=None, priv_passphrase=None, **kwargs):
        # Subclasses must return an SNMPv3 session object.
        raise NotImplementedError()

    @log
    def get(self, *args):
        """Return tuple of pairs:

        .. code-block:: python

            ((1, 3, 6, 1, 2, 1, 1, 1, 0),
             OctetString('DGS-3100-24 Gigabit stackable L2 Managed Switch'))
        """
        return self.session_read.get(*map(str_to_oid, args))

    def get_one(self, oid):
        """Return oid value, or None if the agent reported no value."""
        variables = self.get(oid)
        if variables:
            result = variables[0][1]
            # An ASN.1 Null result means "no such value"; report as None.
            if not isinstance(result, Null):
                return result
        return None

    @log
    def getnext(self, *args):
        """Return table:

        .. code-block:: python

            [((1, 3, 6, 1, 2, 1, 1, 1, 0),
              OctetString('DGS-3100-24 Gigabit stackable L2 Managed Switch')),
             ((1, 3, 6, 1, 2, 1, 1, 2, 0),
              ObjectIdentifier('1.3.6.1.4.1.171.10.94.1')),
             ((1, 3, 6, 1, 2, 1, 1, 3, 0),
              TimeTicks('512281800')),
             ((1, 3, 6, 1, 2, 1, 1, 4, 0),
              OctetString(''))]
        """
        return self.session_read.getnext(*map(str_to_oid, args))

    @log
    def getbulk(self, rows=None, *args):
        """Return same as getnext method, but use rows number."""
        # Fall back to the configured bulk row count when not given.
        if rows is None:
            rows = self.settings_read["bulk_rows"]
        return self.session_read.getbulk(rows, *map(str_to_oid, args))

    @log
    def set(self, *args):
        #TODO: set more than one values
        # NOTE(review): args is passed as one tuple, not unpacked like
        # get()/getnext() do; confirm the session API expects it this way.
        return self.session_write.set(args)

    def walk(self, oid):
        """Collect all rows in given OID subtree into a single list."""
        oid = str_to_oid(oid)
        result = []
        walker = Walker(self, oid,
                        use_bulk=self.settings_read["use_bulk"],
                        bulk_rows=self.settings_read["bulk_rows"])
        for rows in walker:
            result.extend(rows)
        return result
| {
"content_hash": "9bf8df51d7c68e79d29146816ea19b8a",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 87,
"avg_line_length": 30.693181818181817,
"alnum_prop": 0.5497963717141799,
"repo_name": "blackwithwhite666/snmp_orm",
"id": "58567a3b8fa54bf3c3ea178383ff93d77c20aa81",
"size": "5402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snmp_orm/adapters/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70391"
},
{
"name": "Shell",
"bytes": "30"
}
],
"symlink_target": ""
} |
"""Serves content for "script" handlers using an HTTP runtime.
http_runtime supports two ways to start the runtime instance.
START_PROCESS sends the runtime_config protobuf (serialized and base64 encoded
as not all platforms support binary data over stdin) to the runtime instance
over stdin and requires the runtime instance to send the port it is listening on
over stdout.
START_PROCESS_FILE creates two temporary files and adds the paths of both files
to the runtime instance command line. The first file is written by http_runtime
with the runtime_config proto (serialized); the runtime instance is expected to
delete the file after reading it. The second file is written by the runtime
instance with the port it is listening on (the line must be newline terminated);
http_runtime is expected to delete the file after reading it.
TODO: convert all runtimes to START_PROCESS_FILE.
"""
import base64
import logging
import os
import subprocess
import sys
import time
import threading
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import http_proxy
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import safe_subprocess
from google.appengine.tools.devappserver2 import tee
# Sentinel values selecting how the runtime instance reports its port back
# to http_runtime: START_PROCESS uses stdin/stdout, START_PROCESS_FILE uses
# temporary files (see the module docstring).
START_PROCESS = -1
START_PROCESS_FILE = -2
def _sleep_between_retries(attempt, max_attempts, sleep_base):
  """Pause with exponential backoff after a failed attempt.

  Sleeps for sleep_base * 2**attempt seconds. The typical usage is:

    for attempt in range(max_attempts):
      # Try operation, either return or break on success
      _sleep_between_retries(attempt, max_attempts, sleep_base)

  Args:
    attempt: Which attempt just failed (0 based).
    max_attempts: The total number of attempts that will be made.
    sleep_base: Seconds to sleep between the first and second attempt;
      doubled for each successive attempt. Any numeric type convertible
      to float works.
  """
  is_final_attempt = attempt >= max_attempts - 1
  if is_final_attempt:
    # The caller is about to give up; don't delay it any further.
    return
  time.sleep(sleep_base * (2 ** attempt))
def _remove_retry_sharing_violation(path, max_attempts=10, sleep_base=.125):
  """Removes a file (with retries on Windows for sharing violations).

  Args:
    path: The filesystem path to remove.
    max_attempts: The maximum number of attempts to try to remove the path
        before giving up.
    sleep_base: How long in seconds to sleep between the first and second
        attempt (the time will be doubled between each successive attempt).
        The value may be any numeric type that is convertible to float.

  Raises:
    WindowsError: When an error other than a sharing violation occurs.
  """
  if sys.platform == 'win32':
    for attempt in range(max_attempts):
      try:
        os.remove(path)
        break
      except WindowsError as e:
        import winerror
        # Sharing violations are expected to occasionally occur when the runtime
        # instance is context swapped after writing the port but before closing
        # the file. Ignore these and try again.
        if e.winerror != winerror.ERROR_SHARING_VIOLATION:
          raise
        _sleep_between_retries(attempt, max_attempts, sleep_base)
    else:
      # for/else: every attempt hit a sharing violation; give up quietly.
      # logging.warning instead of the deprecated logging.warn alias.
      logging.warning('Unable to delete %s', path)
  else:
    os.remove(path)
class HttpRuntimeProxy(instance.RuntimeProxy):
  """Manages a runtime subprocess used to handle dynamic content."""

  _VALID_START_PROCESS_FLAVORS = [START_PROCESS, START_PROCESS_FILE]

  # TODO: Determine if we can always use SIGTERM.
  # Set this to True to quit with SIGTERM rather than SIGKILL
  _quit_with_sigterm = False

  @classmethod
  def stop_runtimes_with_sigterm(cls, quit_with_sigterm):
    """Configures the http_runtime module to kill the runtimes with SIGTERM.

    Args:
      quit_with_sigterm: True to enable stopping runtimes with SIGTERM.

    Returns:
      The previous value.
    """
    previous_quit_with_sigterm = cls._quit_with_sigterm
    cls._quit_with_sigterm = quit_with_sigterm
    return previous_quit_with_sigterm

  def __init__(self, args, runtime_config_getter, module_configuration,
               env=None, start_process_flavor=START_PROCESS):
    """Initializer for HttpRuntimeProxy.

    Args:
      args: Arguments to use to start the runtime subprocess.
      runtime_config_getter: A function that can be called without arguments
          and returns the runtime_config_pb2.Config containing the
          configuration for the runtime.
      module_configuration: An application_configuration.ModuleConfiguration
          instance representing the configuration of the module that owns the
          runtime.
      env: A dict of environment variables to pass to the runtime subprocess.
      start_process_flavor: Which version of start process to start your
          runtime process. Supported flavors are START_PROCESS and
          START_PROCESS_FILE.

    Raises:
      ValueError: An unknown value for start_process_flavor was used.
    """
    super(HttpRuntimeProxy, self).__init__()
    self._process = None
    self._process_lock = threading.Lock()  # Lock to guard self._process.
    self._stderr_tee = None
    self._runtime_config_getter = runtime_config_getter
    self._args = args
    self._module_configuration = module_configuration
    self._env = env
    if start_process_flavor not in self._VALID_START_PROCESS_FLAVORS:
      raise ValueError('Invalid start_process_flavor.')
    self._start_process_flavor = start_process_flavor
    # Built lazily in start() once the runtime's port is known.
    self._proxy = None

  def _get_instance_logs(self):
    """Returns the buffered stderr output of the runtime process."""
    # Give the runtime process a bit of time to write to stderr.
    time.sleep(0.1)
    return self._stderr_tee.get_buf()

  def _instance_died_unexpectedly(self):
    """Returns True if the runtime subprocess has exited."""
    with self._process_lock:
      # poll() is non-None once the child process has terminated.
      return self._process and self._process.poll() is not None

  def handle(self, environ, start_response, url_map, match, request_id,
             request_type):
    """Serves this request by forwarding it to the runtime process.

    Args:
      environ: An environ dict for the request as defined in PEP-333.
      start_response: A function with semantics defined in PEP-333.
      url_map: An appinfo.URLMap instance containing the configuration for the
          handler matching this request.
      match: A re.MatchObject containing the result of the matched URL pattern.
      request_id: A unique string id associated with the request.
      request_type: The type of the request. See instance.*_REQUEST module
          constants.

    Yields:
      A sequence of strings containing the body of the HTTP response.
    """
    return self._proxy.handle(environ, start_response, url_map, match,
                              request_id, request_type)

  def _read_start_process_file(self, max_attempts=10, sleep_base=.125):
    """Read the single line response expected in the start process file.

    The START_PROCESS_FILE flavor uses a file for the runtime instance to
    report back the port it is listening on. We can't rely on EOF semantics
    as that is a race condition when the runtime instance is simultaneously
    writing the file while the devappserver process is reading it; rather we
    rely on the line being terminated with a newline.

    Args:
      max_attempts: The maximum number of attempts to read the line.
      sleep_base: How long in seconds to sleep between the first and second
          attempt (the time will be doubled between each successive attempt).
          The value may be any numeric type that is convertible to float.

    Returns:
      If a full single line (as indicated by a newline terminator) is found,
      all data read up to that point is returned; return an empty string if
      no newline is read before the process exits or the max number of
      attempts are made.
    """
    try:
      for attempt in range(max_attempts):
        # Yes, the final data may already be in the file even though the
        # process exited. That said, since the process should stay alive
        # if it's exited we don't care anyway.
        if self._process.poll() is not None:
          return ''
        # On Mac, if the first read in this process occurs before the data is
        # written, no data will ever be read by this process without the seek.
        self._process.child_out.seek(0)
        line = self._process.child_out.read()
        if '\n' in line:
          return line
        _sleep_between_retries(attempt, max_attempts, sleep_base)
    finally:
      # Always release the handle, even on early return, so the temp file
      # can later be deleted.
      self._process.child_out.close()
    return ''

  def start(self):
    """Starts the runtime process and waits until it is ready to serve."""
    runtime_config = self._runtime_config_getter()
    # TODO: Use a different process group to isolate the child process
    # from signals sent to the parent. Only available in subprocess in
    # Python 2.7.
    assert self._start_process_flavor in self._VALID_START_PROCESS_FLAVORS
    if self._start_process_flavor == START_PROCESS:
      # base64 because not all platforms support binary data over stdin
      # (see the module docstring).
      serialized_config = base64.b64encode(runtime_config.SerializeToString())
      with self._process_lock:
        assert not self._process, 'start() can only be called once'
        self._process = safe_subprocess.start_process(
            self._args,
            serialized_config,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=self._env,
            cwd=self._module_configuration.application_root)
      # The runtime reports its serving port as one line on stdout.
      line = self._process.stdout.readline()
    elif self._start_process_flavor == START_PROCESS_FILE:
      serialized_config = runtime_config.SerializeToString()
      with self._process_lock:
        assert not self._process, 'start() can only be called once'
        self._process = safe_subprocess.start_process_file(
            args=self._args,
            input_string=serialized_config,
            env=self._env,
            cwd=self._module_configuration.application_root,
            stderr=subprocess.PIPE)
      # The runtime reports its serving port via the temporary file, which
      # we are then responsible for deleting (with Windows retry handling).
      line = self._read_start_process_file()
      _remove_retry_sharing_violation(self._process.child_out.name)

    # _stderr_tee may be pre-set by unit tests.
    if self._stderr_tee is None:
      self._stderr_tee = tee.Tee(self._process.stderr, sys.stderr)
      self._stderr_tee.start()
    port = None
    error = None
    try:
      port = int(line)
    except ValueError:
      error = 'bad runtime process port [%r]' % line
      logging.error(error)
    finally:
      # Always build the proxy, even on error, so handle() has somewhere to
      # report the failure (prior_error carries it to the client).
      self._proxy = http_proxy.HttpProxy(
          host='localhost', port=port,
          instance_died_unexpectedly=self._instance_died_unexpectedly,
          instance_logs_getter=self._get_instance_logs,
          error_handler_file=application_configuration.get_app_error_file(
              self._module_configuration),
          prior_error=error)
      self._proxy.wait_for_connection()

  def quit(self):
    """Causes the runtime process to exit."""
    with self._process_lock:
      assert self._process, 'module was not running'
      try:
        if HttpRuntimeProxy._quit_with_sigterm:
          logging.debug('Calling process.terminate on child runtime.')
          self._process.terminate()
        else:
          self._process.kill()
      except OSError:
        # The process may already be gone; nothing left to do.
        pass
      # Mac leaks file descriptors without call to join. Suspect a race
      # condition where the interpreter is unable to close the subprocess
      # pipe as the thread hasn't returned from the readline call.
      self._stderr_tee.join(5)
      self._process = None
| {
"content_hash": "8d43d77c28d11fc4f0720909e8896432",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 80,
"avg_line_length": 39.972972972972975,
"alnum_prop": 0.6951487491548344,
"repo_name": "ychen820/microblog",
"id": "0beff17ef42577e79eaa67e65272b656f9d25902",
"size": "12433",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/platform/google_appengine/google/appengine/tools/devappserver2/http_runtime.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
from django import forms
from django.core.validators import RegexValidator
from django.db import IntegrityError
from karaoke.models import Request
class RequestForm(forms.Form):
    """Form for submitting a karaoke song request, backed by the Request model."""

    ultrastarValidator = RegexValidator(r'^https:\/\/ultrastar-es\.org\/en\/canciones\?.*$', message="Invalid url")
    anilistValidator = RegexValidator(r'^https:\/\/anilist\.co\/anime\/[0-9]*\/?.*$', message="Invalid url")

    title = forms.CharField(label="Song title", max_length=200)
    artist = forms.CharField(label="Song artist", max_length=200)
    ultrastar_url = forms.URLField(label="Ultrastar url", validators=[ultrastarValidator])
    anilist_url = forms.URLField(label="Anilist url", validators=[anilistValidator], required=False)

    def submit(self):
        """Create a Request from the cleaned form data.

        Returns:
            True on success; False when a request with the same
            (artist, title) already exists, or the save raises
            IntegrityError (which closes the check-then-act race,
            assuming the database enforces uniqueness -- confirm on the
            Request model).
        """
        # .exists() avoids fetching rows just to test for a duplicate;
        # the previous .distinct().order_by() had no effect on the test.
        if Request.objects.filter(artist=self.cleaned_data['artist'],
                                  title=self.cleaned_data['title']).exists():
            return False
        r = Request()
        r.title = self.cleaned_data['title']
        r.artist = self.cleaned_data['artist']
        r.ultrastar_url = self.cleaned_data['ultrastar_url']
        r.anilist_url = self.cleaned_data['anilist_url']
        try:
            r.save()
        except IntegrityError:
            # Lost the race with a concurrent identical submission.
            return False
        return True
| {
"content_hash": "931b8a66c9499233f9fda8fcd13c503d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 115,
"avg_line_length": 41.529411764705884,
"alnum_prop": 0.626770538243626,
"repo_name": "WarwickAnimeSoc/aniMango",
"id": "9bf7b9767010d14aa360c8d2fa57943b44b7fa13",
"size": "1412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "karaoke/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14554"
},
{
"name": "HTML",
"bytes": "145725"
},
{
"name": "JavaScript",
"bytes": "1241"
},
{
"name": "Python",
"bytes": "197412"
}
],
"symlink_target": ""
} |
import os
import glob
import json
from setup_app import paths
from setup_app.static import AppType, InstallOption
from setup_app.config import Config
from setup_app.utils import base
from setup_app.installers.node import NodeInstaller
class PassportInstaller(NodeInstaller):
    """Installer for the Gluu Passport (node.js) authentication service.

    Extends NodeInstaller with passport-specific archive extraction,
    certificate/key generation, template rendering and backend
    (LDAP/config) updates.
    """

    def __init__(self):
        # Register this installer instance on the running app so other
        # setup steps can reach it by class name.
        setattr(base.current_app, self.__class__.__name__, self)
        self.service_name = 'passport'
        self.service_user = Config.node_user
        self.app_type = AppType.SERVICE
        self.install_type = InstallOption.OPTONAL
        self.install_var = 'installPassport'
        self.register_progess()

        # Strip pre-release suffixes, e.g. '4.2.0-SNAPSHOT' -> '4.2.0',
        # to build the download URLs below.
        passport_version = Config.oxVersion.replace('-SNAPSHOT','').replace('.Final','')

        # (local target path, remote source URL) pairs fetched before install.
        self.source_files = [
                (os.path.join(Config.distGluuFolder, 'passport.tgz'), Config.maven_root + '/npm/passport/passport-{}.tgz'.format(passport_version)),
                (os.path.join(Config.distGluuFolder, 'passport-node_modules.tar.gz'), Config.maven_root + '/npm/passport/passport-version_{}-node_modules.tar.gz'.format(passport_version))
                ]

        self.gluu_passport_base = os.path.join(self.node_base, 'passport')
        self.passport_initd_script = os.path.join(Config.install_dir, 'static/system/initd/passport')
        self.passport_config = os.path.join(Config.configFolder, 'passport-config.json')
        self.passport_templates_folder = os.path.join(Config.templateFolder, 'passport')
        self.ldif_scripts_fn = os.path.join(Config.outputFolder, 'passport/scripts.ldif')
        # NOTE(review): the '.son' extension looks like a typo for '.json',
        # but render_import_templates() and update_backend() both use this
        # same constant, so behavior is self-consistent; confirm before
        # renaming (external tooling may expect the current name).
        self.passport_oxtrust_config_fn = os.path.join(Config.outputFolder, 'passport/passport_oxtrust_config.son')
        self.passport_central_config_json = os.path.join(Config.outputFolder, 'passport/passport-central-config.json')
        self.ldif_passport_config = os.path.join(Config.outputFolder, 'passport/oxpassport-config.ldif')
        self.ldif_passport = os.path.join(Config.outputFolder, 'passport/passport.ldif')
        self.ldif_passport_clients = os.path.join(Config.outputFolder, 'passport/passport_clients.ldif')
        self.passport_rs_client_jks_fn = os.path.join(Config.certFolder, 'passport-rs.jks')
        self.passport_rp_client_jks_fn = os.path.join(Config.certFolder, 'passport-rp.jks')
        self.passport_rp_client_cert_fn = os.path.join(Config.certFolder, 'passport-rp.pem')
        self.passportSpTLSCACert = os.path.join(Config.certFolder, 'passport-sp.pem')
        self.passportSpTLSCert = os.path.join(Config.certFolder, 'passport-sp.crt')
        self.passportSpTLSKey = os.path.join(Config.certFolder, 'passport-sp.key')
        self.passportSpJksFn = os.path.join(Config.certFolder, 'passport-sp.jks')

    def install(self):
        """Extract passport, install its init/system service and enable it."""
        self.logIt("Preparing passport service base folders")
        self.run([paths.cmd_mkdir, '-p', self.gluu_passport_base])

        self.extract_passport()
        self.extract_modules()

        # Copy init.d script
        self.copyFile(self.passport_initd_script, Config.gluuOptSystemFolder)
        self.run([paths.cmd_chmod, '-R', "755", os.path.join(Config.gluuOptSystemFolder, 'passport')])

        # Install passport system service script
        self.installNodeService('passport')
        self.chown(self.gluu_passport_base, Config.node_user, Config.gluu_group, recursive=True)

        # enable service at startup
        self.enable()

    def extract_passport(self):
        """Unpack the passport application archive into its base folder."""
        # Extract package
        try:
            self.logIt("Extracting {} into {}".format(self.source_files[0][0], self.gluu_passport_base))
            self.run([paths.cmd_tar, '--strip', '1', '-xzf', self.source_files[0][0], '-C', self.gluu_passport_base, '--no-xattrs', '--no-same-owner', '--no-same-permissions'])
        except:
            self.logIt("Error encountered while extracting archive {}".format(self.source_files[0][0]))

    def extract_modules(self):
        """Provide node_modules: unpack the bundled archive or npm-install."""
        modules_target_dir = os.path.join(self.gluu_passport_base, 'node_modules')
        modules_source_dir = os.path.dirname(self.source_files[1][0])

        self.run([paths.cmd_mkdir, '-p', modules_target_dir])

        node_modules_list = glob.glob(os.path.join(modules_source_dir, 'passport*node_modules*'))
        if node_modules_list:
            # Prefer the pre-built modules archive (max() picks the
            # lexicographically latest match).
            passport_modules_archive = max(node_modules_list)
            self.logIt("Extracting passport node modules")
            self.run([paths.cmd_tar, '--strip', '1', '-xzf', passport_modules_archive, '-C', modules_target_dir, '--no-xattrs', '--no-same-owner', '--no-same-permissions'])
        else:
            # Install dependencies
            try:
                self.logIt("Running npm install in %s" % self.gluu_passport_base)
                nodeEnv = os.environ.copy()
                nodeEnv['PATH'] = ':'.join((os.path.join(Config.node_home, 'bin'), nodeEnv['PATH']))
                cmd_npm = os.path.join(Config.node_home, 'bin', 'npm')
                self.run([cmd_npm, 'install', '-P'], self.gluu_passport_base, nodeEnv, True)
            except:
                self.logIt("Error encountered running npm install in {}".format(self.gluu_passport_base))

    def installed(self):
        # Presence of the base folder is used as the "already installed" marker.
        return os.path.exists(self.gluu_passport_base)

    def generate_configuration(self):
        """Generate passwords, clients, certificates and JWKS for passport."""
        self.logIt("Generating Passport configuration")
        if not Config.get('passportSpKeyPass'):
            Config.passportSpKeyPass = self.getPW()
            Config.passportSpJksPass = self.getPW()

        if not Config.get('passport_rp_client_cert_alg'):
            Config.passport_rp_client_cert_alg = 'RS512'

        if not Config.get('passport_rp_client_jks_pass'):
            Config.passport_rp_client_jks_pass = self.getPW()

        if not Config.get('passport_rs_client_jks_pass'):
            Config.passport_rs_client_jks_pass = self.getPW()

        Config.passport_rs_client_jks_pass_encoded = self.obscure(Config.passport_rs_client_jks_pass)

        # Ensure the OpenID clients (and the UMA resource) exist, keyed by
        # their fixed inum prefixes.
        client_var_id_list = (
                    ('passport_rs_client_id', '1501.'),
                    ('passport_rp_client_id', '1502.'),
                    ('passport_rp_ii_client_id', '1503.'),
                    )
        self.check_clients(client_var_id_list)
        self.check_clients([('passport_resource_id', '1504.')], resource=True)

        # backup existing files
        for f in glob.glob(os.path.join(Config.certFolder, 'passport-*')):
            if not f.endswith('~'):
                self.backupFile(f, move=True)

        # create certificates
        self.gen_cert('passport-sp', Config.passportSpKeyPass, 'ldap', Config.ldap_hostname)

        Config.passport_rs_client_jwks = self.gen_openid_data_store_keys(self.passport_rs_client_jks_fn, Config.passport_rs_client_jks_pass)
        Config.templateRenderingDict['passport_rs_client_base64_jwks'] = self.generate_base64_string(Config.passport_rs_client_jwks, 1)

        Config.passport_rp_client_jwks = self.gen_openid_data_store_keys(self.passport_rp_client_jks_fn, Config.passport_rp_client_jks_pass)
        Config.templateRenderingDict['passport_rp_client_base64_jwks'] = self.generate_base64_string(Config.passport_rp_client_jwks, 1)

        self.logIt("Preparing Passport OpenID RP certificate...")
        # Find the key matching the configured algorithm and export it as
        # the RP certificate.
        passport_rp_client_jwks_json = json.loads(''.join(Config.passport_rp_client_jwks))
        for jwks_key in passport_rp_client_jwks_json["keys"]:
            if jwks_key["alg"] == Config.passport_rp_client_cert_alg:
                Config.passport_rp_client_cert_alias = jwks_key["kid"]
                break

        self.export_openid_key(self.passport_rp_client_jks_fn, Config.passport_rp_client_jks_pass, Config.passport_rp_client_cert_alias, self.passport_rp_client_cert_fn)

        # set owner and mode of certificate files
        cert_files = glob.glob(os.path.join(Config.certFolder, 'passport*'))
        for fn in cert_files:
            self.run([paths.cmd_chmod, '440', fn])
            self.chown(fn, Config.root_user, Config.gluu_user)

    def render_import_templates(self):
        """Render the passport templates and import the resulting LDIFs."""
        self.logIt("Rendering Passport templates")
        output_folder = os.path.join(Config.outputFolder,'passport')

        self.renderTemplateInOut(self.passport_config, self.passport_templates_folder, Config.configFolder)
        self.renderTemplateInOut(self.passport_central_config_json, self.passport_templates_folder, output_folder)
        Config.templateRenderingDict['passport_central_config_base64'] = self.generate_base64_ldap_file(self.passport_central_config_json)

        scripts_template = os.path.join(self.passport_templates_folder, os.path.basename(self.ldif_scripts_fn))
        extensions = base.find_script_names(scripts_template)
        self.prepare_base64_extension_scripts(extensions=extensions)

        for tmp in (
                self.passport_oxtrust_config_fn,
                self.ldif_scripts_fn,
                self.passport_config,
                self.ldif_passport,
                self.ldif_passport_clients,
                self.ldif_passport_config,
                ):
            self.renderTemplateInOut(tmp, self.passport_templates_folder, output_folder)

        ldif_files = (self.ldif_scripts_fn, self.ldif_passport, self.ldif_passport_config, self.ldif_passport_clients)
        self.dbUtils.import_ldif(ldif_files)

    def update_backend(self):
        """Enable passport and its scripts in the persistence backend."""
        self.dbUtils.enable_service('gluuPassportEnabled')

        # Enable the passport-related custom scripts by inum.
        for inum in ['2FDB-CF02', 'D40C-1CA4', '2DAF-F9A5']:
            self.dbUtils.enable_script(inum)

        passport_oxtrust_config = base.readJsonFile(self.passport_oxtrust_config_fn)
        self.dbUtils.set_oxTrustConfApplication(passport_oxtrust_config)

        self.dbUtils.set_configuration('gluuPassportEnabled', 'true')
        self.dbUtils.add_client2script('2DAF-F9A5', Config.passport_rp_client_id)
        self.dbUtils.add_client2script('2DAF-F995', Config.passport_rp_client_id)

    def create_folders(self):
        """Create the passport logs folder and an empty start log."""
        # Create logs folder
        self.run([paths.cmd_mkdir, '-p', os.path.join(self.gluu_passport_base, 'logs')])

        #create empty log file unless exists
        log_file = os.path.join(self.gluu_passport_base, 'logs/start.log')
        if not os.path.exists(log_file):
            self.writeFile(log_file, '')
| {
"content_hash": "0d24c9327eff279b250c1917d79cb7b1",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 187,
"avg_line_length": 48.29716981132076,
"alnum_prop": 0.6529934563922258,
"repo_name": "GluuFederation/community-edition-setup",
"id": "a54e4dfdc825b3de53c2549da3d30636f7233bee",
"size": "10239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup_app/installers/passport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "111434"
},
{
"name": "Makefile",
"bytes": "30"
},
{
"name": "Python",
"bytes": "1927650"
},
{
"name": "Shell",
"bytes": "68144"
}
],
"symlink_target": ""
} |
from .resource_update import ResourceUpdate
class SnapshotUpdate(ResourceUpdate):
    """Snapshot update resource.

    :param tags: Resource tags
    :type tags: dict
    :param account_type: the storage account type of the disk; one of
     'Standard_LRS' or 'Premium_LRS'
    :type account_type: str or :class:`StorageAccountTypes
     <azure.mgmt.compute.models.StorageAccountTypes>`
    :param os_type: the Operating System type; one of 'Windows' or 'Linux'
    :type os_type: str or :class:`OperatingSystemTypes
     <azure.mgmt.compute.models.OperatingSystemTypes>`
    :param creation_data: disk source information; cannot be changed once
     the disk has been created
    :type creation_data: :class:`CreationData
     <azure.mgmt.compute.models.CreationData>`
    :param disk_size_gb: size of the VHD to create (mandatory when
     creationData.createOption is Empty); for other options its presence
     indicates a resize, which is only allowed while the disk is not
     attached to a running VM and may only grow the disk
    :type disk_size_gb: int
    :param encryption_settings: Encryption settings for disk or snapshot
    :type encryption_settings: :class:`EncryptionSettings
     <azure.mgmt.compute.models.EncryptionSettings>`
    """

    # Maps constructor arguments onto the REST payload keys/types used by
    # the msrest serializer.
    _attribute_map = {
        'tags':
            {'key': 'tags', 'type': '{str}'},
        'account_type':
            {'key': 'properties.accountType', 'type': 'StorageAccountTypes'},
        'os_type':
            {'key': 'properties.osType', 'type': 'OperatingSystemTypes'},
        'creation_data':
            {'key': 'properties.creationData', 'type': 'CreationData'},
        'disk_size_gb':
            {'key': 'properties.diskSizeGB', 'type': 'int'},
        'encryption_settings':
            {'key': 'properties.encryptionSettings', 'type': 'EncryptionSettings'},
    }

    def __init__(self, tags=None, account_type=None, os_type=None, creation_data=None, disk_size_gb=None, encryption_settings=None):
        # Tags are handled by the base resource-update class.
        super(SnapshotUpdate, self).__init__(tags=tags)
        self.account_type = account_type
        self.os_type = os_type
        self.creation_data = creation_data
        self.disk_size_gb = disk_size_gb
        self.encryption_settings = encryption_settings
| {
"content_hash": "3ac507438666ee5d293bc5208527bfb2",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 132,
"avg_line_length": 49.51063829787234,
"alnum_prop": 0.690159003008165,
"repo_name": "rjschwei/azure-sdk-for-python",
"id": "c1adb27cea7e28a14cfa19a9172e5be0c06936bf",
"size": "2801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/models/snapshot_update.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8317911"
}
],
"symlink_target": ""
} |
from designateclient import exceptions
from designateclient import v1 as client
from designateclient.v1 import domains
from designateclient.v1 import records
from heat.common import exception as heat_exception
from heat.engine.clients import client_plugin
from heat.engine import constraints
CLIENT_NAME = 'designate'
class DesignateClientPlugin(client_plugin.ClientPlugin):
    """Heat client plugin wrapping the Designate (DNS) v1 client."""

    exceptions_module = [exceptions]

    DNS = 'dns'
    service_types = [DNS]

    def _create(self):
        """Build a Designate client bound to the request's keystone session."""
        endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
        return client.Client(session=self.context.keystone_session,
                             endpoint_type=endpoint_type,
                             service_type=self.DNS)

    def is_not_found(self, ex):
        return isinstance(ex, exceptions.NotFound)

    def get_domain_id(self, domain_id_or_name):
        """Resolve a domain id or name to the domain's id.

        Tries a direct id lookup first; on NotFound falls back to scanning
        the domain list by name. Raises EntityNotFound when neither works.
        """
        try:
            return self.client().domains.get(domain_id_or_name).id
        except exceptions.NotFound:
            for domain in self.client().domains.list():
                if domain.name == domain_id_or_name:
                    return domain.id
            raise heat_exception.EntityNotFound(entity='Designate Domain',
                                                name=domain_id_or_name)

    def domain_create(self, **kwargs):
        return self.client().domains.create(domains.Domain(**kwargs))

    def domain_update(self, **kwargs):
        # Designate mandates to pass the Domain object with updated properties
        domain = self.client().domains.get(kwargs['id'])
        for key, value in kwargs.items():
            setattr(domain, key, value)
        return self.client().domains.update(domain)

    def record_create(self, **kwargs):
        domain_id = self.get_domain_id(kwargs.pop('domain'))
        return self.client().records.create(domain_id, records.Record(**kwargs))

    def record_update(self, **kwargs):
        # Designate mandates to pass the Record object with updated properties
        domain_id = self.get_domain_id(kwargs.pop('domain'))
        record = self.client().records.get(domain_id, kwargs['id'])
        for key, value in kwargs.items():
            setattr(record, key, value)
        return self.client().records.update(record.domain_id, record)

    def record_delete(self, **kwargs):
        return self.client().records.delete(
            self.get_domain_id(kwargs.pop('domain')),
            kwargs.pop('id'))

    def record_show(self, **kwargs):
        return self.client().records.get(
            self.get_domain_id(kwargs.pop('domain')),
            kwargs.pop('id'))
class DesignateDomainConstraint(constraints.BaseCustomConstraint):
    # Validates a Designate domain property by resolving it through
    # DesignateClientPlugin.get_domain_id (raises EntityNotFound on miss).
    resource_client_name = CLIENT_NAME
    resource_getter_name = 'get_domain_id'
| {
"content_hash": "f5ab83c633f52fb0f2b9597433e25e15",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 78,
"avg_line_length": 36.22222222222222,
"alnum_prop": 0.6284935241990457,
"repo_name": "cwolferh/heat-scratch",
"id": "88d0bbe5e2522d136ccd234b06805bb6a29a5111",
"size": "3509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/engine/clients/os/designate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8338769"
},
{
"name": "Shell",
"bytes": "56516"
}
],
"symlink_target": ""
} |
"""Imports objects from music modules into the top-level music namespace."""
from magenta.music.chord_symbols_lib import ChordSymbolException
from magenta.music.chord_symbols_lib import ChordSymbolFunctions
from magenta.music.chords_encoder_decoder import ChordEncodingException
from magenta.music.chords_encoder_decoder import MajorMinorChordOneHotEncoding
from magenta.music.chords_encoder_decoder import PitchChordsEncoderDecoder
from magenta.music.chords_encoder_decoder import TriadChordOneHotEncoding
from magenta.music.chords_lib import BasicChordRenderer
from magenta.music.chords_lib import ChordProgression
from magenta.music.chords_lib import extract_chords
from magenta.music.chords_lib import extract_chords_for_melodies
from magenta.music.constants import * # pylint: disable=wildcard-import
from magenta.music.drums_encoder_decoder import MultiDrumOneHotEncoding
from magenta.music.drums_lib import DrumTrack
from magenta.music.drums_lib import extract_drum_tracks
from magenta.music.drums_lib import midi_file_to_drum_track
from magenta.music.encoder_decoder import ConditionalEventSequenceEncoderDecoder
from magenta.music.encoder_decoder import EventSequenceEncoderDecoder
from magenta.music.encoder_decoder import LookbackEventSequenceEncoderDecoder
from magenta.music.encoder_decoder import OneHotEncoding
from magenta.music.encoder_decoder import OneHotEventSequenceEncoderDecoder
from magenta.music.events_lib import NonIntegerStepsPerBarException
from magenta.music.lead_sheets_lib import extract_lead_sheet_fragments
from magenta.music.lead_sheets_lib import LeadSheet
from magenta.music.melodies_lib import BadNoteException
from magenta.music.melodies_lib import extract_melodies
from magenta.music.melodies_lib import Melody
from magenta.music.melodies_lib import midi_file_to_melody
from magenta.music.melodies_lib import PolyphonicMelodyException
from magenta.music.melody_encoder_decoder import KeyMelodyEncoderDecoder
from magenta.music.melody_encoder_decoder import MelodyOneHotEncoding
from magenta.music.midi_io import midi_file_to_sequence_proto
from magenta.music.midi_io import midi_to_sequence_proto
from magenta.music.midi_io import MIDIConversionError
from magenta.music.midi_io import sequence_proto_to_midi_file
from magenta.music.midi_io import sequence_proto_to_pretty_midi
from magenta.music.midi_synth import fluidsynth
from magenta.music.midi_synth import synthesize
from magenta.music.model import BaseModel
from magenta.music.musicxml_parser import MusicXMLDocument
from magenta.music.musicxml_parser import MusicXMLParseException
from magenta.music.musicxml_reader import musicxml_file_to_sequence_proto
from magenta.music.musicxml_reader import musicxml_to_sequence_proto
from magenta.music.musicxml_reader import MusicXMLConversionError
from magenta.music.notebook_utils import play_sequence
from magenta.music.sequence_generator import BaseSequenceGenerator
from magenta.music.sequence_generator import SequenceGeneratorException
from magenta.music.sequence_generator_bundle import GeneratorBundleParseException
from magenta.music.sequence_generator_bundle import read_bundle_file
from magenta.music.sequences_lib import apply_sustain_control_changes
from magenta.music.sequences_lib import BadTimeSignatureException
from magenta.music.sequences_lib import extract_subsequence
from magenta.music.sequences_lib import MultipleTempoException
from magenta.music.sequences_lib import MultipleTimeSignatureException
from magenta.music.sequences_lib import NegativeTimeException
from magenta.music.sequences_lib import quantize_note_sequence
from magenta.music.sequences_lib import quantize_to_step
from magenta.music.sequences_lib import steps_per_bar_in_quantized_sequence
from magenta.music.sequences_lib import steps_per_quarter_to_steps_per_second
from magenta.music.sequences_lib import trim_note_sequence
| {
"content_hash": "545441096b1522c18b1d953d706727ca",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 81,
"avg_line_length": 48.4875,
"alnum_prop": 0.8623356535189481,
"repo_name": "YoshikawaMasashi/magenta",
"id": "dff859aecd081220c764c36f38caafd931696f79",
"size": "4475",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magenta/music/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12668"
},
{
"name": "HTML",
"bytes": "721"
},
{
"name": "JavaScript",
"bytes": "43259"
},
{
"name": "Jupyter Notebook",
"bytes": "2115912"
},
{
"name": "Protocol Buffer",
"bytes": "12931"
},
{
"name": "Python",
"bytes": "1243942"
},
{
"name": "Shell",
"bytes": "8783"
}
],
"symlink_target": ""
} |
from lxml import etree as ET
from lxml import etree
import datetime
import itertools
import traceback
import pprint
import copy
import dateutil
import logging
from foam.core.exception import CoreException
from foam.core.configdb import ConfigDB
class NoGroupName(CoreException):
  """Raised when a group element is configured without a name."""
  def __init__ (self):
    super(NoGroupName, self).__init__()
  def __str__ (self):
    return "A group was configured with no name specified"
class GroupNameAlreadyUsed(CoreException):
  """Raised when the same group name appears more than once in a request."""
  def __init__ (self, name):
    super(GroupNameAlreadyUsed, self).__init__()
    # The duplicated group name, reported in the message.
    self.name = name
  def __str__ (self):
    return "The group name (%s) is defined more than once in the request." % (self.name)
class BadSliverExpiration(CoreException):
  """Raised when a requested sliver expiration cannot be honored."""
  def __init__ (self, msg):
    super(BadSliverExpiration, self).__init__()
    # Human-readable explanation supplied by the raiser.
    self.msg = msg
  def __str__ (self):
    return self.msg
def renewSliver (slice_urn, creds, exptime):
  """Extend the expiration of the sliver belonging to slice_urn.

  The requested time must not exceed the configured maximum lease and must
  be covered by at least one credential's own expiration; otherwise
  BadSliverExpiration is raised. Returns the sliver URN on success.
  """
  from foam.geni.db import GeniDB
  sliver_urn = GeniDB.getSliverURN(slice_urn)
  reqexp = _asUTC(dateutil.parser.parse(str(exptime)))
  max_expiration = _asUTC(datetime.datetime.utcnow()) + ConfigDB.getConfigItemByKey("geni.max-lease").getValue()
  if reqexp > max_expiration:
    raise BadSliverExpiration(
      "The requested expiration date (%s) is past the allowed maximum expiration (%s)." %
      (reqexp, max_expiration))
  # The first credential that outlives the requested time authorizes the renewal.
  for cred in creds:
    if reqexp <= _asUTC(cred.expiration):
      GeniDB.updateSliverExpiration(sliver_urn, reqexp)
      sobj = GeniDB.getSliverObj(sliver_urn)
      sobj.resetExpireEmail()
      sobj.store()
      return sliver_urn
  raise BadSliverExpiration(
    "No credential found whose expiration is greater than or equal to the requested sliver expiration (%s)" %
    (reqexp))
def _asUTC(dt):
  """Return dt as an aware UTC datetime.

  Aware datetimes are converted; naive ones are simply tagged as UTC.
  """
  utc = dateutil.tz.tzutc()
  return dt.astimezone(utc) if dt.tzinfo else dt.replace(tzinfo=utc)
class VirtualLink(object):
  """A requested virtual link: raw vlink strings plus bound datapaths."""

  def __init__ (self):
    self.bound_datapaths = {}
    self.__vlinks = []

  def __str__ (self):
    joined = ",".join(str(v) for v in self.__vlinks)
    return "virtual_link: %s" % (joined)

  def bindDatapath (self, dp):
    # Merge into an existing entry for this dpid; otherwise keep a deep
    # copy so later mutation of the caller's object cannot leak in.
    existing = self.bound_datapaths.get(dp.dpid)
    if existing is None:
      self.bound_datapaths[dp.dpid] = copy.deepcopy(dp)
    else:
      existing.merge(dp)

  def generateVLinkEntries (self):
    # Empty strings are dropped from the generated rule list.
    return [entry for entry in self.__vlinks if entry]

  def addVLinkFromString (self, vstr):
    self.__vlinks.append(vstr.strip())
class FlowSpec(object):
  """An OpenFlow flowspace request: per-field match value lists plus the
  datapaths the flowspace is bound to.

  Each private list holds the allowed values for one match field; the
  cartesian product of all non-empty lists (times datapath/port pairs)
  defines the concrete flow entries (see generateFlowEntries).
  """
  def __init__ (self):
    self.bound_datapaths = {}
    self.__dlsrc = []
    self.__dldst = []
    self.__dltype = []
    self.__vlanid = []
    self.__nwsrc = []
    self.__nwdst = []
    self.__nwproto = []
    self.__tpsrc = []
    self.__tpdst = []
  def bindDatapath (self, dp):
    # Merge into an existing entry for this dpid, otherwise store a deep
    # copy so later mutation of the caller's object has no effect here.
    if dp.dpid in self.bound_datapaths:
      self.bound_datapaths[dp.dpid].merge(dp)
    else:
      self.bound_datapaths[dp.dpid] = copy.deepcopy(dp)
  def __str__ (self):
    return "dl_src=%s; dl_dst=%s; dl_type:%s; vlan_id: %s;\n" \
           " nw_src: %s; nw_dst: %s; nw_proto: %s; tp_src: %s; tp_dst: %s" % (
      ",".join([str(x) for x in self.__dlsrc]),
      ",".join([str(x) for x in self.__dldst]),
      ",".join([str(x) for x in self.__dltype]),
      ",".join([str(x) for x in self.__vlanid]),
      ",".join([str(x) for x in self.__nwsrc]),
      ",".join([str(x) for x in self.__nwdst]),
      ",".join([str(x) for x in self.__nwproto]),
      ",".join([str(x) for x in self.__tpsrc]),
      ",".join([str(x) for x in self.__tpdst]))
  def __json__ (self):
    return {"datapaths" : self.bound_datapaths,
            "dl_src" : self.__dlsrc,
            "dl_dst" : self.__dldst,
            "dl_type" : self.__dltype,
            "dl_vlan" : self.__vlanid,
            "nw_src" : self.__nwsrc,
            "nw_dst" : self.__nwdst,
            "nw_proto" : self.__nwproto,
            "tp_src" : self.__tpsrc,
            "tp_dst" : self.__tpdst}
  def getMACs (self):
    # Yields source MACs, then destination MACs.
    for x in self.__dlsrc:
      yield x
    for y in self.__dldst:
      yield y
  def getEtherTypes (self):
    for x in self.__dltype:
      yield x
  def getIPSubnets (self):
    # Yields source subnets, then destination subnets.
    for x in self.__nwsrc:
      yield x
    for x in self.__nwdst:
      yield x
  #start of Vasileios's code (get the rest of the flowspec data)
  def getVLANs (self):
    for x in self.__vlanid:
      yield x
  def getNWProtos (self):
    for x in self.__nwproto:
      yield x
  def getTPPorts (self):
    # Yields source ports, then destination ports.
    for x in self.__tpsrc:
      yield x
    for x in self.__tpdst:
      yield x
  #end of Vasileios's code
  def getDatapaths (self):
    return self.bound_datapaths.values()
  def hasVLANs (self):
    # NOTE: returns True or (implicitly) None, never False.
    if self.__vlanid:
      return True
  def generateFlowEntries (self, priority, datapaths):
    """Expand the flowspace into [dpid, priority, match-string] rules.

    NOTE(review): empty field lists get a None placeholder appended IN
    PLACE below, so calling this mutates the match lists — later calls to
    getMACs()/str() etc. will also see the None entries. Confirm callers
    only invoke this once per FlowSpec.
    """
    dpports = []
    if len(self.bound_datapaths) > 0:
      # Explicit bindings override the caller-supplied datapath list.
      datapaths = self.bound_datapaths.values()
    for dpobj in datapaths:
      if dpobj.ports:
        dpports.extend(itertools.product([dpobj.dpid], [p.num for p in dpobj.ports]))
      else:
        dpports.extend(itertools.product([dpobj.dpid], [None]))
    # Handle the "any" case
    if not dpports: dpports.append((None, None))
    # product won't return any values if any dict is empty, so if the values weren't set, add blanks
    if not self.__dlsrc: self.__dlsrc.append(None)
    if not self.__dldst: self.__dldst.append(None)
    if not self.__dltype: self.__dltype.append(None)
    if not self.__vlanid: self.__vlanid.append(None)
    if not self.__nwsrc: self.__nwsrc.append(None)
    if not self.__nwdst: self.__nwdst.append(None)
    if not self.__nwproto: self.__nwproto.append(None)
    if not self.__tpsrc: self.__tpsrc.append(None)
    if not self.__tpdst: self.__tpdst.append(None)
    entries = itertools.product(dpports, self.__dlsrc, self.__dldst, self.__dltype, self.__vlanid,
                                self.__nwsrc, self.__nwdst, self.__nwproto, self.__tpsrc, self.__tpdst)
    fsrules = []
    for entry in entries:
      # entry = ((dpid, in_port), dl_src, dl_dst, dl_type, dl_vlan,
      #          nw_src, nw_dst, nw_proto, tp_src, tp_dst);
      # None (placeholder) fields are simply omitted from the match.
      m = []
      if entry[1]: m.append("dl_src=%s" % entry[1])
      if entry[2]: m.append("dl_dst=%s" % entry[2])
      if entry[3]: m.append("dl_type=%s" % entry[3])
      if entry[4]: m.append("dl_vlan=%s" % entry[4])
      if entry[5]: m.append("nw_src=%s" % entry[5])
      if entry[6]: m.append("nw_dst=%s" % entry[6])
      if entry[7]: m.append("nw_proto=%s" % entry[7])
      if entry[8]: m.append("tp_src=%s" % entry[8])
      if entry[9]: m.append("tp_dst=%s" % entry[9])
      if entry[0][1]: m.append("in_port=%d" % entry[0][1])
      e = []
      if entry[0][0]:
        e.append("%s" % str(entry[0][0]))
      else:
        e.append("any")
      e.append(priority)
      if m:
        e.append("%s" % ",".join(m))
      else:
        e.append("any")
      fsrules.append(e)
    return fsrules
  def addDlSrcFromString (self, vstr):
    # Comma-separated MAC address list.
    maclist = vstr.split(",")
    for mac in maclist:
      self.__dlsrc.append(mac.strip())
  def addDlDstFromString (self, vstr):
    maclist = vstr.split(",")
    for mac in maclist:
      self.__dldst.append(mac.strip())
  def addDlTypeFromString (self, vstr):
    for dltype in vstr.split(","):
      self.__dltype.append(dltype.strip())
  def addVlanIDFromString (self, vstr):
    # Accepts comma-separated ids and inclusive "lo-hi" ranges.
    for vlid in vstr.split(","):
      if vlid.count("-"):
        l = vlid.split("-")
        self.__vlanid.extend(range(int(l[0]), int(l[1])+1))
      else:
        self.__vlanid.append(int(vlid))
  def addNwSrcFromString (self, vstr):
    for nw in vstr.split(","):
      self.__nwsrc.append(nw.strip())
  def addNwDstFromString (self, vstr):
    for nw in vstr.split(","):
      self.__nwdst.append(nw.strip())
  def addNwProtoFromString (self, vstr):
    # Accepts comma-separated protocol numbers and inclusive "lo-hi" ranges.
    for proto in vstr.split(","):
      if proto.count("-"):
        l = proto.split("-")
        self.__nwproto.extend(range(int(l[0]), int(l[1])+1))
      else:
        self.__nwproto.append(int(proto))
  def addTpSrcFromString (self, vstr):
    # Accepts comma-separated port numbers and inclusive "lo-hi" ranges.
    for port in vstr.split(","):
      if port.count("-"):
        l = port.split("-")
        self.__tpsrc.extend(range(int(l[0]), int(l[1])+1))
      else:
        self.__tpsrc.append(int(port))
  def addTpDstFromString (self, vstr):
    for port in vstr.split(","):
      if port.count("-"):
        l = port.split("-")
        self.__tpdst.extend(range(int(l[0]), int(l[1])+1))
      else:
        self.__tpdst.append(int(port))
class TooManyPrimaryControllers(CoreException):
  """Raised when a request names more than one primary controller."""
  def __init__ (self):
    super(TooManyPrimaryControllers, self).__init__()
  def __str__ (self):
    return "More than one primary controller specified."
class NoPrimaryController(CoreException):
  """Raised when a request names no primary controller at all."""
  def __init__ (self):
    super(NoPrimaryController, self).__init__()
  def __str__ (self):
    return "A primary controller must be specified."
class NoSliverTag(CoreException):
  """Raised when the request document contains no sliver element."""
  def __init__ (self):
    super(NoSliverTag, self).__init__()
  def __str__ (self):
    return "Request contains no sliver."
class NoPacketTag(CoreException):
  """Raised when a match element lacks a packet specification."""
  def __init__ (self, match):
    super(NoPacketTag, self).__init__()
    # Bug fix: the offending match element was accepted but silently
    # discarded; keep it so handlers/logs can inspect what was missing.
    self.match = match
  def __str__ (self):
    return "Match does not contain a packet specification."
class NoControllersDefined(CoreException):
  """Raised when the request defines no controllers."""
  def __init__ (self):
    super(NoControllersDefined, self).__init__()
  def __str__ (self):
    return "No controllers are defined for this request."
class NoHopsTag(CoreException):
  """Raised when a VirtualLink element lacks its list of hops."""
  def __init__ (self):
    super(NoHopsTag, self).__init__()
  def __str__ (self):
    return "VirtualLink does not contain the list of hops."
| {
"content_hash": "bdfb7eda57cc408ab4a865dcdeb5e579",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 112,
"avg_line_length": 29.699386503067483,
"alnum_prop": 0.5937822763891758,
"repo_name": "ict-felix/stack",
"id": "3efa9b77c2ac2a72f22d63b3298b434283547228",
"size": "9773",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ofam/src/src/foam/lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "337811"
},
{
"name": "Elixir",
"bytes": "17243"
},
{
"name": "Emacs Lisp",
"bytes": "1098"
},
{
"name": "Groff",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "660363"
},
{
"name": "Java",
"bytes": "18362"
},
{
"name": "JavaScript",
"bytes": "838960"
},
{
"name": "Makefile",
"bytes": "11581"
},
{
"name": "Perl",
"bytes": "5416"
},
{
"name": "Python",
"bytes": "8073455"
},
{
"name": "Shell",
"bytes": "259720"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from unittest import TestCase
import six
from sentry.utils.compat.mock import patch
from sentry.testutils.helpers.faux import faux
def fakefunc(*args, **kwargs):
    """No-op stand-in accepting any call signature; patched by TestFaux."""
    return None
@patch("tests.sentry.testutils.helpers.test_faux.fakefunc")
class TestFaux(TestCase):
    """Tests for the faux() mock-inspection helper.

    Every test receives the patched ``fakefunc`` as ``mock``. Bug fix:
    the error-message tests previously wrapped the call in a bare
    ``try/except AssertionError`` — if the expected AssertionError was
    never raised, the test silently PASSED. They now use
    ``self.assertRaises`` so a missing exception fails the test.
    """

    def test_args(self, mock):
        fakefunc(True)
        assert faux(mock).args == (True,)

    def test_kwargs(self, mock):
        fakefunc(foo=1)
        assert faux(mock).kwargs == {"foo": 1}

    def test_args_and_kwargs(self, mock):
        fakefunc(True, foo=1)
        assert faux(mock).args == (True,)
        assert faux(mock).kwargs == {"foo": 1}

    def test_called_with(self, mock):
        fakefunc(True, foo=1)
        assert faux(mock).called_with(True, foo=1)

    def test_called_with_error_message(self, mock):
        fakefunc(1)
        with self.assertRaises(AssertionError) as ctx:
            faux(mock).called_with(False)
        assert six.text_type(ctx.exception) == "Expected to be called with (False). Received (1)."

    def test_kwargs_contain(self, mock):
        fakefunc(foo=1)
        assert faux(mock).kwargs_contain("foo")

    def test_kwargs_contain_error_message(self, mock):
        fakefunc(foo=1)
        with self.assertRaises(AssertionError) as ctx:
            faux(mock).kwargs_contain("bar")
        assert six.text_type(ctx.exception) == "Expected kwargs to contain key 'bar'. Received (foo=1)."

    def test_kwarg_equals(self, mock):
        fakefunc(foo=1, bar=2)
        assert faux(mock).kwarg_equals("bar", 2)

    def test_kwarg_equals_error_message(self, mock):
        fakefunc(foo=1, bar=2)
        with self.assertRaises(AssertionError) as ctx:
            faux(mock).kwarg_equals("bar", True)
        assert six.text_type(ctx.exception) == "Expected kwargs[bar] to equal True. Received 2."

    def test_args_contain(self, mock):
        fakefunc(1, False, None)
        assert faux(mock).args_contain(False)

    def test_args_contain_error_message(self, mock):
        fakefunc(1, None, False)
        with self.assertRaises(AssertionError) as ctx:
            faux(mock).args_contain(True)
        assert six.text_type(ctx.exception) == "Expected args to contain True. Received (1, None, False)."

    def test_args_equal(self, mock):
        fakefunc(1, False, None)
        assert faux(mock).args_equals(1, False, None)

    def test_args_equal_error_message(self, mock):
        fakefunc(1, False)
        with self.assertRaises(AssertionError) as ctx:
            faux(mock).args_equals(["beep"])
        assert six.text_type(ctx.exception) == "Expected args to equal (['beep']). Received (1, False)."
| {
"content_hash": "e1084afed0aedee78e941235a9c677b4",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 98,
"avg_line_length": 30.03448275862069,
"alnum_prop": 0.6081132797550708,
"repo_name": "beeftornado/sentry",
"id": "d3b382e318546a1c2a959b123b3a254b7789e059",
"size": "2613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/testutils/helpers/test_faux.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Package metadata for the ut2_site Flask application.
setup(
    name='ut2_site',
    packages=['ut2_site'],
    include_package_data=True,
    install_requires=[
        'Flask',
        'Flask-WTF',
        'Flask-Uploads',
        'pymongo',
        'python-magic',
    ],
)
| {
"content_hash": "6b9aea94710694baa8520a4194739863",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 30,
"avg_line_length": 19.307692307692307,
"alnum_prop": 0.5418326693227091,
"repo_name": "cc-ru/ut2-site",
"id": "67ffdf8ce22c63a64feb12d3e7316fd44cbc70fe",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3865"
},
{
"name": "HTML",
"bytes": "5489"
},
{
"name": "Python",
"bytes": "11612"
}
],
"symlink_target": ""
} |
'''
Websocket proxy that is compatible with OpenStack Nova.
Leverages websockify.py by Joel Martin
'''
import Cookie
from select import select
import socket
import websockify
import nova.console.sasl_helper as sasl_helper
from oslo.config import cfg
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova.openstack.common import log as logging
wsp_krb_opts = [
cfg.BoolOpt('vnc_krb_auth',
default=False,
help='Whether to use Kerberos authentication '
'between the vnc proxy and nova hosts'
' (you must have your qemu vnc set up to use'
' SASL authentication to use this)'),
cfg.StrOpt('vnc_krb_username',
default='admin',
help='The kerberos username for the proxy to use when '
'communicating with nova hosts (only used when '
'vnc_krb_auth is set to true')
]
CONF = cfg.CONF
CONF.register_opts(wsp_krb_opts)
LOG = logging.getLogger(__name__)
class NovaWebSocketProxy(websockify.WebSocketProxy):
    """WebSocket-to-TCP proxy for Nova VNC consoles, with an optional
    SASL/GSSAPI (Kerberos) layer on the proxy-to-host leg.
    """

    def __init__(self, *args, **kwargs):
        # Targets are resolved per-connection from the console-auth token,
        # so the static target options are explicitly disabled here.
        super(NovaWebSocketProxy, self).__init__(unix_target=None,
                                                 target_cfg=None,
                                                 ssl_target=None,
                                                 *args, **kwargs)

    def new_client(self):
        """
        Called after a new WebSocket connection has been established.

        Validates the console-auth token from the 'token' cookie, connects
        to the host/port it maps to, performs any required HTTP CONNECT
        handshake, then proxies data until either side closes.
        """
        cookie = Cookie.SimpleCookie()
        cookie.load(self.headers.getheader('cookie'))
        token = cookie['token'].value
        ctxt = context.get_admin_context()
        rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        connect_info = rpcapi.check_token(ctxt, token=token)
        if not connect_info:
            LOG.audit("Invalid Token: %s", token)
            raise Exception(_("Invalid Token"))
        host = connect_info['host']
        port = int(connect_info['port'])
        # Connect to the target
        self.msg("connecting to: %s:%s" % (host, port))
        LOG.audit("connecting to: %s:%s" % (host, port))
        tsock = self.socket(host, port, connect=True)
        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
                       connect_info['internal_access_path'])
            while True:
                # Peek so the VNC byte stream after the HTTP response is
                # not consumed along with it.
                data = tsock.recv(4096, socket.MSG_PEEK)
                if data.find("\r\n\r\n") != -1:
                    # NOTE(review): find() returns -1 when "200" is absent
                    # (truthy after `not` negation only for 0), so this
                    # raises only when the status line STARTS with "200"
                    # and never when 200 is missing — looks inverted;
                    # confirm intended behavior.
                    if not data.split("\r\n")[0].find("200"):
                        LOG.audit("Invalid Connection Info %s", token)
                        raise Exception(_("Invalid Connection Info"))
                    tsock.recv(len(data))
                    break
        if self.verbose and not self.daemon:
            print(self.traffic_legend)
        # Start proxying
        try:
            if CONF.vnc_krb_auth:
                # Authenticate to the host's VNC server via SASL/GSSAPI,
                # then present a fake SASL server to the browser side so
                # both legs agree on the (wrapped) stream.
                self.msg('Using SASL/GSSAPI Authentication '
                         'between proxy and host')
                auth_id = CONF.vnc_krb_username
                sasl_gss = sasl_helper.RFBSASLClient(sock=tsock,
                                                     msg=self.msg,
                                                     authid=auth_id)
                sasl_gss.connect()
                sasl_fake = sasl_helper.RFBSASLServer(sasl_gss,
                                                      sendf=self.send_frames,
                                                      recvf=self.recv_frames,
                                                      msg=self.msg)
                sasl_fake.connect()
                self.do_proxy(tsock, sasl_gss.recv_unwrap, sasl_gss.wrap)
            else:
                self.do_proxy(tsock)
        except Exception:
            if tsock:
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                self.vmsg("%s:%s: Target closed" % (host, port))
                LOG.audit("%s:%s: Target closed" % (host, port))
            raise

    def do_proxy(self, target, target_recv_cb=lambda r, d: r(d),
                 target_send_cb=lambda x: x):
        self.msg('Beginning mitm proxy mode...')
        """
        Proxy client WebSocket to normal target socket.
        """
        # NOTE(review): the string above follows a statement, so it is a
        # no-op expression, not this method's docstring.
        #
        # target_recv_cb/target_send_cb let a SASL layer unwrap/wrap the
        # target byte stream; the defaults are pass-throughs.
        cqueue = []
        c_pend = 0
        tqueue = []
        rlist = [self.client, target]
        while True:
            wlist = []
            if tqueue:
                wlist.append(target)
            if cqueue or c_pend:
                wlist.append(self.client)
            ins, outs, excepts = select(rlist, wlist, [], 1)
            if excepts:
                raise Exception("Socket exception")
            if self.client in outs:
                # Send queued target data to the client
                c_pend = self.send_frames(cqueue)
                cqueue = []
            if self.client in ins:
                # Receive client data, decode it, and queue for target
                bufs, closed = self.recv_frames()
                tqueue.extend(bufs)
                if closed:
                    # TODO(websockify): What about blocking on client socket?
                    self.vmsg("%s:%s: Client closed connection" % (
                        self.target_host, self.target_port))
                    raise self.CClose(closed['code'], closed['reason'])
            if target in outs:
                # Send queued client data to the target
                dat_raw = tqueue.pop(0)
                dat = target_send_cb(dat_raw)
                sent = target.send(dat)
                if sent == len(dat):
                    self.traffic(">")
                else:
                    # requeue the remaining data
                    # NOTE(review): the remainder is already wrapped and
                    # will pass through target_send_cb again on the next
                    # iteration — harmless for the default pass-through;
                    # verify for the SASL-wrapped path.
                    tqueue.insert(0, dat[sent:])
                    self.traffic(".>")
            if target in ins:
                # Receive target data, encode it and queue for client
                buf = target_recv_cb(target.recv, self.buffer_size)
                if len(buf) == 0:
                    self.vmsg("%s:%s: Target closed connection" % (
                        self.target_host, self.target_port))
                    raise self.CClose(1000, "Target closed")
                cqueue.append(buf)
                self.traffic("{")
| {
"content_hash": "18953a10065576c8ded50f62869833e3",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 77,
"avg_line_length": 36.05084745762712,
"alnum_prop": 0.49396646293684376,
"repo_name": "DirectXMan12/nova-hacking",
"id": "e441699a5062de02df7b042fc5dd081b85789bf2",
"size": "7066",
"binary": false,
"copies": "1",
"ref": "refs/heads/feature_novnc_krb",
"path": "nova/console/websocketproxy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "10361785"
},
{
"name": "Shell",
"bytes": "17485"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration updating the ``choices`` metadata on
    ``account.play_with`` and ``userlink.type``.
    """

    dependencies = [
        ('api', '0067_card_japan_only'),
    ]

    operations = [
        # Playing-style choices for an account.
        migrations.AlterField(
            model_name='account',
            name='play_with',
            field=models.CharField(blank=True, max_length=30, null=True, verbose_name='Play with', choices=[(b'Thumbs', 'Thumbs'), (b'Fingers', 'All fingers'), (b'Index', 'Index fingers'), (b'Hand', 'One hand'), (b'Other', 'Other')]),
            preserve_default=True,
        ),
        # Supported social/gaming platforms for a user link.
        migrations.AlterField(
            model_name='userlink',
            name='type',
            field=models.CharField(max_length=20, verbose_name='Platform', choices=[(b'twitter', b'Twitter'), (b'facebook', b'Facebook'), (b'reddit', b'Reddit'), (b'line', b'LINE Messenger'), (b'tumblr', b'Tumblr'), (b'otonokizaka', b'Otonokizaka.org Forum'), (b'twitch', b'Twitch'), (b'steam', b'Steam'), (b'osu', b'Osu!'), (b'mal', b'MyAnimeList'), (b'instagram', b'Instagram'), (b'myfigurecollection', b'MyFigureCollection'), (b'hummingbird', b'Hummingbird'), (b'youtube', b'YouTube'), (b'deviantart', b'DeviantArt'), (b'pixiv', b'Pixiv'), (b'github', b'GitHub'), (b'animeplanet', b'Anime-Planet')]),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "0093f20e6a72103b77aab25b381a6b18",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 603,
"avg_line_length": 53.4,
"alnum_prop": 0.604494382022472,
"repo_name": "rdsathene/SchoolIdolAPI",
"id": "cc61551663656894083df80c7e97082539f3db30",
"size": "1359",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "api/migrations/0068_auto_20150914_1909.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "67801"
},
{
"name": "HTML",
"bytes": "474584"
},
{
"name": "JavaScript",
"bytes": "93928"
},
{
"name": "Python",
"bytes": "748281"
}
],
"symlink_target": ""
} |
from os import listdir, path
import numpy as np
from glaze.GL import glGenTextures, glDeleteTextures, glTexParameteri, glTexImage2D, glBindTexture, GL_TEXTURE_MAG_FILTER, GL_LINEAR, GL_TEXTURE_MIN_FILTER, \
GL_TEXTURE_WRAP_S, GL_TEXTURE_WRAP_T, GL_UNSIGNED_BYTE, GL_CLAMP_TO_EDGE, GL_TEXTURE_WRAP_R, GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X, \
GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, GL_TEXTURE_CUBE_MAP_POSITIVE_Z, \
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, GL_TEXTURE_CUBE_MAP
class CubeTexture(object):
    """Cube-map texture built from six face images found in one folder.

    Face files are recognized by the substrings 'posx', 'negx', 'posy',
    'negy', 'posz', 'negz' in their (case-insensitive) file names.
    """

    def __init__(self, engine, ID):
        self.engine = engine
        self.__xNeg = ''
        self.__yNeg = ''
        self.__xPos = ''
        self.__yPos = ''
        self.__zPos = ''
        self.__zNeg = ''
        # GL texture handle; stays -1 until loadFromFolder succeeds.
        self._nativeTexture = -1
        self.ID = ID
        # TODO: add ID existence check
        # TODO: inherit from base3Dobject3

    def loadFromFolder(self, folderPath, getPixels):
        """Create and fill a GL cube-map texture from images in folderPath.

        getPixels(path) must return (pixels, width, height, internalFormat,
        pixelFormat). Raises AttributeError when the folder holds fewer
        than six files, RuntimeError when six faces cannot be identified,
        and re-raises any GL error after deleting the partial texture.
        """
        # todo: move to backend
        # GL cube-map targets, in the same order as the face-name keys.
        sides = [
            GL_TEXTURE_CUBE_MAP_POSITIVE_X,  # right
            GL_TEXTURE_CUBE_MAP_NEGATIVE_X,  # left
            GL_TEXTURE_CUBE_MAP_POSITIVE_Y,  # top
            GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,  # bottom
            GL_TEXTURE_CUBE_MAP_POSITIVE_Z,  # back
            GL_TEXTURE_CUBE_MAP_NEGATIVE_Z   # front
        ]
        filenames = ['posx', 'negx', 'posy', 'negy', 'posz', 'negz']

        # Collect the plain files in the folder.
        filesInDir = []
        for entry in listdir(folderPath):
            fullPath = path.join(folderPath, entry)
            if path.isfile(fullPath):
                filesInDir.append(fullPath)
        if len(filesInDir) < 6:
            raise AttributeError('Not enough files in folder:\n' + folderPath)

        # Match each face key to the first file whose name contains it.
        files = {}
        for f in filesInDir:
            if len(files) == 6:
                break
            baseName = path.basename(f).lower()
            for key in filenames:
                # Idiom fix: 'in' operator instead of calling __contains__.
                if key in baseName:
                    files[key] = f
                    break
        if len(files) < 6:
            raise RuntimeError('Not enough \'valid\' files in folder.')

        self.__xNeg = files['negx']
        self.__xPos = files['posx']
        self.__yNeg = files['negy']
        self.__yPos = files['posy']
        self.__zNeg = files['negz']
        self.__zPos = files['posz']

        tex = -1
        try:
            tex = np.empty((1,), np.uint32)
            glGenTextures(1, tex)
            glBindTexture(GL_TEXTURE_CUBE_MAP, tex)
            # Mandatory sampling/wrap state for cube maps.
            glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
            glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
            glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
            glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
            glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE)
            # Idiom fix: zip pairs each target with its face key directly,
            # replacing the O(n) filenames.index() lookup per face.
            for side, name in zip(sides, filenames):
                pix, w, h, mode1, mode2 = getPixels(files[name])
                # glTexImage2D(target, level, internalformat, width, height,
                #              border, format, type, pixels)
                glTexImage2D(side, 0, mode1, w, h, 0, mode2, GL_UNSIGNED_BYTE, pix)
            self._nativeTexture = tex
        except Exception:
            # Best-effort cleanup of the partially-built texture.
            try:
                glDeleteTextures(tex)
            except Exception:
                pass
            raise
| {
"content_hash": "cd36210147e465caad900c3b18d3a69c",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 162,
"avg_line_length": 39.87234042553192,
"alnum_prop": 0.5557630736392742,
"repo_name": "jr-garcia/Engendro3D",
"id": "1cfe1006fadeacac8f01747b17decf966148576a",
"size": "3748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "e3d/texture_management/CubeTextureClass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "17621"
},
{
"name": "Python",
"bytes": "456046"
}
],
"symlink_target": ""
} |
import subprocess, jsonWebToken, APIStockParser
from subprocess import Popen, PIPE
#Checks to see of the user has a ticket
def has_kerberos_ticket():
    """Return True when `klist -s` reports a valid Kerberos ticket cache."""
    return subprocess.call(['klist', '-s']) == 0
#Issues the JWT to the client
def issueJwt(username, symbol, days):
    """Wrap the request parameters in a signed JSON Web Token string."""
    payload = {'user': username, 'symbol': symbol, 'daysToGet': days}
    return jsonWebToken.encode(payload)
def decodeJwt(objStr):
    """Decodes a JWT string back into its payload via jsonWebToken."""
    decoded = jsonWebToken.decode(objStr)
    return decoded
def kinit(username, password):
    """Authenticates *username* by piping the password to the kinit binary.

    The password followed by a newline is written to kinit's stdin.
    """
    proc = Popen(['kinit', username], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # Popen pipes are byte streams in Python 3: writing a str raised
    # TypeError.  communicate() also closes stdin and waits for the child,
    # avoiding a deadlock if kinit fills its stdout/stderr pipes.
    proc.communicate(input=('%s\n' % password).encode())
def kinit2(username, password):
    """Test variant of kinit() that targets the IST440.ABINGTON.PSU.EDU
    realm explicitly via the absolute kinit path.

    Returns:
        The username that was authenticated.
    """
    kinit_path = '/usr/bin/kinit'
    kinit_args = [kinit_path, '%s@%s' % (username, "IST440.ABINGTON.PSU.EDU")]
    proc = Popen(kinit_args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # Bytes, not str, must be written to a Popen pipe under Python 3;
    # communicate() also closes stdin and prevents pipe-buffer deadlock.
    proc.communicate(input=('%s\n' % password).encode())
    return username
def StartProcess(username, password, symbol, daysToGet):
    """Authenticates the user and, on success, runs the stock parser.

    Returns:
        The parser result, or the string "Access Denied" when no Kerberos
        ticket could be obtained after the kinit attempt.
    """
    kinit(username, password)
    if not has_kerberos_ticket():
        return "Access Denied"
    token = issueJwt(username, symbol, daysToGet)
    return APIStockParser.ParseStockData(token)
| {
"content_hash": "09d6e78280492b83ec448582ca640cde",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 82,
"avg_line_length": 33.026315789473685,
"alnum_prop": 0.7402390438247012,
"repo_name": "recreated/Yahoo_API_Parser",
"id": "55b458b8f602a525103a0a534948592cb8b792a1",
"size": "1433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "authentication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "285425"
}
],
"symlink_target": ""
} |
"""Converts Python data into data for Google Visualization API clients.
This library can be used to create a google.visualization.DataTable usable by
visualizations built on the Google Visualization API. Output formats are raw
JSON, JSON response, JavaScript, CSV, and HTML table.
See http://code.google.com/apis/visualization/ for documentation on the
Google Visualization API.
"""
__author__ = "Amit Weinstein, Misha Seltzer, Jacob Baskin"
import cgi
import csv
import datetime
import functools
import html
import io
import types

try:
  import json
except ImportError:
  import simplejson as json
class DataTableException(Exception):
  """Raised by DataTable for schema or data errors."""
class DataTableJSONEncoder(json.JSONEncoder):
  """JSON encoder that handles date/time/datetime objects correctly.

  Dates are rendered as the "Date(y,m,d,...)" strings the Google
  Visualization API expects; note JS Date months are 0-based.
  """

  def __init__(self):
    # Compact separators and raw (non-ASCII-escaped) output.
    json.JSONEncoder.__init__(self,
                              separators=(",", ":"),
                              ensure_ascii=False)

  def default(self, o):
    """Serializes date/time objects; defers everything else to the base."""
    if isinstance(o, datetime.datetime):
      if o.microsecond == 0:
        # If the time doesn't have ms-resolution, leave it out to keep
        # things smaller.
        return "Date(%d,%d,%d,%d,%d,%d)" % (
            o.year, o.month - 1, o.day, o.hour, o.minute, o.second)
      else:
        # Integer division: "/" produces a float under Python 3 and only
        # worked here via %d's implicit truncation.
        return "Date(%d,%d,%d,%d,%d,%d,%d)" % (
            o.year, o.month - 1, o.day, o.hour, o.minute, o.second,
            o.microsecond // 1000)
    elif isinstance(o, datetime.date):
      return "Date(%d,%d,%d)" % (o.year, o.month - 1, o.day)
    elif isinstance(o, datetime.time):
      return [o.hour, o.minute, o.second]
    else:
      return super(DataTableJSONEncoder, self).default(o)
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
"""Initialize the data table from a table schema and (optionally) data.
See the class documentation for more information on table schema and data
values.
Args:
table_description: A table schema, following one of the formats described
in TableDescriptionParser(). Schemas describe the
column names, data types, and labels. See
TableDescriptionParser() for acceptable formats.
data: Optional. If given, fills the table with the given data. The data
structure must be consistent with schema in table_description. See
the class documentation for more information on acceptable data. You
can add data later by calling AppendData().
custom_properties: Optional. A dictionary from string to string that
goes into the table's custom properties. This can be
later changed by changing self.custom_properties.
Raises:
DataTableException: Raised if the data and the description did not match,
or did not use the supported formats.
"""
self.__columns = self.TableDescriptionParser(table_description)
self.__data = []
self.custom_properties = {}
if custom_properties is not None:
self.custom_properties = custom_properties
if data:
self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
"""Coerces a single value into the type expected for its column.
Internal helper method.
Args:
value: The value which should be converted
value_type: One of "string", "number", "boolean", "date", "datetime" or
"timeofday".
Returns:
An item of the Python type appropriate to the given value_type. Strings
are also converted to Unicode using UTF-8 encoding if necessary.
If a tuple is given, it should be in one of the following forms:
- (value, formatted value)
- (value, formatted value, custom properties)
where the formatted value is a string, and custom properties is a
dictionary of the custom properties for this cell.
To specify custom properties without specifying formatted value, one can
pass None as the formatted value.
One can also have a null-valued cell with formatted value and/or custom
properties by specifying None for the value.
This method ignores the custom properties except for checking that it is a
dictionary. The custom properties are handled in the ToJSon and ToJSCode
methods.
The real type of the given value is not strictly checked. For example,
any type can be used for string - as we simply take its str( ) and for
boolean value we just check "if value".
Examples:
CoerceValue(None, "string") returns None
CoerceValue((5, "5$"), "number") returns (5, "5$")
CoerceValue(100, "string") returns "100"
CoerceValue(0, "boolean") returns False
Raises:
DataTableException: The value and type did not match in a not-recoverable
way, for example given value 'abc' for type 'number'.
"""
if isinstance(value, tuple):
# In case of a tuple, we run the same function on the value itself and
# add the formatted value.
if (len(value) not in [2, 3] or
(len(value) == 3 and not isinstance(value[2], dict))):
raise DataTableException("Wrong format for value and formatting - %s." %
str(value))
if not isinstance(value[1], str + (types.NoneType,)):
raise DataTableException("Formatted value is not string, given %s." %
type(value[1]))
js_value = DataTable.CoerceValue(value[0], value_type)
return (js_value,) + value[1:]
t_value = type(value)
if value is None:
return value
if value_type == "boolean":
return bool(value)
elif value_type == "number":
if isinstance(value, (int, float)):
return value
raise DataTableException("Wrong type %s when expected number" % t_value)
elif value_type == "string":
if isinstance(value, str):
return value
else:
return str(value).decode("utf-8")
elif value_type == "date":
if isinstance(value, datetime.datetime):
return datetime.date(value.year, value.month, value.day)
elif isinstance(value, datetime.date):
return value
else:
raise DataTableException("Wrong type %s when expected date" % t_value)
elif value_type == "timeofday":
if isinstance(value, datetime.datetime):
return datetime.time(value.hour, value.minute, value.second)
elif isinstance(value, datetime.time):
return value
else:
raise DataTableException("Wrong type %s when expected time" % t_value)
elif value_type == "datetime":
if isinstance(value, datetime.datetime):
return value
else:
raise DataTableException("Wrong type %s when expected datetime" %
t_value)
# If we got here, it means the given value_type was not one of the
# supported types.
raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
if value is None:
return "(empty)"
elif isinstance(value, (datetime.datetime,
datetime.date,
datetime.time)):
return str(value)
elif isinstance(value, str):
return value
elif isinstance(value, bool):
return str(value).lower()
else:
return str(value).decode("utf-8")
@staticmethod
def ColumnTypeParser(description):
"""Parses a single column description. Internal helper method.
Args:
description: a column description in the possible formats:
'id'
('id',)
('id', 'type')
('id', 'type', 'label')
('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
Returns:
Dictionary with the following keys: id, label, type, and
custom_properties where:
- If label not given, it equals the id.
- If type not given, string is used by default.
- If custom properties are not given, an empty dictionary is used by
default.
Raises:
DataTableException: The column description did not match the RE, or
unsupported type was passed.
"""
if not description:
raise DataTableException("Description error: empty description given")
if not isinstance(description, (str, tuple)):
raise DataTableException("Description error: expected either string or "
"tuple, got %s." % type(description))
if isinstance(description, str):
description = (description,)
# According to the tuple's length, we fill the keys
# We verify everything is of type string
for elem in description[:3]:
if not isinstance(elem, str):
raise DataTableException("Description error: expected tuple of "
"strings, current element of type %s." %
type(elem))
desc_dict = {"id": description[0],
"label": description[0],
"type": "string",
"custom_properties": {}}
if len(description) > 1:
desc_dict["type"] = description[1].lower()
if len(description) > 2:
desc_dict["label"] = description[2]
if len(description) > 3:
if not isinstance(description[3], dict):
raise DataTableException("Description error: expected custom "
"properties of type dict, current element "
"of type %s." % type(description[3]))
desc_dict["custom_properties"] = description[3]
if len(description) > 4:
raise DataTableException("Description error: tuple of length > 4")
if desc_dict["type"] not in ["string", "number", "boolean",
"date", "datetime", "timeofday"]:
raise DataTableException(
"Description error: unsupported type '%s'" % desc_dict["type"])
return desc_dict
@staticmethod
def TableDescriptionParser(table_description, depth=0):
"""Parses the table_description object for internal use.
Parses the user-submitted table description into an internal format used
by the Python DataTable class. Returns the flat list of parsed columns.
Args:
table_description: A description of the table which should comply
with one of the formats described below.
depth: Optional. The depth of the first level in the current description.
Used by recursive calls to this function.
Returns:
List of columns, where each column represented by a dictionary with the
keys: id, label, type, depth, container which means the following:
- id: the id of the column
- name: The name of the column
- type: The datatype of the elements in this column. Allowed types are
described in ColumnTypeParser().
- depth: The depth of this column in the table description
- container: 'dict', 'iter' or 'scalar' for parsing the format easily.
- custom_properties: The custom properties for this column.
The returned description is flattened regardless of how it was given.
Raises:
DataTableException: Error in a column description or in the description
structure.
Examples:
A column description can be of the following forms:
'id'
('id',)
('id', 'type')
('id', 'type', 'label')
('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
or as a dictionary:
'id': 'type'
'id': ('type',)
'id': ('type', 'label')
'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
If the type is not specified, we treat it as string.
If no specific label is given, the label is simply the id.
If no custom properties are given, we use an empty dictionary.
input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
output: [{'id': 'a', 'label': 'a', 'type': 'date',
'depth': 0, 'container': 'iter', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'timeofday',
'depth': 0, 'container': 'iter',
'custom_properties': {'foo': 'bar'}}]
input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
output: [{'id': 'a', 'label': 'a', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'iter', 'custom_properties': {}},
{'id': 'c', 'label': 'column c', 'type': 'string',
'depth': 1, 'container': 'iter', 'custom_properties': {}}]
input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
output: [{'id': 'a', 'label': 'column a', 'type': 'number',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'c', 'type': 'string',
'depth': 1, 'container': 'dict', 'custom_properties': {}}]
input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
output: [{'id': 'w', 'label': 'word', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'count', 'type': 'number',
'depth': 1, 'container': 'scalar', 'custom_properties': {}}]
input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
'container': 'dict', 'custom_properties': {}}
NOTE: there might be ambiguity in the case of a dictionary representation
of a single column. For example, the following description can be parsed
in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
'a', and the other named 'b' of type 'c'. We choose the first option by
default, and in case the second option is the right one, it is possible to
make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
-- second 'b' is the label, and {} is the custom properties field.
"""
# For the recursion step, we check for a scalar object (string or tuple)
if isinstance(table_description, (str, tuple)):
parsed_col = DataTable.ColumnTypeParser(table_description)
parsed_col["depth"] = depth
parsed_col["container"] = "scalar"
return [parsed_col]
# Since it is not scalar, table_description must be iterable.
if not hasattr(table_description, "__iter__"):
raise DataTableException("Expected an iterable object, got %s" %
type(table_description))
if not isinstance(table_description, dict):
# We expects a non-dictionary iterable item.
columns = []
for desc in table_description:
parsed_col = DataTable.ColumnTypeParser(desc)
parsed_col["depth"] = depth
parsed_col["container"] = "iter"
columns.append(parsed_col)
if not columns:
raise DataTableException("Description iterable objects should not"
" be empty.")
return columns
# The other case is a dictionary
if not table_description:
raise DataTableException("Empty dictionaries are not allowed inside"
" description")
# To differentiate between the two cases of more levels below or this is
# the most inner dictionary, we consider the number of keys (more then one
# key is indication for most inner dictionary) and the type of the key and
# value in case of only 1 key (if the type of key is string and the type of
# the value is a tuple of 0-3 items, we assume this is the most inner
# dictionary).
# NOTE: this way of differentiating might create ambiguity. See docs.
if (len(table_description) != 1 or
(isinstance(table_description.keys()[0], str) and
isinstance(table_description.values()[0], tuple) and
len(table_description.values()[0]) < 4)):
# This is the most inner dictionary. Parsing types.
columns = []
# We sort the items, equivalent to sort the keys since they are unique
for key, value in sorted(table_description.items()):
# We parse the column type as (key, type) or (key, type, label) using
# ColumnTypeParser.
if isinstance(value, tuple):
parsed_col = DataTable.ColumnTypeParser((key,) + value)
else:
parsed_col = DataTable.ColumnTypeParser((key, value))
parsed_col["depth"] = depth
parsed_col["container"] = "dict"
columns.append(parsed_col)
return columns
# This is an outer dictionary, must have at most one key.
parsed_col = DataTable.ColumnTypeParser(table_description.keys()[0])
parsed_col["depth"] = depth
parsed_col["container"] = "dict"
return ([parsed_col] +
DataTable.TableDescriptionParser(table_description.values()[0],
depth=depth + 1))
@property
def columns(self):
"""Returns the parsed table description."""
return self.__columns
def NumberOfRows(self):
"""Returns the number of rows in the current data stored in the table."""
return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
"""Sets the custom properties for given row(s).
Can accept a single row or an iterable of rows.
Sets the given custom properties for all specified rows.
Args:
rows: The row, or rows, to set the custom properties for.
custom_properties: A string to string dictionary of custom properties to
set for all rows.
"""
if not hasattr(rows, "__iter__"):
rows = [rows]
for row in rows:
self.__data[row] = (self.__data[row][0], custom_properties)
def LoadData(self, data, custom_properties=None):
"""Loads new rows to the data table, clearing existing rows.
May also set the custom_properties for the added rows. The given custom
properties dictionary specifies the dictionary that will be used for *all*
given rows.
Args:
data: The rows that the table will contain.
custom_properties: A dictionary of string to string to set as the custom
properties for all rows.
"""
self.__data = []
self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
"""Appends new data to the table.
Data is appended in rows. Data must comply with
the table schema passed in to __init__(). See CoerceValue() for a list
of acceptable data types. See the class documentation for more information
and examples of schema and data values.
Args:
data: The row to add to the table. The data must conform to the table
description format.
custom_properties: A dictionary of string to string, representing the
custom properties to add to all the rows.
Raises:
DataTableException: The data structure does not match the description.
"""
# If the maximal depth is 0, we simply iterate over the data table
# lines and insert them using _InnerAppendData. Otherwise, we simply
# let the _InnerAppendData handle all the levels.
if not self.__columns[-1]["depth"]:
for row in data:
self._InnerAppendData(({}, custom_properties), row, 0)
else:
self._InnerAppendData(({}, custom_properties), data, 0)
  def _InnerAppendData(self, prev_col_values, data, col_index):
    """Inner function to assist LoadData.

    Recursively descends through `data` following the parsed column list,
    accumulating cell values and appending finished rows to self.__data.

    Args:
      prev_col_values: Tuple of (dict mapping column id -> value collected
          so far, custom properties for the row) -- AppendData seeds it
          with ({}, custom_properties).
      data: The remaining data for the columns from col_index onward.
      col_index: Index into self.__columns of the column to fill next.

    Raises:
      DataTableException: The data does not match the description.
    """
    # We first check that col_index has not exceeded the columns size
    if col_index >= len(self.__columns):
      raise DataTableException("The data does not match description, too deep")
    # Dealing with the scalar case, the data is the last value.
    if self.__columns[col_index]["container"] == "scalar":
      prev_col_values[0][self.__columns[col_index]["id"]] = data
      self.__data.append(prev_col_values)
      return
    if self.__columns[col_index]["container"] == "iter":
      # dicts are explicitly rejected here: they are handled by the
      # "dict" container branches below.
      if not hasattr(data, "__iter__") or isinstance(data, dict):
        raise DataTableException("Expected iterable object, got %s" %
                                 type(data))
      # We only need to insert the rest of the columns
      # If there are less items than expected, we only add what there is.
      for value in data:
        if col_index >= len(self.__columns):
          raise DataTableException("Too many elements given in data")
        prev_col_values[0][self.__columns[col_index]["id"]] = value
        col_index += 1
      self.__data.append(prev_col_values)
      return
    # We know the current level is a dictionary, we verify the type.
    if not isinstance(data, dict):
      raise DataTableException("Expected dictionary at current level, got %s" %
                               type(data))
    # We check if this is the last level
    if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
      # We need to add the keys in the dictionary as they are
      for col in self.__columns[col_index:]:
        if col["id"] in data:
          prev_col_values[0][col["id"]] = data[col["id"]]
      self.__data.append(prev_col_values)
      return
    # We have a dictionary in an inner depth level.
    if not data.keys():
      # In case this is an empty dictionary, we add a record with the columns
      # filled only until this point.
      self.__data.append(prev_col_values)
    else:
      # Each key becomes this level's cell value; recursion fills the
      # deeper columns, copying the accumulated values per key so sibling
      # rows do not share state.
      for key in sorted(data):
        col_values = dict(prev_col_values[0])
        col_values[self.__columns[col_index]["id"]] = key
        self._InnerAppendData((col_values, prev_col_values[1]),
                              data[key], col_index + 1)
def _PreparedData(self, order_by=()):
"""Prepares the data for enumeration - sorting it by order_by.
Args:
order_by: Optional. Specifies the name of the column(s) to sort by, and
(optionally) which direction to sort in. Default sort direction
is asc. Following formats are accepted:
"string_col_name" -- For a single key in default (asc) order.
("string_col_name", "asc|desc") -- For a single key.
[("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
one column, an array of tuples of (col_name, "asc|desc").
Returns:
The data sorted by the keys given.
Raises:
DataTableException: Sort direction not in 'asc' or 'desc'
"""
if not order_by:
return self.__data
proper_sort_keys = []
if isinstance(order_by, str) or (
isinstance(order_by, tuple) and len(order_by) == 2 and
order_by[1].lower() in ["asc", "desc"]):
order_by = (order_by,)
for key in order_by:
if isinstance(key, str):
proper_sort_keys.append((key, 1))
elif (isinstance(key, (list, tuple)) and len(key) == 2 and
key[1].lower() in ("asc", "desc")):
proper_sort_keys.append((key[0], key[1].lower() == "asc" and 1 or -1))
else:
raise DataTableException("Expected tuple with second value: "
"'asc' or 'desc'")
def SortCmpFunc(row1, row2):
"""cmp function for sorted. Compares by keys and 'asc'/'desc' keywords."""
for key, asc_mult in proper_sort_keys:
cmp_result = asc_mult * ((row1[0].get(key)<row2[0].get(key))-(row1[0].get(key)>row2[0].get(key)))
if cmp_result:
return cmp_result
return 0
return sorted(self.__data, cmp=SortCmpFunc)
  def ToJSCode(self, name, columns_order=None, order_by=()):
    """Writes the data table as a JS code string.

    This method writes a string of JS code that can be run to
    generate a DataTable with the specified data. Typically used for
    debugging only.

    Args:
      name: The name of the table. The name would be used as the
          DataTable's variable name in the created JS code.
      columns_order: Optional. Specifies the order of columns in the
          output table. Specify a list of all column IDs in the order
          in which you want the table created. Note that you must list
          all column IDs in this parameter, if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
          Passed as is to _PreparedData.

    Returns:
      A string of JS code that, when run, generates a DataTable with the
      given name and the data stored in the DataTable object.

      Example result:
        "var tab1 = new google.visualization.DataTable();
         tab1.addColumn("string", "a", "a");
         tab1.addColumn("number", "b", "b");
         tab1.addRows(10);
         tab1.setCell(0, 0, "a");
         tab1.setCell(0, 1, 1, null, {"foo": "bar"});
         ...
         tab1.setCell(9, 1, 3, "3$");"

    Raises:
      DataTableException: The data does not match the type.
    """
    encoder = DataTableJSONEncoder()
    if columns_order is None:
      columns_order = [col["id"] for col in self.__columns]
    col_dict = dict([(col["id"], col) for col in self.__columns])
    # We first create the table with the given name
    jscode = "var %s = new google.visualization.DataTable();\n" % name
    if self.custom_properties:
      jscode += "%s.setTableProperties(%s);\n" % (
          name, encoder.encode(self.custom_properties))
    # We add the columns to the table
    for i, col in enumerate(columns_order):
      jscode += "%s.addColumn(%s, %s, %s);\n" % (
          name,
          encoder.encode(col_dict[col]["type"]),
          encoder.encode(col_dict[col]["label"]),
          encoder.encode(col_dict[col]["id"]))
      if col_dict[col]["custom_properties"]:
        jscode += "%s.setColumnProperties(%d, %s);\n" % (
            name, i, encoder.encode(col_dict[col]["custom_properties"]))
    jscode += "%s.addRows(%d);\n" % (name, len(self.__data))
    # We now go over the data and add each row
    for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
      # We add all the elements of this row by their order
      for (j, col) in enumerate(columns_order):
        # Null cells are simply left unset on the JS side.
        if col not in row or row[col] is None:
          continue
        value = self.CoerceValue(row[col], col_dict[col]["type"])
        if isinstance(value, tuple):
          cell_cp = ""
          if len(value) == 3:
            # Third tuple element is the cell's custom-properties dict.
            cell_cp = ", %s" % encoder.encode(row[col][2])
          # We have a formatted value or custom property as well
          jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                     (name, i, j,
                      self.EscapeForJSCode(encoder, value[0]),
                      self.EscapeForJSCode(encoder, value[1]), cell_cp))
        else:
          jscode += "%s.setCell(%d, %d, %s);\n" % (
              name, i, j, self.EscapeForJSCode(encoder, value))
      if cp:
        jscode += "%s.setRowProperties(%d, %s);\n" % (
            name, i, encoder.encode(cp))
    return jscode
def ToHtml(self, columns_order=None, order_by=()):
"""Writes the data table as an HTML table code string.
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData.
Returns:
An HTML table code string.
Example result (the result is without the newlines):
<html><body><table border="1">
<thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
<tbody>
<tr><td>1</td><td>"z"</td><td>2</td></tr>
<tr><td>"3$"</td><td>"w"</td><td></td></tr>
</tbody>
</table></body></html>
Raises:
DataTableException: The data does not match the type.
"""
table_template = "<html><body><table border=\"1\">%s</table></body></html>"
columns_template = "<thead><tr>%s</tr></thead>"
rows_template = "<tbody>%s</tbody>"
row_template = "<tr>%s</tr>"
header_cell_template = "<th>%s</th>"
cell_template = "<td>%s</td>"
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
columns_list = []
for col in columns_order:
columns_list.append(header_cell_template %
cgi.escape(col_dict[col]["label"]))
columns_html = columns_template % "".join(columns_list)
rows_list = []
# We now go over the data and add each row
for row, unused_cp in self._PreparedData(order_by):
cells_list = []
# We add all the elements of this row by their order
for col in columns_order:
# For empty string we want empty quotes ("").
value = ""
if col in row and row[col] is not None:
value = self.CoerceValue(row[col], col_dict[col]["type"])
if isinstance(value, tuple):
# We have a formatted value and we're going to use it
cells_list.append(cell_template % cgi.escape(self.ToString(value[1])))
else:
cells_list.append(cell_template % cgi.escape(self.ToString(value)))
rows_list.append(row_template % "".join(cells_list))
rows_html = rows_template % "".join(rows_list)
return table_template % (columns_html + rows_html)
def ToCsv(self, columns_order=None, order_by=(), separator=","):
"""Writes the data table as a CSV string.
Output is encoded in UTF-8 because the Python "csv" module can't handle
Unicode properly according to its documentation.
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData.
separator: Optional. The separator to use between the values.
Returns:
A CSV string representing the table.
Example result:
'a','b','c'
1,'z',2
3,'w',''
Raises:
DataTableException: The data does not match the type.
"""
csv_buffer = io.StringIO.StringIO()
writer = csv.writer(csv_buffer, delimiter=separator)
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
writer.writerow([col_dict[col]["label"].encode("utf-8")
for col in columns_order])
# We now go over the data and add each row
for row, unused_cp in self._PreparedData(order_by):
cells_list = []
# We add all the elements of this row by their order
for col in columns_order:
value = ""
if col in row and row[col] is not None:
value = self.CoerceValue(row[col], col_dict[col]["type"])
if isinstance(value, tuple):
# We have a formatted value. Using it only for date/time types.
if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
cells_list.append(self.ToString(value[1]).encode("utf-8"))
else:
cells_list.append(self.ToString(value[0]).encode("utf-8"))
else:
cells_list.append(self.ToString(value).encode("utf-8"))
writer.writerow(cells_list)
return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
  """Returns a file in tab-separated-format readable by MS Excel.

  The table is rendered via ToCsv() with a tab separator, then transcoded
  from UTF-8 to little-endian UTF-16.

  Args:
    columns_order: Delegated to ToCsv.
    order_by: Delegated to ToCsv.

  Returns:
    A tab-separated little endian UTF16 file representing the table.
  """
  utf8_tsv = self.ToCsv(columns_order, order_by, separator="\t")
  return utf8_tsv.decode("utf-8").encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
  """Returns an object suitable to be converted to JSON.

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
                   you want them created in the output table. If specified,
                   all column IDs must be present.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData().

  Returns:
    A dictionary object for use by ToJSon or ToJSonResponse.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Creating the column JSON objects
  col_objs = []
  for col_id in columns_order:
    col_obj = {"id": col_dict[col_id]["id"],
               "label": col_dict[col_id]["label"],
               "type": col_dict[col_id]["type"]}
    if col_dict[col_id]["custom_properties"]:
      # Column-level custom properties go under the "p" key.
      col_obj["p"] = col_dict[col_id]["custom_properties"]
    col_objs.append(col_obj)

  # Creating the rows jsons
  row_objs = []
  for row, cp in self._PreparedData(order_by):
    cell_objs = []
    for col in columns_order:
      value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
      if value is None:
        cell_obj = None
      elif isinstance(value, tuple):
        # A tuple is (value, formatted_value[, custom_properties]):
        # "v" is the raw value, "f" the display string, "p" the extras.
        cell_obj = {"v": value[0]}
        if len(value) > 1 and value[1] is not None:
          cell_obj["f"] = value[1]
        if len(value) == 3:
          cell_obj["p"] = value[2]
      else:
        cell_obj = {"v": value}
      cell_objs.append(cell_obj)
    row_obj = {"c": cell_objs}
    if cp:
      # Row-level custom properties.
      row_obj["p"] = cp
    row_objs.append(row_obj)

  json_obj = {"cols": col_objs, "rows": row_objs}
  if self.custom_properties:
    # Table-level custom properties.
    json_obj["p"] = self.custom_properties
  return json_obj
def ToJSon(self, columns_order=None, order_by=()):
  """Returns a string that can be used in a JS DataTable constructor.

  Produces a JSON string suitable for passing directly into a Google
  Visualization API DataTable constructor on a page you host yourself,
  e.g.:

    google.setOnLoadCallback(drawTable);
    function drawTable() {
      var data = new google.visualization.DataTable(_my_JSon_string, 0.6);
      myTable.draw(data);
    }

  Args:
    columns_order: Optional. Specifies the order of columns in the
                   output table; must list all column IDs when used.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData().

  Returns:
    A JSon constructor string (UTF-8 encoded) to generate a JS DataTable
    with the data stored in the DataTable object.

  Raises:
    DataTableException: The data does not match the type.
  """
  table_obj = self._ToJSonObj(columns_order, order_by)
  json_encoder = DataTableJSONEncoder()
  return json_encoder.encode(table_obj).encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
  """Writes a table as a JSON response that can be returned as-is to a client.

  Builds the response envelope expected by the Google Visualization Query
  protocol and wraps it in a call to *response_handler*. This is used to
  deliver a data table to a visualization hosted on a different page.

  Args:
    columns_order: Optional. Passed straight to self._ToJSonObj().
    order_by: Optional. Passed straight to self._ToJSonObj().
    req_id: Optional. The response id, as retrieved by the request.
    response_handler: Optional. The response handler, as retrieved by the
                      request.

  Returns:
    A JSON response string to be received by the JS visualization Query
    object, e.g.:

      google.visualization.Query.setResponse({
          'version':'0.6', 'reqId':'0', 'status':'OK',
          'table': {cols: [...], rows: [...]}});
  """
  envelope = {
      "version": "0.6",
      "reqId": str(req_id),
      "table": self._ToJSonObj(columns_order, order_by),
      "status": "ok"
  }
  encoded = DataTableJSONEncoder().encode(envelope).encode("utf-8")
  return "%s(%s);" % (response_handler, encoded)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Writes the right response according to the request string passed in tqx.

  Parses the tqx request string (defined by the Google Visualization data
  source protocol) and dispatches on its "out" parameter:
  "json" -> ToJSonResponse(), "html" -> ToHtml(), "csv" -> ToCsv(),
  "tsv-excel" -> ToTsvExcel(). Missing keys fall back to their protocol
  defaults (version "0.6", out "json").

  Args:
    columns_order: Optional. Passed as is to the relevant response function.
    order_by: Optional. Passed as is to the relevant response function.
    tqx: Optional. The request string as received by HTTP GET, formatted as
         "key1:value1;key2:value2...". An empty string selects the defaults.

  Returns:
    A response string, as returned by the relevant response function.

  Raises:
    DataTableException: One of the parameters passed in tqx is not supported.
  """
  request_opts = {}
  if tqx:
    request_opts = dict(opt.split(":") for opt in tqx.split(";"))

  if request_opts.get("version", "0.6") != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported."
        % request_opts["version"])

  out_format = request_opts.get("out", "json")
  if out_format == "json":
    handler = request_opts.get("responseHandler",
                               "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=request_opts.get("reqId", 0),
                               response_handler=handler)
  if out_format == "html":
    return self.ToHtml(columns_order, order_by)
  if out_format == "csv":
    return self.ToCsv(columns_order, order_by)
  if out_format == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  raise DataTableException(
      "'out' parameter: '%s' is not supported" % out_format)
| {
"content_hash": "2570cb0285a26eff7843bd7f67197fce",
"timestamp": "",
"source": "github",
"line_count": 1075,
"max_line_length": 105,
"avg_line_length": 41.85302325581395,
"alnum_prop": 0.6026626955903271,
"repo_name": "CVBDL/ccollab2eeplatform-python",
"id": "ed4174cd95ef6730c2e87d380471ded88dc26b9e",
"size": "45614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ccollab2eeplatform/google_visualization/gviz_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113331"
}
],
"symlink_target": ""
} |
'''
Created on 15 Feb 2013
@author: plish
'''
# Bug fix: ``install_requires`` is a setuptools keyword; plain
# distutils.core.setup silently ignores it, so the declared dependencies
# were never installed. Prefer setuptools, fall back to distutils.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name = 'Trolly',
    version = '1.0.0',
    author = 'plish',
    author_email = 'plish.development@gmail.com',
    url = 'https://github.com/plish/Trolly',
    packages = ['trolly'],
    license = 'LICENCE.txt',
    install_requires = ['httplib2', 'singledispatch'],
    description = 'Trello API Wrapper',
    long_description = 'For more detail please see the github page'
)
| {
"content_hash": "4da66129a4e3e68c6b44a8e4b1e90435",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 71,
"avg_line_length": 23.545454545454547,
"alnum_prop": 0.581081081081081,
"repo_name": "plish/Trolly",
"id": "23a417f4e35c4c96655dc0e50ce3fcbefd5dc957",
"size": "518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55470"
}
],
"symlink_target": ""
} |
from django.conf.urls import *

from .views import ArticleListView, ArticleDetailView

# URL routes for the example article app: list view at the root, detail
# view keyed by slug.
# NOTE(review): ``patterns('', ...)`` was deprecated in Django 1.8 and
# removed in 1.10 — confirm the targeted Django version before upgrading.
urlpatterns = patterns('',
    url(r'^$', ArticleListView.as_view(), name='article-list'),
    url(r'^(?P<slug>[^/]+)/$', ArticleDetailView.as_view(), name='article-details'),
)
| {
"content_hash": "6789e3ba1b9310c0936565e7f817fe55",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 84,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.6818181818181818,
"repo_name": "jrief/django-parler",
"id": "a82e08b39a231b923526fbe5c6762909b8b93884",
"size": "264",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "example/article/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4695"
},
{
"name": "HTML",
"bytes": "6000"
},
{
"name": "Python",
"bytes": "216359"
}
],
"symlink_target": ""
} |
import sys, os

# Make the project root (two directories up) importable so the example
# settings and apps resolve.
sys.path.insert(0, os.path.split(os.path.split(os.getcwd())[0])[0])

from django.core.management import execute_manager

try:
    from . import settings  # Assumed to be in the same directory.
except ImportError:
    # Fix: removed the redundant ``import sys`` that shadowed the
    # module-level import inside this handler.
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| {
"content_hash": "f554c661b8e99569fb6bc7c76970a645",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 299,
"avg_line_length": 44,
"alnum_prop": 0.7159090909090909,
"repo_name": "bjarnoldus/django-roa",
"id": "742cfeb7271ecedb7b4c642fa5b3417515e6e7cf",
"size": "616",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/django_roa_client/manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1931"
},
{
"name": "Python",
"bytes": "80515"
}
],
"symlink_target": ""
} |
import torch
from typing import List
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
@Seq2SeqEncoder.register("compose")
class ComposeEncoder(Seq2SeqEncoder):
    """A `Seq2SeqEncoder` that chains several encoders end to end.

    Among other things, this can be used to add a "pre-contextualizer"
    before a Seq2SeqEncoder.

    Registered as a `Seq2SeqEncoder` with name "compose".

    # Parameters

    encoders : `List[Seq2SeqEncoder]`, required.
        A non-empty list of encoders to compose. The encoders must match in
        bidirectionality.
    """

    def __init__(self, encoders: List[Seq2SeqEncoder]):
        super().__init__()
        self.encoders = encoders
        # Register each stage so its parameters are tracked by PyTorch.
        for index, stage in enumerate(encoders):
            self.add_module("encoder%d" % index, stage)

        # All stages must agree on bidirectionality. Note this check is
        # deliberately performed before the emptiness check (an empty list
        # makes all()=True, any()=False and raises here), matching the
        # original validation order.
        directions = [stage.is_bidirectional() for stage in encoders]
        self.bidirectional = all(directions)
        if all(directions) != any(directions):
            raise ValueError("All encoders need to match in bidirectionality.")

        if len(self.encoders) < 1:
            raise ValueError("Need at least one encoder.")

        # Adjacent stages must have compatible dimensions.
        for upstream, downstream in zip(encoders, encoders[1:]):
            if upstream.get_output_dim() != downstream.get_input_dim():
                raise ValueError("Encoder input and output dimensions don't match.")

    def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor = None) -> torch.Tensor:
        """
        # Parameters

        inputs : `torch.Tensor`, required.
            A tensor of shape (batch_size, timesteps, input_dim)
        mask : `torch.BoolTensor`, optional (default = `None`).
            A tensor of shape (batch_size, timesteps).

        # Returns

        A tensor computed by composing the sequence of encoders.
        """
        hidden = inputs
        for stage in self.encoders:
            hidden = stage(hidden, mask)
        return hidden

    def get_input_dim(self) -> int:
        """Input dimension expected by the first stage."""
        return self.encoders[0].get_input_dim()

    def get_output_dim(self) -> int:
        """Output dimension produced by the last stage."""
        return self.encoders[-1].get_output_dim()

    def is_bidirectional(self) -> bool:
        """Whether every composed stage is bidirectional."""
        return self.bidirectional
| {
"content_hash": "db278ad3ac496915fc4cf978a9b540a1",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 95,
"avg_line_length": 34.028985507246375,
"alnum_prop": 0.6448040885860307,
"repo_name": "allenai/allennlp",
"id": "c0dfc88f429181d7b9c9471f15c64eace5e14019",
"size": "2348",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "allennlp/modules/seq2seq_encoders/compose_encoder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39870"
},
{
"name": "Dockerfile",
"bytes": "1190"
},
{
"name": "Jsonnet",
"bytes": "4469"
},
{
"name": "Makefile",
"bytes": "5306"
},
{
"name": "Perl",
"bytes": "101"
},
{
"name": "Python",
"bytes": "3575059"
},
{
"name": "Scilab",
"bytes": "4085"
},
{
"name": "Shell",
"bytes": "2092"
}
],
"symlink_target": ""
} |
"""Extractors for configuration and job-xml nodes"""
from os import path
from typing import Dict, List
import xml.etree.ElementTree as ET
from o2a.converter.constants import HDFS_FOLDER
from o2a.converter.exceptions import ParseException
from o2a.o2a_libs import el_parser
# XML tag names used when traversing Oozie configuration documents.
TAG_CONFIGURATION = "configuration"
TAG_PROPERTY = "property"
TAG_NAME = "name"
TAG_VALUE = "value"
TAG_JOB_XML = "job-xml"
def extract_properties_from_configuration_node(config_node: ET.Element) -> Dict[str, str]:
    """Build a {name: value} mapping from a ``configuration`` element.

    Every ``property`` child must carry non-empty ``name`` and ``value``
    children; each value is run through the EL-to-Jinja translator.

    :raises ParseException: when the property structure or content is invalid.
    """
    translated: Dict[str, str] = {}
    for property_node in config_node.findall(TAG_PROPERTY):
        name_node = property_node.find(TAG_NAME)
        value_node = property_node.find(TAG_VALUE)
        if name_node is None or value_node is None:
            raise ParseException(
                'Element "property" should have direct children elements: name, value. One of them does not '
                "exist. Make sure the configuration element is valid."
            )
        name, value = name_node.text, value_node.text
        if not name:
            raise ParseException(
                'Element "name" should have content, however its value is empty. Make sure the element has '
                "the correct content."
            )
        if not value:
            raise ParseException(
                'Element "value" should have content, however its value is empty. Make sure the element has '
                "the correct content."
            )
        translated[name] = el_parser.translate(value)
    return translated
def extract_properties_from_job_xml_nodes(job_xml_nodes: List[ET.Element], input_directory_path: str):
    """Extracts configuration properties from ``job_xml`` nodes.

    Each node's text names an XML file (under the workflow's HDFS folder)
    whose ``configuration`` root is parsed for ``property`` entries.

    :param job_xml_nodes: ``job-xml`` elements from the workflow document.
    :param input_directory_path: Root directory of the input workflow.
    :return: Merged {name: value} dict; later files override earlier ones.
    :raises ParseException: on empty node text or an empty configuration root.
    """
    properties_dict: Dict[str, str] = dict()
    for xml_file in job_xml_nodes:
        file_name = xml_file.text
        if not file_name:
            raise ParseException(
                'Element "job-xml" should have content, however its value is empty. Make sure the element '
                "has the correct content."
            )
        file_path = path.join(input_directory_path, HDFS_FOLDER, file_name)
        config_tree = ET.parse(file_path)
        config_node = config_tree.getroot()
        # Fix: ``if not config_node`` relied on Element truthiness, which is
        # deprecated (an element with no children is falsy, triggering a
        # FutureWarning). Test explicitly for a missing or childless root;
        # behavior is unchanged.
        if config_node is None or len(config_node) == 0:
            raise ParseException(
                "A job-xml configuration node is specified in the workflow XML, however its value is empty."
                "Make sure the path to a configuration file is valid."
            )
        new_properties = extract_properties_from_configuration_node(config_node)
        properties_dict.update(new_properties)
    return properties_dict
| {
"content_hash": "e99101f94c2d3721e97934485c1b1711",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 109,
"avg_line_length": 37.861111111111114,
"alnum_prop": 0.6419662509170947,
"repo_name": "GoogleCloudPlatform/oozie-to-airflow",
"id": "755975902e277e717c0bcb103e54f3fd9aa7a404",
"size": "3321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "o2a/utils/config_extractors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "528273"
},
{
"name": "Shell",
"bytes": "57460"
},
{
"name": "Smarty",
"bytes": "31948"
}
],
"symlink_target": ""
} |
from alembic import context
from prettyconf import config
from sqlalchemy import create_engine
from thales import db
# Database connection string, read from the environment via prettyconf.
DATABASE_URL = config('DATABASE_URL')
# SQLAlchemy metadata Alembic compares against for autogeneration.
METADATA = db.metadata
def run_migrations_offline():
    """Run migrations in 'offline' mode: emit SQL without a DB connection."""
    # Fix: Alembic's context.configure() takes ``target_metadata`` (as used
    # in run_migrations_online below); ``metadata`` is not a recognized
    # argument.
    context.configure(url=DATABASE_URL, target_metadata=METADATA)
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode, over a live database connection."""
    engine = create_engine(DATABASE_URL)
    with engine.connect() as conn:
        # Bind the migration context to the open connection.
        context.configure(connection=conn, target_metadata=METADATA)
        with context.begin_transaction():
            context.run_migrations()
# Alembic executes this module directly; pick SQL-script generation or a
# live-connection run based on how alembic was invoked.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| {
"content_hash": "7410ca07fc9c565ca1b5d47cff56eeee",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 25.142857142857142,
"alnum_prop": 0.7272727272727273,
"repo_name": "cacarrara/thales",
"id": "f7a543f16cbc21ae85644a0070a6d9a51dc7fa7a",
"size": "704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thales/migrations/env.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "828"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "8465"
}
],
"symlink_target": ""
} |
from typing import Generator
from typing import Optional
from typing import Union
import pytest
from _pytest._io.saferepr import saferepr
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.config.argparsing import Parser
from _pytest.fixtures import FixtureDef
from _pytest.fixtures import SubRequest
def pytest_addoption(parser: Parser) -> None:
    """Register the --setup-only / --setup-show flags in the debugconfig group."""
    debug_group = parser.getgroup("debugconfig")
    # Each entry: (primary flag, alias, help text); the primary flag
    # determines the option's dest name.
    for primary, alias, help_text in (
        ("--setuponly", "--setup-only", "only setup fixtures, do not execute tests."),
        ("--setupshow", "--setup-show", "show setup of fixtures while executing tests."),
    ):
        debug_group.addoption(primary, alias, action="store_true", help=help_text)
@pytest.hookimpl(hookwrapper=True)
def pytest_fixture_setup(
    fixturedef: FixtureDef[object], request: SubRequest
) -> Generator[None, None, None]:
    """Hook wrapper: after the fixture has been set up, cache its param (if
    any) and report the SETUP step when --setup-show is active."""
    yield
    if request.config.option.setupshow:
        if hasattr(request, "param"):
            # Save the fixture parameter so ._show_fixture_action() can
            # display it now and during the teardown (in .finish()).
            if fixturedef.ids:
                # Prefer the user-supplied id (callable or list) over the
                # raw param value.
                if callable(fixturedef.ids):
                    param = fixturedef.ids(request.param)
                else:
                    param = fixturedef.ids[request.param_index]
            else:
                param = request.param
            fixturedef.cached_param = param  # type: ignore[attr-defined]
        _show_fixture_action(fixturedef, "SETUP")
def pytest_fixture_post_finalizer(fixturedef: FixtureDef[object]) -> None:
    """Report the TEARDOWN step (when --setup-show is active) and drop the
    cached fixture parameter."""
    if fixturedef.cached_result is None:
        return
    config = fixturedef._fixturemanager.config
    if not config.option.setupshow:
        return
    _show_fixture_action(fixturedef, "TEARDOWN")
    if hasattr(fixturedef, "cached_param"):
        del fixturedef.cached_param  # type: ignore[attr-defined]
def _show_fixture_action(fixturedef: FixtureDef[object], msg: str) -> None:
    """Write one SETUP/TEARDOWN report line for *fixturedef* to the terminal,
    temporarily suspending output capture so the line is visible."""
    config = fixturedef._fixturemanager.config
    capman = config.pluginmanager.getplugin("capturemanager")
    if capman:
        capman.suspend_global_capture()
    tw = config.get_terminal_writer()
    tw.line()
    # Indent by fixture scope depth.
    tw.write(" " * 2 * fixturedef.scopenum)
    tw.write(
        "{step} {scope} {fixture}".format(
            step=msg.ljust(8),  # align the output to TEARDOWN
            scope=fixturedef.scope[0].upper(),
            fixture=fixturedef.argname,
        )
    )
    if msg == "SETUP":
        # List the fixture's own dependencies (minus the implicit request).
        deps = sorted(arg for arg in fixturedef.argnames if arg != "request")
        if deps:
            tw.write(" (fixtures used: {})".format(", ".join(deps)))
    # cached_param was stored by pytest_fixture_setup for parametrized
    # fixtures; shown for both SETUP and TEARDOWN.
    if hasattr(fixturedef, "cached_param"):
        tw.write("[{}]".format(saferepr(fixturedef.cached_param, maxsize=42)))  # type: ignore[attr-defined]
    tw.flush()
    if capman:
        capman.resume_global_capture()
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
    """--setup-only implies --setup-show; never overrides the exit code."""
    opts = config.option
    if opts.setuponly:
        opts.setupshow = True
    return None
| {
"content_hash": "1fe3157a386a77a760c6af5a025781a0",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 108,
"avg_line_length": 32.9468085106383,
"alnum_prop": 0.636099451081692,
"repo_name": "nicoddemus/pytest",
"id": "44a1094c0d24faa3c0573ee24e1fd604e49266bc",
"size": "3097",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "src/_pytest/setuponly.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "192"
},
{
"name": "Python",
"bytes": "2488578"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
} |
import string
import re
import math
import nltk
class Resources(object):
    """Static container of shared lexical resources, regexes and caches for
    the semantic-similarity pipeline.

    Note: this is Python 2 era code (``dict.iterkeys``, ``str.decode``).
    """

    @staticmethod
    def set_config(conf):
        # Store the config object so get_global_freq() can locate the
        # frequency file later.
        Resources.conf = conf

    @staticmethod
    def ensure_nltk_packages():
        # Download the NLTK data packages this module depends on.
        for package in ('stopwords', 'punkt', 'wordnet'):
            nltk.download(package)

    """ Thresholds """
    # Log-frequency above which an adverb counts as "frequent".
    adverb_threshold = math.log(500000)

    punctuation = set(string.punctuation)
    # NOTE(review): non-raw "\W+" works only because \W is not a recognized
    # string escape; prefer r"\W+".
    punct_re = re.compile("\W+")
    # Numerals like "1,234.5k": group 1 = digits, group 2 = k/m multiplier.
    num_re = re.compile(r'^([0-9][0-9.,]*)([mMkK]?)$', re.UNICODE)

    question_starters = set([
        'is', 'does', 'do', 'what', 'where', 'how', 'why',
    ])

    # Object/possessive pronoun -> subject form.
    pronouns = {
        'me': 'i', 'my': 'i',
        'your': 'you',
        'him': 'he', 'his': 'he',
        'her': 'she',
        'us': 'we', 'our': 'we',
        'them': 'they', 'their': 'they',
    }

    written_numbers = {
        "zero": 0, "one": 1, "two": 2, "three": 3, "four": 4, "five": 5,
        "six": 6, "seven": 7, "eight": 8, "nine": 9, "ten": 10
    }

    # English stopwords minus pronouns (pronouns stay meaningful here).
    # ``iterkeys`` is Python 2 only.
    stopwords = set(
        nltk.corpus.stopwords.words('english')) - set(pronouns.iterkeys())

    # model -> {word -> candidate set}; memoizes twitter_candidates().
    twitter_cache = {}
    # Lazily loaded {word -> log frequency}; see get_global_freq().
    _global_freqs = None
    # word -> bool; memoizes is_frequent_adverb().
    _adverb_cache = {}

    @staticmethod
    def is_pronoun_equivalent(word1, word2):
        # True iff both words normalize to the same subject pronoun.
        l1 = word1.lower()
        l2 = word2.lower()
        if l1 in Resources.pronouns and l2 in Resources.pronouns:
            return Resources.pronouns[l1] == Resources.pronouns[l2]
        return False

    @staticmethod
    def get_global_freq(lookup):
        # Lazily load the frequency file: each line is "<count> <word>".
        if not Resources._global_freqs:
            Resources._global_freqs = {}
            with open(Resources.conf.get('global', 'freqs')) as f:
                for l in f:
                    try:
                        fd = l.decode('utf8').strip().split(' ')
                        word = fd[1]
                        # +2 smoothing keeps log() defined for zero counts.
                        logfreq = math.log(int(fd[0]) + 2)
                        Resources._global_freqs[word] = logfreq
                    except (ValueError, IndexError):
                        continue
        # Default 2 plays the role of an unseen word's log-frequency.
        return Resources._global_freqs.get(lookup, 2)

    @staticmethod
    def is_frequent_adverb(word, pos):
        # An adverb (POS tag RB*) whose global log-frequency exceeds the
        # threshold; memoized per word.
        if word not in Resources._adverb_cache:
            ans = (
                pos is not None and pos[:2] == 'RB' and
                Resources.get_global_freq(word) > Resources.adverb_threshold)
            Resources._adverb_cache[word] = ans
        return Resources._adverb_cache[word]

    @staticmethod
    def is_num_equivalent(word1, word2):
        # True iff both words parse as numbers and compare equal.
        num1 = Resources.to_num(word1)
        num2 = Resources.to_num(word2)
        if num1 and num2:
            return num1 == num2
        return False

    @staticmethod
    def to_num(word):
        # Parse a written number ("two") or numeral ("1.5k"); returns False
        # when the word is not a number.
        if word in Resources.written_numbers:
            return Resources.written_numbers[word]
        m = Resources.num_re.match(word)
        if not m:
            return False
        num = float(m.group(1).replace(',', ''))
        if m.group(2):
            # Apply the thousand/million suffix multiplier.
            c = m.group(2).lower()
            if c == 'k':
                num *= 1000
            else:
                num *= 1000000
        return num

    @staticmethod
    def twitter_candidates(word, model):
        # Generate spelling/normalization variants of a tweet token and keep
        # only those present in the model's vocabulary; memoized per model.
        if model not in Resources.twitter_cache:
            Resources.twitter_cache[model] = {}
        if word not in Resources.twitter_cache[model]:
            # adding word as hashtag
            candidates = set(['#' + word])
            candidates |= set(Resources.norvig_spellchecker(word))
            candidates.add(Resources.trim_dup_letters(word))
            for a, b in Resources.part_of_vocab(word, model):
                candidates.add(a)
                candidates.add(b)
            Resources.twitter_cache[model][word] = set(
                filter(lambda x: x in model, candidates))
        return Resources.twitter_cache[model][word]

    @staticmethod
    def trim_dup_letters(word):
        # Collapse runs of repeated letters (e.g. "coool" -> "col").
        new_w = word[0]
        for c in word:
            if not new_w[-1] == c:
                new_w += c
        return new_w

    @staticmethod
    def norvig_spellchecker(word, dist=2):
        # Edit-distance-1 variants a la Norvig's spelling corrector.
        # NOTE(review): the ``dist`` parameter is accepted but unused.
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
        deletes = [a + b[1:] for a, b in splits if b]
        transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b) > 1]
        replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]
        inserts = [a + c + b for a, b in splits for c in alphabet]
        return set(deletes + transposes + inserts + replaces)

    @staticmethod
    def part_of_vocab(word, dictionary):
        # Split a compound-looking word into two in-vocabulary parts
        # (prefix >= 3 chars, suffix >= 3 chars; word must be >= 5 chars).
        if len(word) < 5:
            return []
        splits = [(word[:i], word[i:]) for i in range(3, len(word) - 2)]
        parts = []
        for a, b in splits:
            if a in dictionary and b in dictionary:
                parts.append((a, b))
        return parts
| {
"content_hash": "9dd113d4ff9a6f47d637e18ac7c421b8",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 79,
"avg_line_length": 33.12413793103448,
"alnum_prop": 0.5252966895690193,
"repo_name": "recski/semeval",
"id": "832e7a5a1fc753f37fdc5e46fab9a515110a2998",
"size": "4803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "semeval/resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "119699"
},
{
"name": "Shell",
"bytes": "4077"
}
],
"symlink_target": ""
} |
from pypom import Region
from selenium.webdriver.common.by import By
from pages.desktop.base import Base
class Home(Base):
    """Addons Home page"""

    # Locators for the curated-category and featured/popular shelves shown
    # on the AMO homepage.
    _extensions_category_locator = (By.CLASS_NAME, 'Home-CuratedCollections')
    _featured_extensions_locator = (By.CLASS_NAME, 'Home-FeaturedExtensions')
    _featured_themes_locator = (By.CLASS_NAME, 'Home-FeaturedThemes')
    _popular_extensions_locator = (By.CLASS_NAME, 'Home-PopularExtensions')
    _popular_themes_locator = (By.CLASS_NAME, 'Home-PopularThemes')
    _themes_category_locator = (By.CLASS_NAME, 'Home-CuratedThemes')
    _toprated_themes_locator = (By.CLASS_NAME, 'Home-TopRatedThemes')

    @property
    def popular_extensions(self):
        """Region for the "Popular extensions" shelf."""
        el = self.find_element(*self._popular_extensions_locator)
        return self.Extensions(self, el)

    @property
    def featured_extensions(self):
        """Region for the "Featured extensions" shelf."""
        el = self.find_element(*self._featured_extensions_locator)
        return self.Extensions(self, el)

    @property
    def featured_themes(self):
        """Region for the "Featured themes" shelf."""
        el = self.find_element(*self._featured_themes_locator)
        return self.Themes(self, el)

    @property
    def popular_themes(self):
        """Region for the "Popular themes" shelf."""
        el = self.find_element(*self._popular_themes_locator)
        return self.Themes(self, el)

    @property
    def toprated_themes(self):
        """Region for the "Top rated themes" shelf."""
        el = self.find_element(*self._toprated_themes_locator)
        return self.Themes(self, el)

    @property
    def extension_category(self):
        """Region for the curated extension-category shelf."""
        el = self.find_element(*self._extensions_category_locator)
        return self.Category(self, el)

    @property
    def theme_category(self):
        """Region for the curated theme-category shelf."""
        el = self.find_element(*self._themes_category_locator)
        return self.Category(self, el)

    class Category(Region):
        """A curated category shelf containing linked category items."""

        _extensions_locator = (By.CLASS_NAME, 'Home-SubjectShelf-list-item')

        @property
        def list(self):
            """All category items in this shelf."""
            items = self.find_elements(*self._extensions_locator)
            return [self.CategoryDetail(self.page, el) for el in items]

        class CategoryDetail(Region):
            """A single category link inside a curated shelf."""

            _extension_link_locator = (By.CLASS_NAME, 'Home-SubjectShelf-link')
            _extension_name_locator = (
                By.CSS_SELECTOR, '.Home-SubjectShelf-link span')

            @property
            def name(self):
                """Visible name of the category."""
                return self.find_element(*self._extension_name_locator).text

            def click(self):
                """Follow the category link; returns the Extensions page."""
                self.root.click()
                # Imported here to avoid a circular import at module load.
                from pages.desktop.extensions import Extensions
                return Extensions(self.selenium, self.page.base_url)

    class Extensions(Region):
        """An extensions shelf (featured or popular)."""

        _browse_all_locator = (By.CSS_SELECTOR, '.Card-footer-link > a')
        _extensions_locator = (By.CLASS_NAME, 'SearchResult')
        _extension_card_locator = (By.CSS_SELECTOR, '.Home-category-li')

        @property
        def list(self):
            """All extension results in this shelf."""
            items = self.find_elements(*self._extensions_locator)
            return [Home.ExtensionsList(self.page, el) for el in items]

        @property
        def browse_all(self):
            """Click "Browse all"; returns the loaded Search page."""
            self.find_element(*self._browse_all_locator).click()
            from pages.desktop.search import Search
            search = Search(self.selenium, self.page.base_url)
            return search.wait_for_page_to_load()

    class Themes(Region):
        """A themes shelf (featured, popular or top rated)."""

        _browse_all_locator = (By.CSS_SELECTOR, '.Card-footer-link > a')
        _themes_locator = (By.CLASS_NAME, 'SearchResult--theme')
        _theme_card_locator = (By.CSS_SELECTOR, '.Home-category-li')

        @property
        def list(self):
            """All theme results in this shelf."""
            items = self.find_elements(*self._themes_locator)
            return [Home.ExtensionsList(self.page, el) for el in items]

        @property
        def browse_all(self):
            """Click "Browse all"; returns the loaded Search page."""
            self.find_element(*self._browse_all_locator).click()
            from pages.desktop.search import Search
            search = Search(self.selenium, self.page.base_url)
            return search.wait_for_page_to_load()

    class ExtensionsList(Region):
        """A single search-result card within a shelf."""

        _extension_link_locator = (By.CLASS_NAME, 'SearchResult-link')
        _extension_name_locator = (By.CLASS_NAME, 'SearchResult-name')

        @property
        def name(self):
            """Visible name of the add-on."""
            return self.find_element(*self._extension_name_locator).text

        def click(self):
            """Open the add-on's detail page; returns the Extensions page."""
            self.find_element(*self._extension_link_locator).click()
            from pages.desktop.extensions import Extensions
            return Extensions(self.selenium, self.page.base_url)
| {
"content_hash": "cc4b1d94926a75e404d0422c2ffaaab1",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 79,
"avg_line_length": 36.8099173553719,
"alnum_prop": 0.6333632689717108,
"repo_name": "atiqueahmedziad/addons-server",
"id": "fcd625495c8ff5e059a9ffc23094809cace13530",
"size": "4454",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/ui/pages/desktop/home.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "810065"
},
{
"name": "Dockerfile",
"bytes": "2868"
},
{
"name": "HTML",
"bytes": "599024"
},
{
"name": "JavaScript",
"bytes": "1070220"
},
{
"name": "Makefile",
"bytes": "820"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "5272277"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "11171"
},
{
"name": "Smarty",
"bytes": "1497"
}
],
"symlink_target": ""
} |
from django import VERSION as DJANGO_VERSION
from django_comments_xtd.conf import settings
if DJANGO_VERSION[1] <= 5: # Django <= 1.5
from django_comments_xtd.compat import import_by_path as import_string
elif 6 <= DJANGO_VERSION[1] < 8: # Django v1.6.x and 1.7.x
from django.utils.module_loading import import_by_path as import_string
else: # Django >= 1.8
from django.utils.module_loading import import_string
# While there's official support for Django version prior to 1.8
try:
import django_comments
import django_comments.urls as django_comments_urls
from django_comments.models import Comment
from django_comments.feeds import LatestCommentFeed
from django_comments.forms import CommentForm
from django_comments.signals import comment_was_posted
except ImportError:
import django.contrib.comments as django_comments
import django.contrib.comments.urls as django_comments_urls
from django.contrib.comments import get_form
from django.contrib.comments.models import Comment
from django.contrib.comments.feeds import LatestCommentFeed
from django.contrib.comments.forms import CommentForm
from django.contrib.comments.signals import comment_was_posted
def get_model():
    """Return the comment model class named by ``COMMENTS_XTD_MODEL``."""
    return import_string(settings.COMMENTS_XTD_MODEL)
def get_form():
    """Return the comment form class named by ``COMMENTS_XTD_FORM_CLASS``.

    Intentionally shadows the ``get_form`` imported above for older Django
    versions.
    """
    return import_string(settings.COMMENTS_XTD_FORM_CLASS)
VERSION = (1, 4, 0, 'b', 2)  # following PEP 440


def get_version():
    """Render VERSION as a PEP 440 string, e.g. (1, 4, 0, 'b', 2) -> '1.4b2'.

    The micro component is omitted when zero; status 'f' marks a final
    release and suppresses the pre-release suffix.
    """
    major, minor, micro, status, iteration = VERSION
    version = '%s.%s' % (major, minor)
    if micro:
        version += '.%s' % micro
    if status != 'f':
        version += '%s%s' % (status, iteration)
    return version
| {
"content_hash": "82e1b5e4f8b43c0f469ce506b0541e6b",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 75,
"avg_line_length": 38.06818181818182,
"alnum_prop": 0.7253731343283583,
"repo_name": "agilosoftware/django-comments-xtd",
"id": "a80a929406adb6cf75b829d183c0739f51c13fbf",
"size": "1675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_comments_xtd/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "33981"
},
{
"name": "Python",
"bytes": "145382"
},
{
"name": "Shell",
"bytes": "3705"
}
],
"symlink_target": ""
} |
"""
Port description language
This module implements a description mini-language for ports, and provides
functions to parse it and to use it to directly construct appropriate
network server services or to directly listen on them.
Here are some examples. They assume the following toy resource and factory
definitions::
class Simple(resource.Resource):
isLeaf = True
def render_GET(self, request):
return "<html>Hello, world!</html>"
class FingerProtocol(protocol.Protocol):
def connectionMade(self):
self.transport.loseConnection()
class FingerFactory(protocol.ServerFactory):
protocol = FingerProtocol
Examples using SSL require a private key and a certificate. If a private key
file name (C{privateKey}) isn't provided, a "server.pem" file is assumed to
exist which contains the private key. If the certificate file name (C{certKey})
isn't provided, the private key file is assumed to contain the certificate as
well::
>>> s=service("80", server.Site(Simple()))
>>> s=service("tcp:80", server.Site(Simple()))
>>> s=service("tcp:80:interface=127.0.0.1", server.Site(Simple()))
>>> s=service("ssl:443", server.Site(Simple()))
>>> s=service("ssl:443:privateKey=mykey.pem", server.Site(Simple()))
>>> s=service("ssl:443:privateKey=mykey.pem:certKey=cert.pem", server.Site(Simple()))
>>> s=service("unix:/var/run/finger", FingerFactory())
>>> s=service("unix:/var/run/finger:mode=660", FingerFactory())
>>> p=listen("80", server.Site(Simple()))
>>> p=listen("tcp:80", server.Site(Simple()))
>>> p=listen("tcp:80:interface=127.0.0.1", server.Site(Simple()))
>>> p=listen("ssl:443", server.Site(Simple()))
>>> p=listen("ssl:443:privateKey=mykey.pem", server.Site(Simple()))
>>> p=listen("ssl:443:privateKey=mykey.pem:certKey=cert.pem", server.Site(Simple()))
>>> p=listen("unix:/var/run/finger", FingerFactory())
>>> p=listen("unix:/var/run/finger:mode=660", FingerFactory())
>>> p=listen("unix:/var/run/finger:lockfile=0", FingerFactory())
See specific function documentation for more information.
Maintainer: Moshe Zadka
"""
from __future__ import generators
def _parseTCP(factory, port, interface="", backlog=50):
return (int(port), factory), {'interface': interface,
'backlog': int(backlog)}
def _parseUNIX(factory, address, mode='666', backlog=50, lockfile=True):
return (
(address, factory),
{'mode': int(mode, 8), 'backlog': int(backlog),
'wantPID': bool(int(lockfile))})
def _parseSSL(factory, port, privateKey="server.pem", certKey=None,
              sslmethod=None, interface='', backlog=50):
    """Build (args, kwargs) for reactor.listenSSL.

    When *certKey* is omitted the certificate is read from the
    *privateKey* file.  *sslmethod*, if given, names an attribute of
    ``ssl.SSL`` (presumably a pyOpenSSL method constant -- confirm).
    """
    from twisted.internet import ssl
    if certKey is None:
        certKey = privateKey
    extra = {}
    if sslmethod is not None:
        extra['sslmethod'] = getattr(ssl.SSL, sslmethod)
    context_factory = ssl.DefaultOpenSSLContextFactory(privateKey, certKey, **extra)
    posargs = (int(port), factory, context_factory)
    return posargs, {'interface': interface, 'backlog': int(backlog)}
# Dispatch table: description type prefix -> argument-parsing function.
# parse() indexes this directly, so an unknown type raises KeyError
# (the behaviour documented on parse()).
_funcs = {"tcp": _parseTCP,
          "unix": _parseUNIX,
          "ssl": _parseSSL}
_OP, _STRING = range(2)
def _tokenize(description):
current = ''
ops = ':='
nextOps = {':': ':=', '=': ':'}
description = iter(description)
for n in description:
if n in ops:
yield _STRING, current
yield _OP, n
current = ''
ops = nextOps[n]
elif n=='\\':
current += description.next()
else:
current += n
yield _STRING, current
def _parse(description):
    """
    Convert a description string into ``(positional_args, keyword_args)``.

    Groups of one string between ``:`` operators become positional
    arguments; ``name=value`` groups become keyword arguments.
    """
    args, kw = [], {}

    def flush(parts):
        # One token -> positional argument; two -> keyword argument.
        if len(parts) == 1:
            args.append(parts[0])
        else:
            kw[parts[0]] = parts[1]

    sofar = ()
    for kind, value in _tokenize(description):
        if kind is _STRING:
            sofar += (value,)
        elif value == ':':
            flush(sofar)
            sofar = ()
    flush(sofar)
    return args, kw
def parse(description, factory, default=None):
    """
    Parse the description of a reliable virtual circuit server (a TCP
    port, a UNIX domain socket or an SSL port) and return the data
    necessary to call the reactor methods to listen on it.

    A description with no colons is a bare port using the default type
    (C{tcp}, unless a non-C{None} *default* overrides it).  Otherwise the
    string is colon-separated: the first part is the type (currently only
    ssl, unix or tcp), followed by positional and/or C{name=value}
    keyword arguments, which may be mixed.  Escape C{':'}, C{'='} or
    C{'\\'} inside a value with a C{'\\'}.

    TCP takes the port and, optionally, the interface to bind and the
    backlog.  UNIX sockets take the socket file name and, optionally, an
    octal mode and the backlog.  SSL takes the port and, optionally,
    privateKey, certKey, sslmethod, interface and backlog.

    @type description: C{str}
    @type factory: L{twisted.internet.interfaces.IProtocolFactory}
    @type default: C{str} or C{None}
    @rtype: C{tuple}
    @return: C{(name, args, kw)} -- the reactor method name (sans
        C{'listen'}), and the positional and keyword arguments for it.
    @raises ValueError: if the string is formatted incorrectly.
    @raises KeyError: if the type is other than unix, ssl or tcp.
    """
    args, kw = _parse(description)
    if not args or (len(args) == 1 and not kw):
        # Bare port: prepend the (possibly overridden) default type.
        args.insert(0, default or 'tcp')
    kind = args[0]
    parsed_args, parsed_kw = _funcs[kind](factory, *args[1:], **kw)
    return (kind.upper(), parsed_args, parsed_kw)
def service(description, factory, default=None):
    """Return the service corresponding to a description.

    @type description: C{str}
    @type factory: L{twisted.internet.interfaces.IProtocolFactory}
    @type default: C{str} or C{None}
    @rtype: C{twisted.application.service.IService}
    @return: the service corresponding to a description of a reliable
        virtual circuit server.  See C{parse} for the description syntax.
    """
    from twisted.application import internet
    kind, args, kw = parse(description, factory, default)
    # e.g. kind 'TCP' -> internet.TCPServer
    server_class = getattr(internet, kind + 'Server')
    return server_class(*args, **kw)
def listen(description, factory, default=None):
    """Listen on a port corresponding to a description.

    @type description: C{str}
    @type factory: L{twisted.internet.interfaces.IProtocolFactory}
    @type default: C{str} or C{None}
    @rtype: C{twisted.internet.interfaces.IListeningPort}
    @return: the port corresponding to a description of a reliable
        virtual circuit server.  See C{parse} for the description syntax.
    """
    from twisted.internet import reactor
    kind, args, kw = parse(description, factory, default)
    # e.g. kind 'SSL' -> reactor.listenSSL
    listen_method = getattr(reactor, 'listen' + kind)
    return listen_method(*args, **kw)


__all__ = ['parse', 'service', 'listen']
| {
"content_hash": "9a3994c49e384c7b64402e2002b5673d",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 89,
"avg_line_length": 39.4974358974359,
"alnum_prop": 0.6560633601661906,
"repo_name": "sorenh/cc",
"id": "3654413488d689e4c440062065baeef7f3df593a",
"size": "7839",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "vendor/Twisted-10.0.0/twisted/application/strports.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "707"
},
{
"name": "Python",
"bytes": "398663"
},
{
"name": "Shell",
"bytes": "12374"
}
],
"symlink_target": ""
} |
import contextlib
import os
import unittest
from systrace import decorators
from systrace import run_systrace
from systrace import update_systrace_trace_viewer
from systrace import util
# Test fixtures live in ../test_data relative to this file.
TEST_DIR = os.path.join(os.path.dirname(__file__), '..', 'test_data')
# A compressed atrace capture and the decompressed text that running it
# through --from-file is expected to reproduce.
COMPRESSED_ATRACE_DATA = os.path.join(TEST_DIR, 'compressed_atrace_data.txt')
DECOMPRESSED_ATRACE_DATA = os.path.join(TEST_DIR,
                                        'decompressed_atrace_data.txt')
# Path guaranteed not to exist, used by the error-handling test.
NON_EXISTENT_DATA = os.path.join(TEST_DIR, 'THIS_FILE_DOES_NOT_EXIST.txt')
class AtraceFromFileAgentTest(unittest.TestCase):
    """End-to-end tests for systrace's ``--from-file`` mode."""

    @decorators.HostOnlyTest
    def test_from_file(self):
        """--from-file on a compressed capture reproduces the expected trace."""
        update_systrace_trace_viewer.update(force_update=True)
        self.assertTrue(os.path.exists(
            update_systrace_trace_viewer.SYSTRACE_TRACE_VIEWER_HTML_FILE))
        output_file_name = util.generate_random_filename_for_test()
        try:
            # Use from-file to create a specific expected output.
            run_systrace.main_impl(['./run_systrace.py',
                                    '--from-file',
                                    COMPRESSED_ATRACE_DATA,
                                    '-o',
                                    output_file_name])
            # Verify file contents.  contextlib.nested was removed in
            # Python 3; a single with-statement holding both context
            # managers is the equivalent, exception-safe form.
            with open(output_file_name, 'r') as f1, \
                    open(DECOMPRESSED_ATRACE_DATA, 'r') as f2:
                full_trace = f1.read()
                expected_contents = f2.read()
            self.assertTrue(expected_contents in full_trace)
        finally:
            # Clean up the generated viewer and output even on failure.
            os.remove(update_systrace_trace_viewer.SYSTRACE_TRACE_VIEWER_HTML_FILE)
            if os.path.exists(output_file_name):
                os.remove(output_file_name)

    @decorators.HostOnlyTest
    def test_missing_file(self):
        """--from-file with a nonexistent path raises IOError."""
        with self.assertRaises(IOError):
            run_systrace.main_impl(['./run_systrace.py',
                                    '--from-file',
                                    NON_EXISTENT_DATA])
| {
"content_hash": "4e8e0a0c940b8211fb7a8d1779f0c8c8",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 78,
"avg_line_length": 36.75471698113208,
"alnum_prop": 0.6155030800821355,
"repo_name": "catapult-project/catapult-csm",
"id": "2531c919f1d77abdf6fae71d994f6f8a3ccaec0e",
"size": "2138",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "systrace/systrace/tracing_agents/atrace_from_file_agent_unittest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43728"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "80325"
},
{
"name": "HTML",
"bytes": "11817766"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6207634"
},
{
"name": "Shell",
"bytes": "2558"
}
],
"symlink_target": ""
} |
"""Adding ability to mark domains as 'sensitive'
Revision ID: 4c50b903d1ae
Revises: 33de094da890
Create Date: 2015-12-30 10:19:30.057791
"""
# revision identifiers, used by Alembic.
revision = "4c50b903d1ae"
down_revision = "33de094da890"
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Add the nullable boolean ``sensitive`` column to ``domains``."""
    sensitive = sa.Column("sensitive", sa.Boolean(), nullable=True)
    op.add_column("domains", sensitive)
def downgrade():
    """Remove the ``sensitive`` column from ``domains``."""
    op.drop_column("domains", "sensitive")
| {
"content_hash": "59bf7dff0bb780706a24117bb8f871e6",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 81,
"avg_line_length": 25.48148148148148,
"alnum_prop": 0.7078488372093024,
"repo_name": "Netflix/lemur",
"id": "93d4a312c8aff45d8a8b056dce50aad00040ba52",
"size": "688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lemur/migrations/versions/4c50b903d1ae_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2728"
},
{
"name": "Dockerfile",
"bytes": "2597"
},
{
"name": "HTML",
"bytes": "314713"
},
{
"name": "JavaScript",
"bytes": "15496"
},
{
"name": "Makefile",
"bytes": "3791"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1530505"
},
{
"name": "Shell",
"bytes": "2339"
}
],
"symlink_target": ""
} |
class Config(object):
    """Base configuration; environment-specific subclasses override fields."""
    # Storage backend identifier and SQLite database path.
    PLATFORM = 'sqlite'
    DATABASE = '/tmp/blog.db'
    # Runtime flags, off by default; enabled per environment below.
    DEBUG = False
    TESTING = False
    # NOTE(review): placeholder secret -- must be overridden outside development.
    SECRET_KEY = 'development key'
    # Number of blog entries rendered per page.
    MAX_PAGE_ENTRIES = 5
class ProductionConfig(Config):
    """Production settings."""
    # NOTE(review): defines DATABASE_URI while the base class uses
    # PLATFORM/DATABASE -- confirm which key the application actually reads.
    DATABASE_URI = 'mysql://user@localhost/foo'
class DevelopmentConfig(Config):
    """Local development settings: debug mode on."""
    DEBUG = True
class TestinConfig(Config):
    """Test-run settings.

    NOTE(review): the name looks like a typo for ``TestingConfig``, but
    renaming would break any existing importers -- left as-is.
    """
    TESTING = True
| {
"content_hash": "06f756cca2b100adbac185674741dff6",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 44,
"avg_line_length": 21.75,
"alnum_prop": 0.6839080459770115,
"repo_name": "proudlygeek/proudlygeek-blog",
"id": "364c00dd2155453c44301a363c6b2e091af4f298",
"size": "348",
"binary": false,
"copies": "1",
"ref": "refs/heads/gae-support",
"path": "config/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6344"
},
{
"name": "JavaScript",
"bytes": "31516"
},
{
"name": "Python",
"bytes": "3219953"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_ha
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Autouse fixture: patch the module's Connection class for every test."""
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_system_ha.Connection')
    return connection_class_mock


# NOTE(review): this passes the fixture *function* itself (not a mock
# instance) to FortiOSHandler.  The tests below patch FortiOSHandler.set
# and .schema directly, so the connection object is never exercised --
# confirm before reusing fos_instance elsewhere.
fos_instance = FortiOSHandler(connection_mock)
def test_system_ha_creation(mocker):
    """Successful set: snake_case params are forwarded to FortiOSHandler.set
    as a hyphenated payload, and the module reports changed=True."""
    # schema() must not be consulted when a full config dict is supplied.
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    # Ansible-style module parameters: snake_case keys.
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_ha': {
            'arps': '3', 'arps_interval': '4', 'authentication': 'enable',
            'cpu_threshold': 'test_value_6', 'encryption': 'enable',
            'ftp_proxy_threshold': 'test_value_8', 'gratuitous_arps': 'enable',
            'group_id': '10', 'group_name': 'test_value_11', 'ha_direct': 'enable',
            'ha_eth_type': 'test_value_13', 'ha_mgmt_status': 'enable',
            'ha_uptime_diff_margin': '15', 'hb_interval': '16', 'hb_lost_threshold': '17',
            'hbdev': 'test_value_18', 'hc_eth_type': 'test_value_19', 'hello_holddown': '20',
            'http_proxy_threshold': 'test_value_21', 'imap_proxy_threshold': 'test_value_22',
            'inter_cluster_session_sync': 'enable', 'key': 'test_value_24',
            'l2ep_eth_type': 'test_value_25', 'link_failed_signal': 'enable',
            'load_balance_all': 'enable', 'memory_compatible_mode': 'enable',
            'memory_threshold': 'test_value_29', 'mode': 'standalone',
            'monitor': 'test_value_31', 'multicast_ttl': '32',
            'nntp_proxy_threshold': 'test_value_33', 'override': 'enable',
            'override_wait_time': '35', 'password': 'test_value_36',
            'pingserver_failover_threshold': '37', 'pingserver_flip_timeout': '38',
            'pingserver_monitor_interface': 'test_value_39',
            'pingserver_slave_force_reset': 'enable', 'pop3_proxy_threshold': 'test_value_41',
            'priority': '42', 'route_hold': '43', 'route_ttl': '44', 'route_wait': '45',
            'schedule': 'none', 'session_pickup': 'enable',
            'session_pickup_connectionless': 'enable', 'session_pickup_delay': 'enable',
            'session_pickup_expectation': 'enable', 'session_pickup_nat': 'enable',
            'session_sync_dev': 'test_value_52', 'smtp_proxy_threshold': 'test_value_53',
            'standalone_config_sync': 'enable', 'standalone_mgmt_vdom': 'enable',
            'sync_config': 'enable', 'sync_packet_balance': 'enable',
            'unicast_hb': 'enable', 'unicast_hb_netmask': 'test_value_59',
            'unicast_hb_peerip': 'test_value_60', 'uninterruptible_upgrade': 'enable',
            'vcluster_id': '62', 'vcluster2': 'enable', 'vdom': 'test_value_64',
            'weight': 'test_value_65'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_ha.fortios_system(input_data, fos_instance)
    # FortiOS API payload: same values, keys in hyphenated form.
    expected_data = {
        'arps': '3', 'arps-interval': '4', 'authentication': 'enable',
        'cpu-threshold': 'test_value_6', 'encryption': 'enable',
        'ftp-proxy-threshold': 'test_value_8', 'gratuitous-arps': 'enable',
        'group-id': '10', 'group-name': 'test_value_11', 'ha-direct': 'enable',
        'ha-eth-type': 'test_value_13', 'ha-mgmt-status': 'enable',
        'ha-uptime-diff-margin': '15', 'hb-interval': '16', 'hb-lost-threshold': '17',
        'hbdev': 'test_value_18', 'hc-eth-type': 'test_value_19', 'hello-holddown': '20',
        'http-proxy-threshold': 'test_value_21', 'imap-proxy-threshold': 'test_value_22',
        'inter-cluster-session-sync': 'enable', 'key': 'test_value_24',
        'l2ep-eth-type': 'test_value_25', 'link-failed-signal': 'enable',
        'load-balance-all': 'enable', 'memory-compatible-mode': 'enable',
        'memory-threshold': 'test_value_29', 'mode': 'standalone',
        'monitor': 'test_value_31', 'multicast-ttl': '32',
        'nntp-proxy-threshold': 'test_value_33', 'override': 'enable',
        'override-wait-time': '35', 'password': 'test_value_36',
        'pingserver-failover-threshold': '37', 'pingserver-flip-timeout': '38',
        'pingserver-monitor-interface': 'test_value_39',
        'pingserver-slave-force-reset': 'enable', 'pop3-proxy-threshold': 'test_value_41',
        'priority': '42', 'route-hold': '43', 'route-ttl': '44', 'route-wait': '45',
        'schedule': 'none', 'session-pickup': 'enable',
        'session-pickup-connectionless': 'enable', 'session-pickup-delay': 'enable',
        'session-pickup-expectation': 'enable', 'session-pickup-nat': 'enable',
        'session-sync-dev': 'test_value_52', 'smtp-proxy-threshold': 'test_value_53',
        'standalone-config-sync': 'enable', 'standalone-mgmt-vdom': 'enable',
        'sync-config': 'enable', 'sync-packet-balance': 'enable',
        'unicast-hb': 'enable', 'unicast-hb-netmask': 'test_value_59',
        'unicast-hb-peerip': 'test_value_60', 'uninterruptible-upgrade': 'enable',
        'vcluster-id': '62', 'vcluster2': 'enable', 'vdom': 'test_value_64',
        'weight': 'test_value_65'
    }
    set_method_mock.assert_called_with('system', 'ha', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_ha_creation_fails(mocker):
    """Backend failure: set() returns HTTP 500, so the module reports
    is_error=True and changed=False while still sending the full payload."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    # Same snake_case parameters as the success case.
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_ha': {
            'arps': '3', 'arps_interval': '4', 'authentication': 'enable',
            'cpu_threshold': 'test_value_6', 'encryption': 'enable',
            'ftp_proxy_threshold': 'test_value_8', 'gratuitous_arps': 'enable',
            'group_id': '10', 'group_name': 'test_value_11', 'ha_direct': 'enable',
            'ha_eth_type': 'test_value_13', 'ha_mgmt_status': 'enable',
            'ha_uptime_diff_margin': '15', 'hb_interval': '16', 'hb_lost_threshold': '17',
            'hbdev': 'test_value_18', 'hc_eth_type': 'test_value_19', 'hello_holddown': '20',
            'http_proxy_threshold': 'test_value_21', 'imap_proxy_threshold': 'test_value_22',
            'inter_cluster_session_sync': 'enable', 'key': 'test_value_24',
            'l2ep_eth_type': 'test_value_25', 'link_failed_signal': 'enable',
            'load_balance_all': 'enable', 'memory_compatible_mode': 'enable',
            'memory_threshold': 'test_value_29', 'mode': 'standalone',
            'monitor': 'test_value_31', 'multicast_ttl': '32',
            'nntp_proxy_threshold': 'test_value_33', 'override': 'enable',
            'override_wait_time': '35', 'password': 'test_value_36',
            'pingserver_failover_threshold': '37', 'pingserver_flip_timeout': '38',
            'pingserver_monitor_interface': 'test_value_39',
            'pingserver_slave_force_reset': 'enable', 'pop3_proxy_threshold': 'test_value_41',
            'priority': '42', 'route_hold': '43', 'route_ttl': '44', 'route_wait': '45',
            'schedule': 'none', 'session_pickup': 'enable',
            'session_pickup_connectionless': 'enable', 'session_pickup_delay': 'enable',
            'session_pickup_expectation': 'enable', 'session_pickup_nat': 'enable',
            'session_sync_dev': 'test_value_52', 'smtp_proxy_threshold': 'test_value_53',
            'standalone_config_sync': 'enable', 'standalone_mgmt_vdom': 'enable',
            'sync_config': 'enable', 'sync_packet_balance': 'enable',
            'unicast_hb': 'enable', 'unicast_hb_netmask': 'test_value_59',
            'unicast_hb_peerip': 'test_value_60', 'uninterruptible_upgrade': 'enable',
            'vcluster_id': '62', 'vcluster2': 'enable', 'vdom': 'test_value_64',
            'weight': 'test_value_65'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_ha.fortios_system(input_data, fos_instance)
    # Payload is still the hyphenated conversion of the input.
    expected_data = {
        'arps': '3', 'arps-interval': '4', 'authentication': 'enable',
        'cpu-threshold': 'test_value_6', 'encryption': 'enable',
        'ftp-proxy-threshold': 'test_value_8', 'gratuitous-arps': 'enable',
        'group-id': '10', 'group-name': 'test_value_11', 'ha-direct': 'enable',
        'ha-eth-type': 'test_value_13', 'ha-mgmt-status': 'enable',
        'ha-uptime-diff-margin': '15', 'hb-interval': '16', 'hb-lost-threshold': '17',
        'hbdev': 'test_value_18', 'hc-eth-type': 'test_value_19', 'hello-holddown': '20',
        'http-proxy-threshold': 'test_value_21', 'imap-proxy-threshold': 'test_value_22',
        'inter-cluster-session-sync': 'enable', 'key': 'test_value_24',
        'l2ep-eth-type': 'test_value_25', 'link-failed-signal': 'enable',
        'load-balance-all': 'enable', 'memory-compatible-mode': 'enable',
        'memory-threshold': 'test_value_29', 'mode': 'standalone',
        'monitor': 'test_value_31', 'multicast-ttl': '32',
        'nntp-proxy-threshold': 'test_value_33', 'override': 'enable',
        'override-wait-time': '35', 'password': 'test_value_36',
        'pingserver-failover-threshold': '37', 'pingserver-flip-timeout': '38',
        'pingserver-monitor-interface': 'test_value_39',
        'pingserver-slave-force-reset': 'enable', 'pop3-proxy-threshold': 'test_value_41',
        'priority': '42', 'route-hold': '43', 'route-ttl': '44', 'route-wait': '45',
        'schedule': 'none', 'session-pickup': 'enable',
        'session-pickup-connectionless': 'enable', 'session-pickup-delay': 'enable',
        'session-pickup-expectation': 'enable', 'session-pickup-nat': 'enable',
        'session-sync-dev': 'test_value_52', 'smtp-proxy-threshold': 'test_value_53',
        'standalone-config-sync': 'enable', 'standalone-mgmt-vdom': 'enable',
        'sync-config': 'enable', 'sync-packet-balance': 'enable',
        'unicast-hb': 'enable', 'unicast-hb-netmask': 'test_value_59',
        'unicast-hb-peerip': 'test_value_60', 'uninterruptible-upgrade': 'enable',
        'vcluster-id': '62', 'vcluster2': 'enable', 'vdom': 'test_value_64',
        'weight': 'test_value_65'
    }
    set_method_mock.assert_called_with('system', 'ha', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_ha_idempotent(mocker):
    """Idempotence: an HTTP 404 error from set() is treated as 'nothing to
    change' -- neither an error nor a change is reported."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    # Same snake_case parameters as the success case.
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_ha': {
            'arps': '3', 'arps_interval': '4', 'authentication': 'enable',
            'cpu_threshold': 'test_value_6', 'encryption': 'enable',
            'ftp_proxy_threshold': 'test_value_8', 'gratuitous_arps': 'enable',
            'group_id': '10', 'group_name': 'test_value_11', 'ha_direct': 'enable',
            'ha_eth_type': 'test_value_13', 'ha_mgmt_status': 'enable',
            'ha_uptime_diff_margin': '15', 'hb_interval': '16', 'hb_lost_threshold': '17',
            'hbdev': 'test_value_18', 'hc_eth_type': 'test_value_19', 'hello_holddown': '20',
            'http_proxy_threshold': 'test_value_21', 'imap_proxy_threshold': 'test_value_22',
            'inter_cluster_session_sync': 'enable', 'key': 'test_value_24',
            'l2ep_eth_type': 'test_value_25', 'link_failed_signal': 'enable',
            'load_balance_all': 'enable', 'memory_compatible_mode': 'enable',
            'memory_threshold': 'test_value_29', 'mode': 'standalone',
            'monitor': 'test_value_31', 'multicast_ttl': '32',
            'nntp_proxy_threshold': 'test_value_33', 'override': 'enable',
            'override_wait_time': '35', 'password': 'test_value_36',
            'pingserver_failover_threshold': '37', 'pingserver_flip_timeout': '38',
            'pingserver_monitor_interface': 'test_value_39',
            'pingserver_slave_force_reset': 'enable', 'pop3_proxy_threshold': 'test_value_41',
            'priority': '42', 'route_hold': '43', 'route_ttl': '44', 'route_wait': '45',
            'schedule': 'none', 'session_pickup': 'enable',
            'session_pickup_connectionless': 'enable', 'session_pickup_delay': 'enable',
            'session_pickup_expectation': 'enable', 'session_pickup_nat': 'enable',
            'session_sync_dev': 'test_value_52', 'smtp_proxy_threshold': 'test_value_53',
            'standalone_config_sync': 'enable', 'standalone_mgmt_vdom': 'enable',
            'sync_config': 'enable', 'sync_packet_balance': 'enable',
            'unicast_hb': 'enable', 'unicast_hb_netmask': 'test_value_59',
            'unicast_hb_peerip': 'test_value_60', 'uninterruptible_upgrade': 'enable',
            'vcluster_id': '62', 'vcluster2': 'enable', 'vdom': 'test_value_64',
            'weight': 'test_value_65'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_ha.fortios_system(input_data, fos_instance)
    # Payload is still the hyphenated conversion of the input.
    expected_data = {
        'arps': '3', 'arps-interval': '4', 'authentication': 'enable',
        'cpu-threshold': 'test_value_6', 'encryption': 'enable',
        'ftp-proxy-threshold': 'test_value_8', 'gratuitous-arps': 'enable',
        'group-id': '10', 'group-name': 'test_value_11', 'ha-direct': 'enable',
        'ha-eth-type': 'test_value_13', 'ha-mgmt-status': 'enable',
        'ha-uptime-diff-margin': '15', 'hb-interval': '16', 'hb-lost-threshold': '17',
        'hbdev': 'test_value_18', 'hc-eth-type': 'test_value_19', 'hello-holddown': '20',
        'http-proxy-threshold': 'test_value_21', 'imap-proxy-threshold': 'test_value_22',
        'inter-cluster-session-sync': 'enable', 'key': 'test_value_24',
        'l2ep-eth-type': 'test_value_25', 'link-failed-signal': 'enable',
        'load-balance-all': 'enable', 'memory-compatible-mode': 'enable',
        'memory-threshold': 'test_value_29', 'mode': 'standalone',
        'monitor': 'test_value_31', 'multicast-ttl': '32',
        'nntp-proxy-threshold': 'test_value_33', 'override': 'enable',
        'override-wait-time': '35', 'password': 'test_value_36',
        'pingserver-failover-threshold': '37', 'pingserver-flip-timeout': '38',
        'pingserver-monitor-interface': 'test_value_39',
        'pingserver-slave-force-reset': 'enable', 'pop3-proxy-threshold': 'test_value_41',
        'priority': '42', 'route-hold': '43', 'route-ttl': '44', 'route-wait': '45',
        'schedule': 'none', 'session-pickup': 'enable',
        'session-pickup-connectionless': 'enable', 'session-pickup-delay': 'enable',
        'session-pickup-expectation': 'enable', 'session-pickup-nat': 'enable',
        'session-sync-dev': 'test_value_52', 'smtp-proxy-threshold': 'test_value_53',
        'standalone-config-sync': 'enable', 'standalone-mgmt-vdom': 'enable',
        'sync-config': 'enable', 'sync-packet-balance': 'enable',
        'unicast-hb': 'enable', 'unicast-hb-netmask': 'test_value_59',
        'unicast-hb-peerip': 'test_value_60', 'uninterruptible-upgrade': 'enable',
        'vcluster-id': '62', 'vcluster2': 'enable', 'vdom': 'test_value_64',
        'weight': 'test_value_65'
    }
    set_method_mock.assert_called_with('system', 'ha', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_system_ha_filter_foreign_attributes(mocker):
    """Attribute filtering: keys not in the module's schema (here
    'random_attribute_not_valid') must be dropped from the API payload."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    # Valid snake_case parameters plus one foreign key that must be filtered.
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_ha': {
            'random_attribute_not_valid': 'tag',
            'arps': '3', 'arps_interval': '4', 'authentication': 'enable',
            'cpu_threshold': 'test_value_6', 'encryption': 'enable',
            'ftp_proxy_threshold': 'test_value_8', 'gratuitous_arps': 'enable',
            'group_id': '10', 'group_name': 'test_value_11', 'ha_direct': 'enable',
            'ha_eth_type': 'test_value_13', 'ha_mgmt_status': 'enable',
            'ha_uptime_diff_margin': '15', 'hb_interval': '16', 'hb_lost_threshold': '17',
            'hbdev': 'test_value_18', 'hc_eth_type': 'test_value_19', 'hello_holddown': '20',
            'http_proxy_threshold': 'test_value_21', 'imap_proxy_threshold': 'test_value_22',
            'inter_cluster_session_sync': 'enable', 'key': 'test_value_24',
            'l2ep_eth_type': 'test_value_25', 'link_failed_signal': 'enable',
            'load_balance_all': 'enable', 'memory_compatible_mode': 'enable',
            'memory_threshold': 'test_value_29', 'mode': 'standalone',
            'monitor': 'test_value_31', 'multicast_ttl': '32',
            'nntp_proxy_threshold': 'test_value_33', 'override': 'enable',
            'override_wait_time': '35', 'password': 'test_value_36',
            'pingserver_failover_threshold': '37', 'pingserver_flip_timeout': '38',
            'pingserver_monitor_interface': 'test_value_39',
            'pingserver_slave_force_reset': 'enable', 'pop3_proxy_threshold': 'test_value_41',
            'priority': '42', 'route_hold': '43', 'route_ttl': '44', 'route_wait': '45',
            'schedule': 'none', 'session_pickup': 'enable',
            'session_pickup_connectionless': 'enable', 'session_pickup_delay': 'enable',
            'session_pickup_expectation': 'enable', 'session_pickup_nat': 'enable',
            'session_sync_dev': 'test_value_52', 'smtp_proxy_threshold': 'test_value_53',
            'standalone_config_sync': 'enable', 'standalone_mgmt_vdom': 'enable',
            'sync_config': 'enable', 'sync_packet_balance': 'enable',
            'unicast_hb': 'enable', 'unicast_hb_netmask': 'test_value_59',
            'unicast_hb_peerip': 'test_value_60', 'uninterruptible_upgrade': 'enable',
            'vcluster_id': '62', 'vcluster2': 'enable', 'vdom': 'test_value_64',
            'weight': 'test_value_65'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_ha.fortios_system(input_data, fos_instance)
    # Expected payload: hyphenated keys, WITHOUT the foreign attribute.
    expected_data = {
        'arps': '3', 'arps-interval': '4', 'authentication': 'enable',
        'cpu-threshold': 'test_value_6', 'encryption': 'enable',
        'ftp-proxy-threshold': 'test_value_8', 'gratuitous-arps': 'enable',
        'group-id': '10', 'group-name': 'test_value_11', 'ha-direct': 'enable',
        'ha-eth-type': 'test_value_13', 'ha-mgmt-status': 'enable',
        'ha-uptime-diff-margin': '15', 'hb-interval': '16', 'hb-lost-threshold': '17',
        'hbdev': 'test_value_18', 'hc-eth-type': 'test_value_19', 'hello-holddown': '20',
        'http-proxy-threshold': 'test_value_21', 'imap-proxy-threshold': 'test_value_22',
        'inter-cluster-session-sync': 'enable', 'key': 'test_value_24',
        'l2ep-eth-type': 'test_value_25', 'link-failed-signal': 'enable',
        'load-balance-all': 'enable', 'memory-compatible-mode': 'enable',
        'memory-threshold': 'test_value_29', 'mode': 'standalone',
        'monitor': 'test_value_31', 'multicast-ttl': '32',
        'nntp-proxy-threshold': 'test_value_33', 'override': 'enable',
        'override-wait-time': '35', 'password': 'test_value_36',
        'pingserver-failover-threshold': '37', 'pingserver-flip-timeout': '38',
        'pingserver-monitor-interface': 'test_value_39',
        'pingserver-slave-force-reset': 'enable', 'pop3-proxy-threshold': 'test_value_41',
        'priority': '42', 'route-hold': '43', 'route-ttl': '44', 'route-wait': '45',
        'schedule': 'none', 'session-pickup': 'enable',
        'session-pickup-connectionless': 'enable', 'session-pickup-delay': 'enable',
        'session-pickup-expectation': 'enable', 'session-pickup-nat': 'enable',
        'session-sync-dev': 'test_value_52', 'smtp-proxy-threshold': 'test_value_53',
        'standalone-config-sync': 'enable', 'standalone-mgmt-vdom': 'enable',
        'sync-config': 'enable', 'sync-packet-balance': 'enable',
        'unicast-hb': 'enable', 'unicast-hb-netmask': 'test_value_59',
        'unicast-hb-peerip': 'test_value_60', 'uninterruptible-upgrade': 'enable',
        'vcluster-id': '62', 'vcluster2': 'enable', 'vdom': 'test_value_64',
        'weight': 'test_value_65'
    }
    set_method_mock.assert_called_with('system', 'ha', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| {
"content_hash": "a772b597ba3c37ec385ac16df8bef9ae",
"timestamp": "",
"source": "github",
"line_count": 631,
"max_line_length": 133,
"avg_line_length": 42.06973058637084,
"alnum_prop": 0.49321931741128605,
"repo_name": "thaim/ansible",
"id": "dc2fdfbfb1bddc72201088032bf2a75ff628e592",
"size": "27242",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/fortios/test_fortios_system_ha.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
import sys
import numpy as np
from ...core.parameterization.parameterized import Parameterized
from paramz.core.observable_array import ObsAr
from paramz.caching import Cache_this
from .kernel_slice_operations import KernCallsViaSlicerMeta
from functools import reduce
import six
@six.add_metaclass(KernCallsViaSlicerMeta)
class Kern(Parameterized):
    #===========================================================================
    # This adds input slice support. The rather ugly code for slicing can be
    # found in kernel_slice_operations.
    # __metaclass__ is ignored in Python 3, so the six.add_metaclass decorator
    # above is used to support Py2 and Py3 simultaneously.
    #===========================================================================

    #: Subclasses that ship a GPU implementation set this to True.
    _support_GPU = False

    def __init__(self, input_dim, active_dims, name, useGPU=False, *a, **kw):
        """
        The base class for a kernel: a positive definite function
        which forms of a covariance function (kernel).

        input_dim:
            is the number of dimensions to work on. Make sure to give the
            tight dimensionality of inputs.
            You most likely want this to be the integer telling the number of
            input dimensions of the kernel.

        active_dims:
            is the active_dimensions of inputs X we will work on.
            All kernels will get sliced Xes as inputs, if _all_dims_active is not None
            Only positive integers are allowed in active_dims!
            if active_dims is None, slicing is switched off and all X will be passed through as given.

        :param int input_dim: the number of input dimensions to the function
        :param array-like|None active_dims: list of indices on which dimensions this kernel works on, or none if no slicing

        Do not instantiate.
        """
        super(Kern, self).__init__(name=name, *a, **kw)
        self.input_dim = int(input_dim)

        if active_dims is None:
            # No explicit slicing requested: work on the first input_dim columns.
            active_dims = np.arange(input_dim)

        self.active_dims = np.asarray(active_dims, np.int_)
        self._all_dims_active = np.atleast_1d(self.active_dims).astype(int)

        assert self.active_dims.size == self.input_dim, "input_dim={} does not match len(active_dim)={}".format(self.input_dim, self._all_dims_active.size)

        self._sliced_X = 0
        # GPU is only enabled when both requested and supported by the subclass.
        self.useGPU = self._support_GPU and useGPU

        from .psi_comp import PSICOMP_GH
        self.psicomp = PSICOMP_GH()

    def __setstate__(self, state):
        """Restore pickled state, rebuilding the slicing index first."""
        # Rebuilt before delegating to the parent, presumably because
        # restoring parameters may already trigger slicing — TODO confirm.
        self._all_dims_active = np.arange(0, max(state['active_dims']) + 1)
        super(Kern, self).__setstate__(state)

    @property
    def _effective_input_dim(self):
        """Number of input columns this kernel actually reads."""
        return np.size(self._all_dims_active)

    @Cache_this(limit=3)
    def _slice_X(self, X):
        """Return the columns of X indexed by ``_all_dims_active``.

        The slice is cast to float when possible; otherwise the raw slice
        is returned unchanged.
        """
        try:
            return X[:, self._all_dims_active].astype('float')
        except Exception:
            # Fixed: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit. Non-castable data falls back to
            # the unconverted slice.
            return X[:, self._all_dims_active]

    def K(self, X, X2):
        """
        Compute the kernel function.

        .. math::
            K_{ij} = k(X_i, X_j)

        :param X: the first set of inputs to the kernel
        :param X2: (optional) the second set of arguments to the kernel. If X2
                   is None, this is passed through to the 'part' object, which
                   handles this as X2 == X.
        """
        raise NotImplementedError

    def Kdiag(self, X):
        """
        The diagonal of the kernel matrix K

        .. math::
            Kdiag_{i} = k(X_i, X_i)
        """
        raise NotImplementedError

    def psi0(self, Z, variational_posterior):
        r"""
        .. math::
            \psi_0 = \sum_{i=0}^{n}E_{q(X)}[k(X_i, X_i)]
        """
        return self.psicomp.psicomputations(self, Z, variational_posterior)[0]

    def psi1(self, Z, variational_posterior):
        r"""
        .. math::
            \psi_1^{n,m} = E_{q(X)}[k(X_n, Z_m)]
        """
        return self.psicomp.psicomputations(self, Z, variational_posterior)[1]

    def psi2(self, Z, variational_posterior):
        r"""
        .. math::
            \psi_2^{m,m'} = \sum_{i=0}^{n}E_{q(X)}[ k(Z_m, X_i) k(X_i, Z_{m'})]
        """
        return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=False)[2]

    def psi2n(self, Z, variational_posterior):
        r"""
        .. math::
            \psi_2^{n,m,m'} = E_{q(X)}[ k(Z_m, X_n) k(X_n, Z_{m'})]

        Thus, we do not sum out n, compared to psi2
        """
        return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=True)[2]

    def gradients_X(self, dL_dK, X, X2):
        r"""
        .. math::
            \frac{\partial L}{\partial X} = \frac{\partial L}{\partial K}\frac{\partial K}{\partial X}
        """
        raise NotImplementedError

    def gradients_X_X2(self, dL_dK, X, X2):
        """Gradients w.r.t. both inputs, reusing gradients_X with swapped roles."""
        return self.gradients_X(dL_dK, X, X2), self.gradients_X(dL_dK.T, X2, X)

    def gradients_XX(self, dL_dK, X, X2, cov=True):
        r"""
        .. math::
            \frac{\partial^2 L}{\partial X\partial X_2} = \frac{\partial L}{\partial K}\frac{\partial^2 K}{\partial X\partial X_2}
        """
        raise NotImplementedError("This is the second derivative of K wrt X and X2, and not implemented for this kernel")

    def gradients_XX_diag(self, dL_dKdiag, X, cov=True):
        """
        The diagonal of the second derivative w.r.t. X and X2
        """
        raise NotImplementedError("This is the diagonal of the second derivative of K wrt X and X2, and not implemented for this kernel")

    def gradients_X_diag(self, dL_dKdiag, X):
        """
        The diagonal of the derivative w.r.t. X
        """
        raise NotImplementedError

    def update_gradients_diag(self, dL_dKdiag, X):
        """ update the gradients of all parameters when using only the diagonal elements of the covariance matrix"""
        raise NotImplementedError

    def update_gradients_full(self, dL_dK, X, X2):
        """Set the gradients of all parameters when doing full (N) inference."""
        raise NotImplementedError

    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        r"""
        Set the gradients of all parameters when doing inference with
        uncertain inputs, using expectations of the kernel.

        The essential maths is

        .. math::
            \frac{\partial L}{\partial \theta_i} & = \frac{\partial L}{\partial \psi_0}\frac{\partial \psi_0}{\partial \theta_i}\
                & \quad + \frac{\partial L}{\partial \psi_1}\frac{\partial \psi_1}{\partial \theta_i}\
                & \quad + \frac{\partial L}{\partial \psi_2}\frac{\partial \psi_2}{\partial \theta_i}

        Thus, we push the different derivatives through the gradients of the psi
        statistics. Be sure to set the gradients for all kernel
        parameters here.
        """
        dtheta = self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[0]
        self.gradient[:] = dtheta

    def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
                                 psi0=None, psi1=None, psi2=None):
        """
        Returns the derivative of the objective wrt Z, using the chain rule
        through the expectation variables.
        """
        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[1]

    def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        """
        Compute the gradients wrt the parameters of the variational
        distribution q(X), chain-ruling via the expectations of the kernel
        """
        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[2:]

    def plot(self, x=None, fignum=None, ax=None, title=None, plot_limits=None, resolution=None, **mpl_kwargs):
        """
        Plot this kernel.

        :param x: the value to use for the other kernel argument (kernels are a function of two variables!)
        :param fignum: figure number of the plot
        :param ax: matplotlib axis to plot on
        :param title: the matplotlib title
        :param plot_limits: the range over which to plot the kernel
        :resolution: the resolution of the lines used in plotting
        :mpl_kwargs avalid keyword arguments to pass through to matplotlib (e.g. lw=7)
        """
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        from ...plotting.matplot_dep import kernel_plots
        kernel_plots.plot(self, x, fignum, ax, title, plot_limits, resolution, **mpl_kwargs)

    def input_sensitivity(self, summarize=True):
        """
        Returns the sensitivity for each dimension of this kernel.

        The base implementation reports zero sensitivity everywhere;
        subclasses override this.
        """
        return np.zeros(self.input_dim)

    def get_most_significant_input_dimensions(self, which_indices=None):
        """
        Determine which dimensions should be plotted

        Returns the top three most signification input dimensions

        if less then three dimensions, the non existing dimensions are
        labeled as None, so for a 1 dimensional input this returns
        (0, None, None).

        :param which_indices: force the indices to be the given indices.
        :type which_indices: int or tuple(int,int) or tuple(int,int,int)
        """
        if which_indices is None:
            # Rank dimensions by sensitivity, most significant first.
            which_indices = np.argsort(self.input_sensitivity())[::-1][:3]
        try:
            input_1, input_2, input_3 = which_indices
        except ValueError:
            # which indices is tuple or int
            try:
                input_3 = None
                input_1, input_2 = which_indices
            except TypeError:
                # which_indices is an int
                input_1, input_2 = which_indices, None
            except ValueError:
                # which_indices was a list or array like with only one int
                input_1, input_2 = which_indices[0], None
        return input_1, input_2, input_3

    def __add__(self, other):
        """ Overloading of the '+' operator. for more control, see self.add """
        return self.add(other)

    def __iadd__(self, other):
        """ Overloading of the '+=' operator, equivalent to self.add """
        return self.add(other)

    def add(self, other, name='sum'):
        """
        Add another kernel to this one.

        :param other: the other kernel to be added
        :type other: GPy.kern
        """
        assert isinstance(other, Kern), "only kernels can be added to kernels..."
        from .add import Add
        return Add([self, other], name=name)

    def __mul__(self, other):
        """ Here we overload the '*' operator. See self.prod for more information"""
        return self.prod(other)

    def __imul__(self, other):
        """ Here we overload the '*' operator. See self.prod for more information"""
        return self.prod(other)

    def __pow__(self, other):
        """
        Shortcut for tensor `prod`.
        """
        assert np.all(self._all_dims_active == range(self.input_dim)), "Can only use kernels, which have their input_dims defined from 0"
        assert np.all(other._all_dims_active == range(other.input_dim)), "Can only use kernels, which have their input_dims defined from 0"
        # Shift the other kernel's active dims so the two operate on
        # disjoint input columns (tensor product of input spaces).
        other._all_dims_active += self.input_dim
        return self.prod(other)

    def prod(self, other, name='mul'):
        """
        Multiply two kernels (either on the same space, or on the tensor
        product of the input space).

        :param other: the other kernel to be added
        :type other: GPy.kern
        """
        assert isinstance(other, Kern), "only kernels can be multiplied to kernels..."
        from .prod import Prod
        return Prod([self, other], name)

    def _check_input_dim(self, X):
        """Assert X has exactly input_dim columns (no active_dims slicing)."""
        assert X.shape[1] == self.input_dim, "{} did not specify active_dims and X has wrong shape: X_dim={}, whereas input_dim={}".format(self.name, X.shape[1], self.input_dim)

    def _check_active_dims(self, X):
        """Assert X has enough columns to cover all active dimensions."""
        assert X.shape[1] >= len(self._all_dims_active), "At least {} dimensional X needed, X.shape={!s}".format(len(self._all_dims_active), X.shape)
class CombinationKernel(Kern):
    """
    Abstract super class for combination kernels.
    A combination kernel combines (a list of) kernels and works on those.
    Examples are the HierarchicalKernel or Add and Prod kernels.
    """

    def __init__(self, kernels, name, extra_dims=None):
        """
        Abstract super class for combination kernels.
        A combination kernel combines (a list of) kernels and works on those.
        Examples are the HierarchicalKernel or Add and Prod kernels.

        :param list kernels: List of kernels to combine (can be only one element)
        :param str name: name of the combination kernel
        :param array-like extra_dims: if needed extra dimensions for the
            combination kernel to work on; defaults to no extra dimensions.
            (Fixed: the default used to be a shared mutable ``[]``.)
        """
        assert all(isinstance(k, Kern) for k in kernels)
        extra_dims = np.asarray([] if extra_dims is None else extra_dims, dtype=int)
        # The combined kernel is active on the union of all sub-kernels'
        # active dimensions plus any extra dimensions.
        active_dims = reduce(np.union1d, (np.r_[x.active_dims] for x in kernels), extra_dims)
        input_dim = active_dims.size
        # initialize the kernel with the full input_dim
        super(CombinationKernel, self).__init__(input_dim, active_dims, name)
        effective_input_dim = reduce(max, (k._all_dims_active.max() for k in kernels)) + 1
        self._all_dims_active = np.array(np.concatenate((np.arange(effective_input_dim), extra_dims)), dtype=int)
        self.extra_dims = extra_dims
        self.link_parameters(*kernels)

    @property
    def parts(self):
        """The sub-kernels this combination is made of."""
        return self.parameters

    def _set_all_dims_ative(self):
        # NOTE: method name keeps its historical typo ("ative") because
        # external callers may rely on it.
        self._all_dims_active = np.atleast_1d(self.active_dims).astype(int)

    def input_sensitivity(self, summarize=True):
        """
        If summize is true, we want to get the summerized view of the sensitivities,
        otherwise put everything into an array with shape (#kernels, input_dim)
        in the order of appearance of the kernels in the parameterized object.
        """
        if not summarize:
            num_params = [0]
            parts = []
            def sum_params(x):
                # Collect every leaf (non-combination) kernel in traversal order.
                if (not isinstance(x, CombinationKernel)) and isinstance(x, Kern):
                    num_params[0] += 1
                    parts.append(x)
            self.traverse(sum_params)
            i_s = np.zeros((num_params[0], self.input_dim))
            # Scatter each leaf kernel's sensitivity into its active columns.
            # (Was a side-effect list comprehension using operator.setitem.)
            for i, k in enumerate(parts):
                i_s[i, k._all_dims_active] = k.input_sensitivity(summarize)
            return i_s
        else:
            raise NotImplementedError("Choose the kernel you want to get the sensitivity for. You need to override the default behaviour for getting the input sensitivity to be able to get the input sensitivity. For sum kernel it is the sum of all sensitivities, TODO: product kernel? Other kernels?, also TODO: shall we return all the sensitivities here in the combination kernel? So we can combine them however we want? This could lead to just plot all the sensitivities here...")

    def _check_active_dims(self, X):
        # Combination kernels delegate active-dims checking to their parts.
        return

    def _check_input_dim(self, X):
        # As combination kernels cannot always know, what their inner kernels have as input dims, the check will be done inside them, respectively
        return
| {
"content_hash": "d8a88b5103301973f7d49c2efcd24b58",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 482,
"avg_line_length": 42.501347708894876,
"alnum_prop": 0.6124429223744292,
"repo_name": "avehtari/GPy",
"id": "4379fb71f4ca6ffd518ee7989a0e97ce0927465d",
"size": "15881",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "GPy/kern/src/kern.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2030"
},
{
"name": "C++",
"bytes": "1605"
},
{
"name": "Python",
"bytes": "2001370"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
} |
"""Definition of a pulse, the elementary building block of sequences.
"""
import numpy as np
from atom.api import (Str, Enum, Typed, Property, set_default)
from exopy.utils.atom_util import (update_members_from_preferences)
from .shapes.base_shape import AbstractShape
from .shapes.modulation import Modulation
from .item import Item
class Pulse(Item):
    """ Represent a pulse to perform during a sequence.
    """
    #: The kind of pulse can be either logical or analogical.
    kind = Enum('Logical', 'Analogical').tag(pref=True)

    #: Channel of the executioner which should perform the pulse.
    channel = Str().tag(pref=True)

    #: Waveform, computed on access from the modulation and shape.
    waveform = Property()

    #: Modulation to apply to the pulse. Only enabled in analogical mode.
    modulation = Typed(Modulation, ()).tag(pref=True)

    #: Shape of the pulse. Only enabled in analogical mode.
    shape = Typed(AbstractShape).tag(pref=True)

    linkable_vars = set_default(['start', 'stop', 'duration'])

    def eval_entries(self, root_vars, sequence_locals, missings, errors):
        """ Attempt to eval the string parameters of the pulse.

        Parameters
        ----------
        root_vars : dict
            Dictionary of global variables for the all items. This will
            typically contain the i_start/stop/duration and the root vars.

        sequence_locals : dict
            Dictionary of variables whose scope is limited to this item
            parent.

        missings : set
            Set of unfound local variables.

        errors : dict
            Dict of the errors which happened when performing the evaluation.

        Returns
        -------
        flag : bool
            Boolean indicating whether or not the evaluation succeeded.

        """
        success = super(Pulse, self).eval_entries(root_vars, sequence_locals,
                                                  missings, errors)

        if success and self.kind == 'Analogical':
            # Shapes are not allowed to modify global vars hence the empty
            # dict
            success &= self.modulation.eval_entries({}, sequence_locals,
                                                    missings, errors)
            self.shape.index = self.index
            success &= self.shape.eval_entries({}, sequence_locals,
                                               missings, errors)

        return success

    @classmethod
    def build_from_config(cls, config, dependencies):
        """ Create a new instance using the provided infos for initialisation.

        Parameters
        ----------
        config : dict(str)
            Dictionary holding the new values to give to the members in string
            format, or dictionary like for instance with prefs.

        dependencies : dict
            Dictionary holding the necessary classes needed when rebuilding.

        Returns
        -------
        pulse : pulse
            Newly created and initialized sequence.

        Notes
        -----
        This method is fairly powerful and can handle a lot of cases so
        don't override it without checking that it works.

        """
        pulse = cls()

        #: Initialize the shape object with the right class, so that after
        #: update_members_from_preferences can do all the rest (initialize
        #: the shape's members)
        if 'shape' in config:
            shape_config = config['shape']
            if not shape_config == 'None':
                s_id = shape_config.pop('shape_id')
                s_cls = dependencies['exopy.pulses.shape'][s_id]
                shape = s_cls()
                pulse.shape = shape

        update_members_from_preferences(pulse, config)
        return pulse

    def traverse(self, depth=-1):
        """Yield a task and all of its components.

        The base implementation simply yields the task itself.

        Parameters
        ----------
        depth : int
            How deep should we explore the tree of tasks. When this number
            reaches zero deeper children should not be explored but simply
            yielded.

        """
        for i in super(Pulse, self).traverse(depth=depth):
            yield i

        if self.kind == 'Analogical':
            yield self.modulation

            if self.shape:
                yield self.shape

    def clean_cached_values(self):
        """Also clean modulation and shape if necessary.

        """
        super(Pulse, self).clean_cached_values()
        if self.kind == 'Analogical':
            self.modulation.clean_cached_values()
            # Fixed: guard against a missing shape, mirroring traverse();
            # previously this raised AttributeError when shape was None.
            if self.shape:
                self.shape.clean_cached_values()

    # =========================================================================
    # --- Private API ---------------------------------------------------------
    # =========================================================================

    def _get_waveform(self):
        """ Getter for the waveform property.

        """
        context = self.root.context
        n_points = context.len_sample(self.duration)
        if self.kind == 'Analogical':
            # endpoint=False so that samples align with the sampling grid.
            time = np.linspace(self.start, self.stop, n_points, False)
            mod = self.modulation.compute(time, context.time_unit)
            shape = self.shape.compute(time, context.time_unit)
            return mod * shape
        else:
            return np.ones(n_points, dtype=np.int8)
| {
"content_hash": "df72f6bd84b13a076902453c95ce0616",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 33.22222222222222,
"alnum_prop": 0.5641025641025641,
"repo_name": "Ecpy/ecpy_pulses",
"id": "f809679c312727ecb23ad68cd21229304eeef44c",
"size": "5771",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "exopy_pulses/pulses/pulse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "155"
},
{
"name": "Python",
"bytes": "377974"
},
{
"name": "Shell",
"bytes": "407"
}
],
"symlink_target": ""
} |
from components.component import valid_components
class GameObject(object):
    """
    A bag of components plus a lightweight observer/responder message bus.

    Components are stored under their ``NAME``. Observers receive one-way
    broadcast messages; responders answer queries and their return values
    are collected into a list.
    """

    def __init__(self):
        # Component NAME -> component instance.
        self.components = {}
        # Message type -> list of (observer, callback) pairs.
        self.observers = {}
        # Query type -> list of (responder, callback) pairs.
        self.responders = {}

    def copy_to(self, new_game_object):
        """Register a copy of every component on *new_game_object*; return it."""
        for component in self.components.values():
            new_game_object.register_component(component.copy())
        return new_game_object

    def get_component(self, component_name):
        """Return the component registered under *component_name*, or None."""
        return self.components.get(component_name, None)

    def update(self):
        """Tick every attached component once."""
        for component in self.components.values():
            component.update()

    def transmit_message(self, sender, message_type, **kwargs):
        """Deliver *kwargs* to every observer of *message_type* except *sender*."""
        if message_type in self.observers:
            for observer, func in self.observers[message_type]:
                if observer != sender:
                    func(**kwargs)

    def transmit_query(self, sender, query_type, **kwargs):
        """Collect and return responses to *query_type* from everyone but *sender*."""
        responses = []
        if query_type in self.responders:
            for responder, func in self.responders[query_type]:
                if responder != sender:
                    responses.append(func(**kwargs))
        return responses

    def register_observer(self, observer, message_type, func):
        """Subscribe (observer, func) to *message_type*; duplicates are ignored.

        Fixed: the duplicate check used to compare ``func`` against the list
        of (observer, func) tuples, which never matched, so the same pair
        could be registered repeatedly.
        """
        entries = self.observers.setdefault(message_type, [])
        if (observer, func) not in entries:
            entries.append((observer, func))

    def register_query_responder(self, responder, query_type, func):
        """Subscribe (responder, func) to *query_type*; duplicates are ignored.

        Same duplicate-check fix as register_observer.
        """
        entries = self.responders.setdefault(query_type, [])
        if (responder, func) not in entries:
            entries.append((responder, func))

    def register_component(self, component):
        """Attach *component*, replacing any component with the same NAME.

        Fixed: when replacing, the *existing* component is unregistered
        (receiving its ``on_unregister`` callback). Previously the incoming
        component — which was never registered — was unregistered instead,
        so the old component's ``on_unregister`` never ran.
        """
        if component.NAME in self.components:
            self.unregister_component(self.components[component.NAME])
        self.components[component.NAME] = component
        component.on_register(self)

    def unregister_component(self, component):
        """Detach *component* (looked up by NAME) and notify it."""
        if component.NAME in self.components:
            component.on_unregister()
            del self.components[component.NAME]

    def __getattr__(self, item):
        """Expose registered components as attributes.

        Unknown component names yield a falsy NoneVoid so callers can duck
        type into possibly-missing components.
        """
        if item in valid_components:
            component = self.get_component(item)
            if component:
                return component
            return NoneVoid()
        raise AttributeError(item)
class NoneVoid(object):
    """
    Falsy placeholder standing in for an absent component.

    Every attribute looked up on it comes back as ``None`` and the object
    itself evaluates to ``False``, which lets callers duck type into
    components that may not be attached.
    """

    def __getattr__(self, name):
        return None

    def __bool__(self):
        return False
| {
"content_hash": "c5de6b4393534069082e4d941c2c54dd",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 91,
"avg_line_length": 32.617283950617285,
"alnum_prop": 0.6180923542770629,
"repo_name": "jpalladino84/roguelike-game",
"id": "6e8b4114885111ae7b77773d2c8edb8e89ef8802",
"size": "2642",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "components/game_object.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "153072"
}
],
"symlink_target": ""
} |
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
'''
This package provides commonly used methods for dealing with file operations,
including working with network file systems such as S3 and HTTP.
'''
import os
import sys
import time
import boto
import boto.s3
import tarfile
import logging
import shutil
import tempfile
import subprocess
from subprocess import PIPE
from boto.exception import S3ResponseError
# Module-level logger shared by all helpers in this file.
__logger__ = logging.getLogger(__name__)
# Number of attempts for retried network operations (S3/HDFS/local copy).
__RETRY_TIMES = 5
# Pause between consecutive retry attempts.
__SLEEP_SECONDS_BETWEEN_RETRIES = 2
# Name of the environment variable that, when set to 'PredictiveService',
# switches S3 transfers from awscli to boto (see _use_boto below).
__GRAPHLAB_S3_USE_BOTO = "GRAPHLAB_UNIT_TEST"
def _use_boto():
    """Return True when the env var named by __GRAPHLAB_S3_USE_BOTO is set
    to 'PredictiveService', selecting boto (not awscli) for S3 transfers."""
    return os.environ.get(__GRAPHLAB_S3_USE_BOTO) == 'PredictiveService'
def get_protocol(path):
    '''Given a path, returns the protocol the path uses

    For example,
      's3://a/b/c/'  returns 's3'
      'http://a/b/c' returns 'http'
      'tmp/a/bc/'    returns ''
    '''
    scheme, separator, _remainder = path.partition('://')
    return scheme.lower() if separator else ''
def is_path(string):
    """Return True if *string* is a str naming a local or S3 path."""
    return isinstance(string, str) and \
        (is_local_path(string) or is_s3_path(string))
def mkdir(path):
    """Create the directory at *path* (local or HDFS); raise ValueError on
    unsupported protocols. S3 needs no directories, so it is a no-op."""
    if is_local_path(path):
        os.makedirs(path)
        return
    if is_hdfs_path(path):
        hdfs_mkdir(path)
        return
    if is_s3_path(path):
        # no need to make any directory
        return
    raise ValueError('Unsupported protocol %s' % path)
def exists(path, aws_credentials = {}):
    """Return True if *path* (local, HDFS or S3) exists."""
    if is_local_path(path):
        return os.path.exists(path)
    if is_hdfs_path(path):
        return hdfs_test_url(path)
    if is_s3_path(path):
        return s3_test_url(path, aws_credentials=aws_credentials)
    raise ValueError('Unsupported protocol %s' % path)
def touch(path):
    """Create an empty file at *path* if missing, refreshing its timestamp
    when it already exists (local, HDFS or S3)."""
    if is_local_path(path):
        # Opening in append mode creates the file without truncating it.
        with open(path, 'a'):
            os.utime(path, None)
        return
    if is_hdfs_path(path):
        hdfs_touch(path)
        return
    if is_s3_path(path):
        s3_touch(path)
        return
    raise ValueError('Unsupported protocol %s' % path)
def read(path):
    """Return the whole contents of the file at *path* as a string."""
    if is_local_path(path):
        return open(path).read()
    if is_hdfs_path(path):
        return read_file_to_string_hdfs(path)
    if is_s3_path(path):
        return read_file_to_string_s3(path)
    raise ValueError('Unsupported protocol %s' % path)
def copy_from_local(localpath, remotepath):
    """Copy a local file to *remotepath*, which may be HDFS, S3 or another
    local path."""
    if is_hdfs_path(remotepath):
        upload_to_hdfs(localpath, remotepath)
        return
    if is_s3_path(remotepath):
        upload_to_s3(localpath, remotepath)
        return
    if is_local_path(remotepath):
        shutil.copy(localpath, remotepath)
        return
    raise ValueError('Unsupported protocol %s' % remotepath)
def is_local_path(path):
    '''Returns True if the path indicates a local path, otherwise False'''
    # Anything that is not one of the known remote protocols counts as local.
    return get_protocol(path) not in ('hdfs', 's3', 'http', 'https')
def is_s3_path(path):
    '''Returns True if the path indicates a s3 path, otherwise False'''
    return get_protocol(path) == 's3'
def is_hdfs_path(path):
    '''Returns True if the path indicates an hdfs path, otherwise False'''
    return get_protocol(path) == 'hdfs'
def upload_to_local(src_path, dst_path, is_dir=False, silent=False):
    '''Copies a file/dir to a local path.

    Validates that src_path exists and matches is_dir, that dst_path is
    local, then copies with up to __RETRY_TIMES attempts, sleeping
    __SLEEP_SECONDS_BETWEEN_RETRIES between attempts. Re-raises the last
    exception if every attempt fails.
    '''
    if not silent:
        __logger__.info('Uploading local path %s to path: %s' % (src_path, dst_path))
    if not os.path.exists(src_path):
        raise RuntimeError("Cannot find file/path: %s" % src_path)
    if not is_dir and not os.path.isfile(src_path):
        raise RuntimeError("Path %s is not a file" % src_path)
    if is_dir and not os.path.isdir(src_path):
        raise RuntimeError("Path %s is not a directory" % src_path)
    if not is_local_path(dst_path):
        raise RuntimeError("Path %s is not a valid dest path" % dst_path)
    # now upload
    num_retries = 0
    while num_retries < __RETRY_TIMES:
        try:
            if is_dir:
                # NOTE(review): a retry after a partially completed copytree
                # will fail because the destination already exists — confirm
                # whether retries are expected to succeed for directories.
                shutil.copytree(src_path, dst_path)
            else:
                shutil.copy(src_path, dst_path)
            # We are done
            if not silent:
                __logger__.info("Successfully uploaded to path %s" % dst_path)
            break
        except Exception as e:
            num_retries = num_retries + 1
            __logger__.info("Error hit while copying file: %s" % e)
            __logger__.info("Retrying %s out of %s" % (num_retries, __RETRY_TIMES))
            # Out of attempts: surface the last error to the caller.
            if num_retries == __RETRY_TIMES:
                raise e
            time.sleep(__SLEEP_SECONDS_BETWEEN_RETRIES)
def read_file_to_string_s3(s3_path, max_size=None, aws_credentials={}):
    ''' Read a file in s3 to a string.

    Returns None when the key cannot be resolved. Raises RuntimeError when
    max_size is given and the object is larger than max_size bytes.
    '''
    if not is_s3_path(s3_path):
        raise RuntimeError("Path %s is not a valid s3 path" % s3_path)
    k = _get_s3_key(s3_path, aws_credentials=aws_credentials)
    if k:
        # open_read populates key metadata (notably k.size) before the
        # size check below. NOTE: `long` makes this Python-2-only code.
        k.open_read()
        if max_size and long(k.size) > max_size:
            raise RuntimeError("Cannot read file greater than max size %s." % str(max_size))
        return k.get_contents_as_string()
    return None
def list_s3(s3_path, aws_credentials={}):
    '''List every object stored under the *s3_path* prefix.

    Returns a list of {"path": full_s3_path, "size": size_string} dicts,
    skipping zero-size placeholder keys.
    '''
    if not is_s3_path(s3_path):
        raise RuntimeError("Path %s is not a valid s3 path" % s3_path)

    bucket_name, key_prefix = parse_s3_path(s3_path)
    connection = boto.connect_s3(**aws_credentials)
    bucket = connection.get_bucket(bucket_name)

    # Listing by prefix yields the keys of all files below this "directory".
    return [
        {"path": os.path.join("s3://", bucket.name, key.name),
         "size": str(key.size)}
        for key in bucket.list(key_prefix)
        if key.size
    ]
def upload_to_s3(local_path, s3_path, is_dir=False, aws_credentials={}, silent=False):
    '''Upload a local file or directory to s3.

    Uses boto key-by-key uploads when _use_boto() is true, otherwise shells
    out to awscli. Retries up to __RETRY_TIMES with a sleep between
    attempts, re-raising the last error when all attempts fail.
    '''
    if not silent:
        __logger__.info('Uploading local path %s to s3 path: %s' % (local_path, s3_path))
    if not os.path.exists(local_path):
        raise RuntimeError("Cannot find file: %s" % local_path)
    if not is_dir and not os.path.isfile(local_path):
        raise RuntimeError("Path %s is not a file" % local_path)
    if is_dir and not os.path.isdir(local_path):
        raise RuntimeError("Path %s is not a directory" % local_path)
    if not is_s3_path(s3_path):
        raise RuntimeError("Path %s is not a valid s3 path" % s3_path)
    # now upload
    num_retries = 0
    while num_retries < __RETRY_TIMES:
        try:
            # Use BOTO
            if _use_boto():
                # Relative and absolute paths. S3 paths are relative while
                # file paths on the local machine are not.
                abs_path = os.path.abspath(local_path)
                # Remove files that are already there.
                s3_recursive_delete(s3_path)
                # Get all the files.
                # Maps destination S3 key -> local file path to upload.
                uploadFileNames = {}
                if os.path.isdir(abs_path):
                    for (source_dir, sub_dirs, files) in os.walk(abs_path):
                        for f in files:
                            path = os.path.join(source_dir, f)
                            key = '%s/%s' % (s3_path, os.path.relpath(path, abs_path))
                            uploadFileNames[key] = path
                else:
                    key = s3_path
                    uploadFileNames[key] = abs_path
                # Upload each file
                for key, path in uploadFileNames.items():
                    k = _get_s3_key(key, aws_credentials)
                    __logger__.info("Uploading %s to %s." % (path, key))
                    k.set_contents_from_filename(
                        path,
                        num_cb=10)
            # Use awscli
            else:
                _awscli_s3_op('cp', local_path, s3_path, recursive=is_dir,
                              silent = silent, aws_credentials=aws_credentials)
            # We are done
            if not silent:
                __logger__.info("Successfully uploaded to s3 path %s" % s3_path)
            break
        except Exception as e:
            num_retries = num_retries + 1
            __logger__.info("Error hit while uploading to S3: %s" % e)
            __logger__.info("Retrying %s out of %s" % (num_retries, __RETRY_TIMES))
            # Out of attempts: surface the last error to the caller.
            if num_retries == __RETRY_TIMES:
                raise e
            time.sleep(__SLEEP_SECONDS_BETWEEN_RETRIES)
def get_s3_bucket_region(s3_bucket_name, aws_credentials={}):
    """Return the region of an S3 bucket, defaulting to 'us-east-1' when
    the bucket reports the empty "US standard" location."""
    connection = boto.connect_s3(**aws_credentials)
    location = connection.get_bucket(s3_bucket_name).get_location()
    return location or "us-east-1"  # default=us-standard
def _is_valid_s3_key(s3_path, aws_credentials={}):
    """Return True if *s3_path* refers to an existing S3 key.

    Fixed: the ``aws_credentials`` argument was accepted but silently
    ignored, so lookups always used the default credential chain.
    """
    key_prefix = _get_s3_key(s3_path, aws_credentials=aws_credentials)
    return key_prefix.exists()
def download_from_s3(s3_path, local_path, is_dir=False, aws_credentials={}, silent=False):
    '''Download a file or directory from S3 to *local_path*.

    Uses boto when _use_boto() is true, otherwise awscli. Retries up to
    __RETRY_TIMES with a sleep between attempts, re-raising the last error
    when all attempts fail.
    '''
    # NOTE(review): `local_file` is assigned but never used below — confirm
    # whether expand_full_path was meant to replace local_path here.
    local_file = expand_full_path(local_path)
    if not silent:
        __logger__.info('Downloading %s from s3 to local path %s' % (s3_path, local_path))
    if not is_local_path(local_path):
        raise RuntimeError("Invalid path: %s" % local_path)
    if os.path.exists(local_path):
        __logger__.debug("Overwriting file/path %s" % local_path)
    if not is_s3_path(s3_path):
        raise RuntimeError("Path %s is not a valid S3 path" % s3_path)
    # now download
    num_retries = 0
    while num_retries < __RETRY_TIMES:
        try:
            # check if should use boto
            if _use_boto():
                key_prefix = _get_s3_key(s3_path)
                bucket = key_prefix.bucket
                abs_path = os.path.abspath(local_path)
                # Download to a file
                if not is_dir:
                    if not key_prefix.exists():
                        raise RuntimeError("Key %s does not exist." % s3_path)
                    key_prefix.get_contents_to_filename(
                        abs_path,
                        cb = _s3_callback,
                        num_cb = 10)
                # Download to a directory.
                else:
                    # Get all the keys with the same prefix.
                    keys = []
                    for k in bucket.list(prefix = key_prefix.key):
                        keys.append(k)
                    if len(keys) == 0:
                        raise RuntimeError("Key %s does not exist." % s3_path)
                    # Make a new directory.
                    if os.path.exists(local_path):
                        shutil.rmtree(local_path)
                    os.makedirs(local_path)
                    for k in keys:
                        # Key-name relative to directory name.
                        s3_key = 's3://%s/%s' % (k.bucket.name, k.key)
                        rel_file = os.path.relpath(s3_key, s3_path)
                        path = '/'.join([abs_path, rel_file])
                        dirname = os.path.dirname(path)
                        # This is a directory. Make it.
                        if not os.path.exists(dirname):
                            os.makedirs(dirname)
                        k.get_contents_to_filename(
                            path,
                            cb = _s3_callback,
                            num_cb = 10)
            else:
                # use awscli
                _awscli_s3_op('cp', s3_path, local_path, recursive=is_dir, aws_credentials=aws_credentials,
                              silent=silent)
            __logger__.debug("Successfully downloaded file %s from s3" % s3_path)
            break
        except Exception as e:
            num_retries = num_retries + 1
            if not silent:
                __logger__.info("Error hit while download from S3: %s" % e)
                __logger__.info("Retrying %s out of %s" % (num_retries, __RETRY_TIMES))
            # Out of attempts: surface the last error to the caller.
            if num_retries == __RETRY_TIMES:
                raise e
            time.sleep(__SLEEP_SECONDS_BETWEEN_RETRIES)
def parse_s3_path(path):
    '''Parse a s3 path to bucket name and path name

    Parameters
    -----------
    path : str
      s3 path like: s3://bucket_name/path/to/somewhere

    Returns
    --------
    out : (bucket_name, path_name)
    '''
    if not is_s3_path(path):
        raise ValueError('path is not a s3 path: %s' % path)

    # 's3://bucket/a/b' splits into ['s3:', '', 'bucket', 'a', 'b'].
    pieces = path.split('/')
    bucket_name = pieces[2]
    key_name = '/'.join(pieces[3:])

    # Drop a single trailing '/' if present.
    if key_name.endswith('/'):
        key_name = key_name[:-1]

    return (bucket_name, key_name)
def s3_delete_key(s3_bucket_name, s3_key_name, aws_credentials = {}):
    """Delete a single key from an S3 bucket (no-op if the key is absent)."""
    connection = boto.connect_s3(**aws_credentials)
    # validate=False skips the bucket-existence round trip.
    target_bucket = connection.get_bucket(s3_bucket_name, validate=False)
    target_bucket.delete_key(s3_key_name)
def s3_touch(path, aws_credentials = {}):
    """
    Create an empty file in s3 by uploading a fresh temporary file.
    """
    with tempfile.NamedTemporaryFile() as empty_file:
        upload_to_s3(empty_file.name, path,
                     aws_credentials=aws_credentials, silent=True)
def read_file_to_string_hdfs(hdfs_path, max_size=None, hadoop_conf_dir=None):
    ''' Read a file from hdfs and return the string content.

    Returns None when the path does not resolve to exactly one file or when
    the `hadoop fs -cat` command fails. Raises RuntimeError when max_size
    is given and the file is larger than max_size bytes.
    '''
    # list the file first, should be a single file
    file_info = list_hdfs(hdfs_path, hadoop_conf_dir=hadoop_conf_dir)
    if not file_info or len(file_info) != 1:
        return None
    # NOTE: `long` makes this Python-2-only code.
    if max_size and long(file_info[0]['size']) > max_size:
        raise RuntimeError("Cannot read file larger than max size %s." % str(max_size))
    base_command = 'hadoop fs -cat '
    if hadoop_conf_dir:
        base_command = 'hadoop --config %s fs -cat ' % hadoop_conf_dir
    base_command += '\"%s\" ' % hdfs_path
    exit_code, stdo, stde = _hdfs_exec_command(base_command)
    if exit_code == 0:
        return stdo
    return None
def remove_hdfs(hdfs_path, hadoop_conf_dir=None, recursive=False):
    '''
    Remove the file(s) at the given HDFS path.

    Parameters
    -----------
    hdfs_path : str
        the HDFS path to delete
    hadoop_conf_dir : str, optional
        alternate hadoop configuration directory to pass via --config
    recursive : bool, optional
        when True, also delete directories and their contents (-r)

    Raises RuntimeError when the hadoop command exits non-zero.
    '''
    base_command = 'hadoop fs -rm -f '
    if hadoop_conf_dir:
        base_command = 'hadoop --config %s fs -rm -f ' % hadoop_conf_dir
    if recursive:
        base_command += '-r '
    base_command += '\"%s\" ' % hdfs_path
    exit_code, stdo, stde = _hdfs_exec_command(base_command)
    if exit_code != 0:
        # include stderr too: hadoop reports its errors there, not on stdout
        raise RuntimeError('Error encounted trying to run the following command: \n%s\n, '
                           'Output from the command: \n%s\nStderr from the command: \n%s'
                           % (base_command, stdo, stde))
def list_hdfs(hdfs_path, hadoop_conf_dir=None):
    '''List the files under an HDFS path.

    Returns a list of {"path": ..., "size": ...} dicts on success, or None
    when the underlying hadoop command fails.
    '''
    if hadoop_conf_dir:
        ls_command = 'hadoop --config %s fs -ls ' % hadoop_conf_dir
    else:
        ls_command = 'hadoop fs -ls '
    ls_command += '\"%s\" ' % hdfs_path
    exit_code, stdo, stde = _hdfs_exec_command(ls_command)
    if exit_code != 0:
        return None
    entries = []
    for line in stdo.split("\n"):
        # skip blank lines and the "Found N items" banner
        if not line or line.startswith("Found"):
            continue
        # columns before the path hold the metadata; text after it is the
        # entry name relative to hdfs_path
        parts = line.split(hdfs_path)
        meta_columns = parts[0].split()
        entries.append({"path": hdfs_path + parts[1], "size": meta_columns[4]})
    return entries
def copy_to_hdfs(src_hdfs_path, dst_hdfs_path, hadoop_conf_dir=None, force=False):
    '''Copy between two HDFS locations via ``hadoop fs -cp``.'''
    if hadoop_conf_dir:
        cp_command = 'hadoop --config %s fs -cp ' % hadoop_conf_dir
    else:
        cp_command = 'hadoop fs -cp '
    if force:
        # overwrite the destination when it already exists
        cp_command += ' -f'
    cp_command += ' %s %s ' % (src_hdfs_path, dst_hdfs_path)
    exit_code, stdo, stde = _hdfs_exec_command(cp_command)
    if exit_code != 0:
        raise RuntimeError("Failed to copy hdfs %s -> hdfs %s: %s" % (src_hdfs_path, dst_hdfs_path, stde))
def upload_to_hdfs(local_path, hdfs_path, hadoop_conf_dir=None, force=False):
    '''Upload a local file into HDFS via ``hadoop fs -put``.'''
    if hadoop_conf_dir:
        put_command = 'hadoop --config %s fs -put ' % hadoop_conf_dir
    else:
        put_command = 'hadoop fs -put '
    if force:
        # overwrite the destination when it already exists
        put_command += ' -f'
    put_command += ' %s %s ' % (local_path, hdfs_path)
    exit_code, stdo, stde = _hdfs_exec_command(put_command)
    if exit_code != 0:
        raise RuntimeError("Failed to copy %s -> hdfs %s: %s" % (local_path, hdfs_path, stde))
def upload_folder_to_hdfs(local_path, hdfs_path, hadoop_conf_dir=None):
    '''Copy a local directory into HDFS and open up its permissions.'''
    if not os.path.isdir(local_path):
        raise RuntimeError("'%s' has to be a directory" % local_path)
    fs_command = 'hadoop fs '
    if hadoop_conf_dir:
        fs_command = 'hadoop --config %s fs ' % hadoop_conf_dir
    copy_command = '%s -copyFromLocal %s %s ' % (fs_command, local_path, hdfs_path)
    exit_code, stdo, stde = _hdfs_exec_command(copy_command)
    if exit_code != 0:
        raise RuntimeError("Failed to copy directory %s -> hdfs %s: %s" % (local_path, hdfs_path, stde))
    # make the uploaded tree world readable/writable
    chmod_command = fs_command + '-chmod -R a+rwx \"%s\"' % hdfs_path
    exit_code, stdo, stde = _hdfs_exec_command(chmod_command)
    if exit_code != 0:
        raise RuntimeError("error changing permissions %s" % stde)
# Todo -- maybe use copyToLocal instead /*
def download_from_hdfs(hdfs_path, local_path, hadoop_conf_dir=None, is_dir = False):
    '''Fetch a file (or, when is_dir=True, a directory's contents) from HDFS.'''
    if is_dir:
        # pull the directory's contents rather than the directory node itself
        hdfs_path = '%s/*' % hdfs_path
    if hadoop_conf_dir:
        get_command = 'hadoop --config %s fs -get \"%s\" \"%s\" ' % (hadoop_conf_dir, hdfs_path, local_path)
    else:
        get_command = 'hadoop fs -get \"%s\" \"%s\" ' % (hdfs_path, local_path)
    exit_code, stdo, stde = _hdfs_exec_command(get_command)
    if exit_code != 0:
        raise RuntimeError("Failed to get %s -> %s: %s" % (hdfs_path, local_path, stde))
    # belt-and-braces: confirm something actually landed locally
    if not os.path.exists(local_path):
        raise RuntimeError('local file %s not found' % local_path)
def hdfs_touch(path, hadoop_conf_dir=None):
    """
    Create an empty file in HDFS
    """
    command_prefix = 'hadoop'
    if hadoop_conf_dir:
        command_prefix += ' --config %s' % hadoop_conf_dir
    touchz_command = command_prefix + ' fs -touchz \"%s\"' % path
    exit_code, stdo, stde = _hdfs_exec_command(touchz_command)
    if exit_code != 0:
        raise RuntimeError("error creating file '%s', error: %s" % (path, stde))
def hdfs_mkdir(dirname, hadoop_conf_dir=None):
    '''
    Create a new directory in HDFS
    '''
    # accept a single path or a list of paths
    if isinstance(dirname, list):
        dirname = " ".join(dirname)
    command_prefix = 'hadoop'
    if hadoop_conf_dir:
        command_prefix += ' --config %s' % hadoop_conf_dir
    exit_code, stdo, stde = _hdfs_exec_command(
        command_prefix + ' fs -mkdir -p \"%s\"' % dirname)
    if exit_code != 0:
        raise RuntimeError("error making dir '%s', error: %s" % (dirname, stde))
    # open up permissions on the freshly created directory
    exit_code, stdo, stde = _hdfs_exec_command(
        command_prefix + ' fs -chmod a+rwx \"%s\"' % dirname)
    if exit_code != 0:
        raise RuntimeError("error changing permissions %s" % stde)
def _intra_s3_copy_model(s3_src_path, s3_dest_path, aws_credentials = {}):
    '''Copy every key under one s3 prefix to another prefix using boto.

    Parameters
    -----------
    s3_src_path : str
        source path, e.g. s3://bucket/path
    s3_dest_path : str
        destination path, e.g. s3://other_bucket/other/path
    aws_credentials : dict, optional
        keyword arguments forwarded to boto.connect_s3

    Both the listing and each individual key copy are retried up to
    __RETRY_TIMES times; the last error is re-raised when retries run out.
    '''
    assert(is_s3_path(s3_src_path) and is_s3_path(s3_dest_path))
    s3_src_bucket_name, s3_src_dir_name = parse_s3_path(s3_src_path)
    s3_dest_bucket_name, s3_dest_dir_name = parse_s3_path(s3_dest_path)
    conn = boto.connect_s3(**aws_credentials)
    s3_src_bucket = conn.get_bucket(s3_src_bucket_name, validate=False)
    s3_dest_bucket = conn.get_bucket(s3_dest_bucket_name, validate=False)
    # Get a list of all keys to copy
    num_retries = 0
    while num_retries < __RETRY_TIMES:
        try:
            keys_to_copy = s3_src_bucket.list(prefix=s3_src_dir_name)
            break
        except Exception as e:
            num_retries = num_retries + 1
            __logger__.info("Error hit while listing keys to S3: %s" % e)
            __logger__.info("Retrying %s out of %s" % (num_retries, __RETRY_TIMES))
            if num_retries == __RETRY_TIMES:
                raise e
            time.sleep(__SLEEP_SECONDS_BETWEEN_RETRIES)
    for k in keys_to_copy:
        k = k.name
        file_name = os.path.basename(k)
        new_key_name = s3_dest_dir_name + '/' + file_name
        # Do the actual copy
        num_retries = 0
        while num_retries < __RETRY_TIMES:
            try:
                s3_dest_bucket.copy_key(new_key_name, s3_src_bucket_name, k)
                break
            except Exception as e:
                num_retries = num_retries + 1
                # fixed log text: this loop copies keys (the old message
                # said "listing keys", copy/pasted from the loop above)
                __logger__.info("Error hit while copying keys in S3: %s" % e)
                __logger__.info("Retrying %s out of %s" % (num_retries, __RETRY_TIMES))
                if num_retries == __RETRY_TIMES:
                    raise e
                time.sleep(__SLEEP_SECONDS_BETWEEN_RETRIES)
def intra_s3_copy_model(s3_src_path, s3_dest_path, is_dir=False, aws_credentials = {}):
    '''
    Copy a model between two s3 locations. Pass is_dir=True to copy a whole
    directory; leave it at the default (False) to copy a single file.
    '''
    assert(is_s3_path(s3_src_path) and is_s3_path(s3_dest_path))
    # the boto code path performs the whole copy itself
    if _use_boto():
        _intra_s3_copy_model(s3_src_path, s3_dest_path, aws_credentials)
        return
    __logger__.info('Copying s3 path %s to s3 path %s' % (s3_src_path, s3_dest_path))
    # otherwise shell out to the aws cli, retrying on failure
    attempt = 0
    while attempt < __RETRY_TIMES:
        try:
            _awscli_s3_op('cp', s3_src_path, s3_dest_path, recursive=is_dir, aws_credentials=aws_credentials)
            __logger__.info("Successfully copied from s3 path %s to s3 path %s" % (s3_src_path, s3_dest_path))
            break
        except Exception as e:
            attempt = attempt + 1
            __logger__.info("Error hit while copying model from %s to %s: %s" % (s3_src_path, s3_dest_path, e))
            __logger__.info("Retrying %s out of %s" % (attempt, __RETRY_TIMES))
            if attempt == __RETRY_TIMES:
                raise e
            time.sleep(__SLEEP_SECONDS_BETWEEN_RETRIES)
def s3_copy_model(src_path, s3_dest_path, aws_credentials = {}):
    '''Upload a local model directory to s3 (file-by-file under boto).'''
    assert(is_local_path(src_path) and is_s3_path(s3_dest_path))
    if not _use_boto():
        # the aws cli pushes the whole directory in a single call
        upload_to_s3(src_path, s3_dest_path, is_dir=True, aws_credentials=aws_credentials)
        return
    # boto path: upload each file in the directory individually
    for base_file_name in os.listdir(src_path):
        upload_to_s3(os.path.join(src_path, base_file_name),
                     s3_dest_path + '/' + base_file_name,
                     aws_credentials=aws_credentials)
def s3_recursive_delete(s3_path, aws_credentials = {}):
    '''Delete every key under the given s3 prefix.'''
    (bucket_name, key_prefix) = parse_s3_path(s3_path)
    connection = boto.connect_s3(**aws_credentials)
    bucket = connection.get_bucket(bucket_name, validate=False)
    # list matching keys and remove them in one bulk call
    bucket.delete_keys([key.name for key in bucket.list(prefix=key_prefix)])
def expand_full_path(path):
    '''Expand a relative path to a full path
    For example,
    '~/tmp' may be expanded to '/Users/username/tmp'
    'abc/def' may be expanded to '/pwd/abc/def'
    '''
    # resolve '~' first, then anchor the result at the current directory
    user_expanded = os.path.expanduser(path)
    return os.path.abspath(user_expanded)
def _get_s3_key(s3_path, aws_credentials = {}):
    '''Given S3 path, get the key object that represents the path'''
    connection = boto.connect_s3(**aws_credentials)
    (bucket_name, key_path) = parse_s3_path(s3_path)
    bucket = connection.get_bucket(bucket_name, validate=False)
    key = boto.s3.key.Key(bucket)
    key.key = key_path
    return key
def _s3_callback(complete, total):
    """Progress callback: log percent transferred, skipping the 0-byte call."""
    if complete > 0:
        percent = int(1.0 * complete / total * 100)
        __logger__.info('%d%% completed.' % percent)
def _awscli_s3_op(op, src, dst=None, recursive=False, silent=False, is_test=False,
                  aws_credentials = {}):
    ''' use AWS command line interface for any operations to s3

    op (str): 'cp' (requires dst) or 'rm'.
    src, dst (str): s3 or local paths, passed straight to the aws cli.
    recursive / is_test / silent (bool): map to --recursive / --dryrun / --quiet.
    aws_credentials (dict): optional 'aws_access_key_id' and
        'aws_secret_access_key'; when absent, falls back to the
        AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY environment variables.

    Raises RuntimeError when the aws subprocess exits non-zero.
    '''
    # get aws credentials from input or environment variables
    aws_access_key_id = None
    aws_secret_access_key = None
    if 'aws_access_key_id' in aws_credentials and 'aws_secret_access_key' in aws_credentials:
        aws_access_key_id = aws_credentials['aws_access_key_id']
        aws_secret_access_key = aws_credentials['aws_secret_access_key']
    elif 'AWS_ACCESS_KEY_ID' in os.environ and 'AWS_SECRET_ACCESS_KEY' in os.environ:
        aws_access_key_id = os.environ['AWS_ACCESS_KEY_ID']
        aws_secret_access_key = os.environ['AWS_SECRET_ACCESS_KEY']
    # use subprocess to perform aws s3 ops
    # "acl" grants the bucket owner full permission regardless of the uploader's account
    arglist = ['aws', 's3']
    if sys.platform == 'win32':
        # on Windows the aws entry point is a .cmd wrapper script
        arglist[0] = arglist[0] + ".cmd"
    if op == 'cp':
        arglist.extend(['cp', src, dst, '--acl', 'bucket-owner-full-control'])
    elif op == 'rm':
        arglist.extend(['rm', src])
    if recursive:
        arglist.append('--recursive')
    if is_test:
        arglist.append('--dryrun')
    if silent:
        arglist.append('--quiet')
    # pass credentials to the child process through its environment
    env = os.environ.copy()
    if aws_access_key_id is not None and aws_secret_access_key is not None:
        env['AWS_ACCESS_KEY_ID'] = aws_access_key_id
        env['AWS_SECRET_ACCESS_KEY'] = aws_secret_access_key
    if 'PYTHONEXECUTABLE' in env:  # remove PYTHONEXECUTABLE for anaconda
        del env['PYTHONEXECUTABLE']
    # open subprocess
    proc = subprocess.Popen(arglist, env=env, stderr=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=-1)
    # read stdout/stderr and wait for subprocess to finish
    # 'prev' carries a partial (not yet newline-terminated) line across the
    # fixed-size 100-byte reads below so lines split mid-chunk re-assemble.
    # NOTE(review): this assumes proc.stdout yields str (Python 2); under
    # Python 3 read() returns bytes and the '' comparison never matches --
    # confirm the target runtime.
    prev = ''
    while True:
        line = proc.stdout.read(100)
        if line != '':
            # parse stdout from subprocess
            # normalize \r\n and bare \r (progress updates) into \n
            line = line.replace('\r\n', '\n').replace('\r', '\n')
            lines = line.split('\n')
            if len(lines) == 1:
                # no newline in this chunk: keep accumulating the partial line
                prev = prev + lines[0]
                continue
            else:
                # print status for each file, and total progress
                for index, l in enumerate(lines):
                    print_str = None
                    if index == 0:
                        # first piece completes the carried-over partial line
                        print_str = prev + l
                    elif index == (len(lines) - 1):
                        # last piece may itself be partial; carry it over
                        prev = l
                    else:
                        print_str = l
                    if print_str:
                        _awscli_print(print_str)
        else:
            break
    proc.wait()
    # check for error
    if proc.returncode != 0:  # error
        raise RuntimeError('AWS S3 operation failed')
def _awscli_print(line):
    """Render one line of aws-cli output, keeping progress updates on one row."""
    if line.startswith('Completed'):
        # progress lines overwrite themselves via a carriage return
        sys.stdout.write("\r%s" % line)
        sys.stdout.flush()
    else:
        # regular lines (file paths) get their own row
        sys.stdout.write('\r' + line + '\n')
def s3_test_url(s3_path, aws_credentials={}):
    """
    Check whether the given s3 key exists: True when it does, False otherwise.
    """
    conn = boto.connect_s3(**aws_credentials)
    (bucket_name, key_path) = parse_s3_path(s3_path)
    try:
        # both calls validate against the live service and may raise
        bucket = conn.get_bucket(bucket_name, validate=True)
        return bucket.get_key(key_path, validate=True) is not None
    except S3ResponseError:
        return False
def hdfs_test_url(hdfs_url, test='e', hadoop_conf_dir=None):
    """
    Run ``hadoop fs -test`` against a url.
    parameters
    ----------
    hdfs_url: string
        The hdfs url
    test: string, optional
        'e': existence
        'd': is a directory
        'z': zero length
        Default is an existence test, 'e'
    """
    if hadoop_conf_dir is None:
        command = "hadoop fs -test -%s %s" % (test, hdfs_url)
    else:
        command = "hadoop --config %s fs -test -%s %s" % (hadoop_conf_dir, test, hdfs_url)
    exit_code, stdo, stde = _hdfs_exec_command(command)
    # hadoop signals a passing test with exit status 0
    return exit_code == 0
def _hdfs_exec_command(command, silent = True):
    """
    Run a shell command, returning (exit_code, stdout, stderr).
    """
    if not silent:
        __logger__.info("Running hdfs command: \n%s" % command)
    process = subprocess.Popen(command, stdout=PIPE, stderr=PIPE, shell=True)
    stdout_text, stderr_text = process.communicate()
    return process.returncode, stdout_text, stderr_text
def retry(tries=3, delay=1, backoff=1, retry_exception=None):
    '''
    Retries a function or method until it has reached the maximum retries

    Parameters
    -----------
    tries : int, optional
        the number of times this function will retry
    delay : int, optional
        the number of seconds in delay to retry
    backoff : int, optional
        the number of factors by which the delay should increase after a retry
    retry_exception: Error, optional
        the type of error that only will trigger retries. Defaults to None so
        all types of error will trigger retries.

    This is derived from the original implementation of retry at:
    https://wiki.python.org/moin/PythonDecoratorLibrary#Retry
    '''
    def deco_retry(f):
        def f_retry(*args, **kargs):
            mtries, mdelay = tries, delay  # mutable per-call copies
            while mtries > 1:
                try:
                    return f(*args, **kargs)  # run function
                except Exception as e:
                    if retry_exception and not isinstance(e, retry_exception):
                        # Non-retryable error: propagate it immediately.
                        # (The previous code broke out and re-invoked f just
                        # to re-raise, running its side effects a second time.)
                        raise
                    mtries -= 1  # one attempt used up
                    time.sleep(mdelay)  # delay to next retry
                    mdelay *= backoff  # increase delay
            return f(*args, **kargs)  # final attempt; exceptions propagate
        return f_retry
    return deco_retry
| {
"content_hash": "318481ec3a3887a99818c19d8f9d200b",
"timestamp": "",
"source": "github",
"line_count": 840,
"max_line_length": 109,
"avg_line_length": 33.319047619047616,
"alnum_prop": 0.616407031584965,
"repo_name": "nkhuyu/SFrame",
"id": "64c5e4cfe9488816ade3872f0be2089f9cef7d89",
"size": "27988",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "oss_src/unity/python/sframe/util/file_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "144591"
},
{
"name": "C++",
"bytes": "11412408"
},
{
"name": "CMake",
"bytes": "102271"
},
{
"name": "CSS",
"bytes": "127000"
},
{
"name": "HTML",
"bytes": "24575"
},
{
"name": "Hack",
"bytes": "277"
},
{
"name": "JavaScript",
"bytes": "20909"
},
{
"name": "Makefile",
"bytes": "9614"
},
{
"name": "Perl",
"bytes": "9663"
},
{
"name": "Python",
"bytes": "1947961"
},
{
"name": "R",
"bytes": "86286"
},
{
"name": "Scala",
"bytes": "5232"
},
{
"name": "Shell",
"bytes": "48586"
},
{
"name": "Smarty",
"bytes": "966"
},
{
"name": "XSLT",
"bytes": "74068"
}
],
"symlink_target": ""
} |
#from sumatra.projects import load_project
#from sumatra.parameters import build_parameters
#from sumatra.decorators import capture
from ruffus import *
import sys
import os
import time
import datetime
import drmaa
import csv
from omics_pipe.utils import *
from omics_pipe.modules.fastqc import fastqc
from omics_pipe.modules.star import star
from omics_pipe.modules.htseq import htseq
from omics_pipe.modules.RNAseq_report_counts import RNAseq_report_counts
from omics_pipe.parameters.default_parameters import default_parameters
# Bind the pipeline's default parameter dict to attribute-style access.
p = Bunch(default_parameters)
os.chdir(p.WORKING_DIR)
# Timestamp used later to name the pipeline-graph PDF.
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d %H:%M")
# Read sample names from the first column of the DESeq metadata CSV,
# skipping the header row.
with open(p.DESEQ_META,"rb") as infile:
    next(infile, None)
    reader=csv.reader(infile)
    samples = [x[0] for x in reader]
# For every pipeline step, build a module-level list inputList_<step> of
# [sample, flag-file path] pairs consumed by the @parallel decorators below.
for step in p.STEPS:
    vars()['inputList_' + step] = []
    for sample in samples:
        vars()['inputList_' + step].append([sample, "%s/%s_%s_completed.flag" % (p.FLAG_PATH, step, sample)])
@parallel(inputList_fastqc)
@check_if_uptodate(check_file_exists)
def run_fastqc(sample, fastqc_flag):
    """Run FastQC for one sample; the flag file marks completion."""
    fastqc(sample, fastqc_flag)
    return
@parallel(inputList_star)
@check_if_uptodate(check_file_exists)
def run_star(sample, star_flag):
    """Run the STAR aligner for one sample; the flag file marks completion."""
    star(sample, star_flag)
    return
@follows(run_star)
@parallel(inputList_htseq)
@check_if_uptodate(check_file_exists)
def run_htseq(sample, htseq_flag):
    """Run htseq-count for one sample; depends on STAR alignment output."""
    htseq(sample, htseq_flag)
    return
@follows(run_fastqc, run_htseq)
@parallel([["report", "%s/RNAseq_report_report_completed.flag" % (p.FLAG_PATH)]])
@check_if_uptodate(check_file_exists)
def run_RNAseq_report_counts(sample, RNAseq_report_counts_flag):
    """Build the count-based RNAseq report once QC and counting are done."""
    RNAseq_report_counts(sample, RNAseq_report_counts_flag)
    return
@follows(run_RNAseq_report_counts)
@parallel([["combined", "%s/last_function_combined_completed.flag" % (p.FLAG_PATH)]])
@check_if_uptodate(check_file_exists)
def last_function(sample, last_function_flag):
    """Final task: render the pipeline graph PDF and drop the completion flag."""
    print "PIPELINE HAS FINISHED SUCCESSFULLY!!! YAY!"
    pipeline_graph_output = p.FLAG_PATH + "/pipeline_" + sample + "_" + str(date) + ".pdf"
    # NOTE(review): 'step' here is the leftover loop variable from the
    # module-level STEPS loop above (i.e. the last step name) -- confirm
    # this is the intended target argument for pipeline_printout_graph.
    pipeline_printout_graph (pipeline_graph_output,'pdf', step, no_key_legend=False)
    stage = "last_function"
    flag_file = "%s/%s_completed.flag" % (p.FLAG_PATH, stage)
    open(flag_file, 'w').close()
    return
if __name__ == '__main__':
    # Launch the ruffus pipeline up to the configured step(s).
    pipeline_run(p.STEP, multiprocess = p.PIPE_MULTIPROCESS, verbose = p.PIPE_VERBOSE, gnu_make_maximal_rebuild_mode = p.PIPE_REBUILD)
| {
"content_hash": "ef91ff229c298472cc8462e026b5dff7",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 134,
"avg_line_length": 32.67948717948718,
"alnum_prop": 0.6916437818752452,
"repo_name": "adammaikai/OmicsPipe2.0",
"id": "f4a878774936531a17923247f077e221b4873d83",
"size": "2572",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/lib.linux-x86_64-2.7/omics_pipe/RNAseq_count_based.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9660"
},
{
"name": "Groff",
"bytes": "126"
},
{
"name": "Perl",
"bytes": "3396"
},
{
"name": "Python",
"bytes": "543104"
},
{
"name": "R",
"bytes": "342554"
},
{
"name": "Shell",
"bytes": "260672"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management.libraries.functions.default import default
from params_linux import *
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False) | {
"content_hash": "dcb3119b700e8687426d26ef5cd74af5",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 72,
"avg_line_length": 37.84,
"alnum_prop": 0.7970401691331924,
"repo_name": "alexryndin/ambari",
"id": "89ab726f0bd85e9e98c9e9eac0c63ddbefbfb6fb",
"size": "946",
"binary": false,
"copies": "4",
"ref": "refs/heads/branch-adh-1.5",
"path": "ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/params.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "44884"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "786184"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "89958"
},
{
"name": "HTML",
"bytes": "2514774"
},
{
"name": "Java",
"bytes": "29565801"
},
{
"name": "JavaScript",
"bytes": "19033151"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "316489"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "17215686"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "33764"
},
{
"name": "SQLPL",
"bytes": "4277"
},
{
"name": "Shell",
"bytes": "886011"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
import logging
from django.db import migrations
from django.db.models import Q
logger = logging.getLogger(__name__)
def update_record(related_record, parent_record):
    """Backfill blank audit-user fields on a child record from its parent."""
    for attr_name in ('update_user', 'create_user'):
        if not getattr(related_record, attr_name):
            setattr(related_record, attr_name, getattr(parent_record, attr_name))
def fix_idir(apps, schema_editor):
    """Backfill blank create_user/update_user on well-related records from
    their parent activity submission or well."""
    model_names = ['LithologyDescription', 'Casing', 'Screen',
                   'LinerPerforation', 'DecommissionDescription']
    models = [apps.get_model('wells', name) for name in model_names]
    for model in models:
        logger.info('Fixing bad idir info on {}'.format(model))
        from_well = 0
        from_submission = 0
        blank_user = Q(update_user='') | Q(create_user='')
        for record in model.objects.filter(blank_user):
            if record.activity_submission:
                update_record(record, record.activity_submission)
                from_submission += 1
            elif record.well:
                update_record(record, record.well)
                from_well += 1
            # save unconditionally, matching the original behaviour even
            # when no parent was found
            record.save()
        logger.info('{} well linked records updated'.format(from_well))
        logger.info('{} submission linked records updated'.format(from_submission))
def reverse(apps, schema_editor):
    """No-op reverse migration: the audit-field backfill cannot be undone."""
    pass
class Migration(migrations.Migration):
    """Data migration: backfill blank audit users (forward only)."""

    # Runs after the squashed schema migration for the wells app.
    dependencies = [
        ('wells', '0001_squashed_0079_auto_20190506_1959'),
    ]

    # fix_idir does the work; reverse is a declared no-op.
    operations = [
        migrations.RunPython(fix_idir, reverse),
    ]
| {
"content_hash": "ee0db07fb26e7f67a27fdbae69b4e996",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 94,
"avg_line_length": 33.38181818181818,
"alnum_prop": 0.6672113289760349,
"repo_name": "bcgov/gwells",
"id": "b29a9a0746bff7e502d5cbd1bcdd2d0c7afcc4f9",
"size": "1884",
"binary": false,
"copies": "1",
"ref": "refs/heads/release",
"path": "app/backend/wells/migrations/0080_fix_bad_idir.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "519"
},
{
"name": "Dockerfile",
"bytes": "4104"
},
{
"name": "Groovy",
"bytes": "89156"
},
{
"name": "HTML",
"bytes": "10079"
},
{
"name": "JavaScript",
"bytes": "271010"
},
{
"name": "Makefile",
"bytes": "807"
},
{
"name": "Python",
"bytes": "1550542"
},
{
"name": "SCSS",
"bytes": "7409"
},
{
"name": "Shell",
"bytes": "46319"
},
{
"name": "Vue",
"bytes": "833800"
}
],
"symlink_target": ""
} |
"""The Core Storage (CS) file system implementation."""
import pyfvde
from dfvfs.lib import cs_helper
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import cs_path_spec
from dfvfs.resolver import resolver
from dfvfs.vfs import cs_file_entry
from dfvfs.vfs import file_system
class CSFileSystem(file_system.FileSystem):
  """Core Storage (CS) file system backed by pyfvde."""

  TYPE_INDICATOR = definitions.TYPE_INDICATOR_CS

  def __init__(self, resolver_context, path_spec):
    """Initializes a file system.

    Args:
      resolver_context (Context): resolver context.
      path_spec (PathSpec): a path specification.
    """
    super(CSFileSystem, self).__init__(resolver_context, path_spec)
    self._file_object = None
    self._fvde_volume = None
    self._fvde_volume_group = None

  def _Close(self):
    """Closes the file system.

    Raises:
      IOError: if the close failed.
    """
    # Release the volume group before closing the volume it came from.
    self._fvde_volume_group = None
    self._fvde_volume.close()
    self._fvde_volume = None
    self._file_object = None

  def _Open(self, mode='rb'):
    """Opens the file system defined by path specification.

    Args:
      mode (Optional[str]): file access mode. The default is 'rb'
          read-only binary.

    Raises:
      AccessError: if the access to open the file was denied.
      IOError: if the file system could not be opened.
      PathSpecError: if the path specification is incorrect.
      ValueError: if the path specification is invalid.
    """
    if not self._path_spec.HasParent():
      raise errors.PathSpecError(
          'Unsupported path specification without parent.')

    resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(self._path_spec)

    parent_file_object = resolver.Resolver.OpenFileObject(
        self._path_spec.parent, resolver_context=self._resolver_context)

    fvde_volume = pyfvde.volume()
    # The optional encrypted root plist credential must be supplied to
    # pyfvde before the volume is opened.
    root_plist = resolver.Resolver.key_chain.GetCredential(
        self._path_spec, 'encrypted_root_plist')
    if root_plist:
      fvde_volume.read_encrypted_root_plist(root_plist)
    fvde_volume.open_file_object(parent_file_object)
    # TODO: implement multi physical volume support.
    fvde_volume.open_physical_volume_files_as_file_objects(
        [parent_file_object])

    self._file_object = parent_file_object
    self._fvde_volume = fvde_volume
    self._fvde_volume_group = fvde_volume.get_volume_group()

  def FileEntryExistsByPathSpec(self, path_spec):
    """Determines if a file entry for a path specification exists.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      bool: True if the file entry exists.
    """
    volume_index = cs_helper.CSPathSpecGetVolumeIndex(path_spec)
    if volume_index is None:
      # Only the virtual root is addressed by location instead of a
      # volume index.
      location = getattr(path_spec, 'location', None)
      if location is None:
        return False
      return location == self.LOCATION_ROOT

    number_of_volumes = self._fvde_volume_group.number_of_logical_volumes
    return 0 <= volume_index < number_of_volumes

  def GetFileEntryByPathSpec(self, path_spec):
    """Retrieves a file entry for a path specification.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      CSFileEntry: a file entry or None if not available.
    """
    volume_index = cs_helper.CSPathSpecGetVolumeIndex(path_spec)
    if volume_index is None:
      # The virtual root is addressed by location instead of a volume index.
      location = getattr(path_spec, 'location', None)
      if location is None or location != self.LOCATION_ROOT:
        return None
      return cs_file_entry.CSFileEntry(
          self._resolver_context, self, path_spec, is_root=True,
          is_virtual=True)

    number_of_volumes = self._fvde_volume_group.number_of_logical_volumes
    if not 0 <= volume_index < number_of_volumes:
      return None
    return cs_file_entry.CSFileEntry(
        self._resolver_context, self, path_spec)

  def GetFVDELogicalVolumeByPathSpec(self, path_spec):
    """Retrieves a Core Storage logical volume for a path specification.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      pyfvde.logical_volume: a Core Storage logical volume or None if not
          available.
    """
    volume_index = cs_helper.CSPathSpecGetVolumeIndex(path_spec)
    if volume_index is not None:
      return self._fvde_volume_group.get_logical_volume(volume_index)
    return None

  def GetFVDEVolumeGroup(self):
    """Retrieves the Core Storage volume group.

    Returns:
      pyfvde.volume_group: a Core Storage volume group.
    """
    return self._fvde_volume_group

  def GetRootFileEntry(self):
    """Retrieves the root file entry.

    Returns:
      CSFileEntry: root file entry or None if not available.
    """
    root_path_spec = cs_path_spec.CSPathSpec(
        location=self.LOCATION_ROOT, parent=self._path_spec.parent)
    return self.GetFileEntryByPathSpec(root_path_spec)
| {
"content_hash": "25101ce7dea917007681e989814cef1b",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 79,
"avg_line_length": 30.981366459627328,
"alnum_prop": 0.6884522854851644,
"repo_name": "joachimmetz/dfvfs",
"id": "b46d94474f0d34f98b087ebbae3a8e0f954118f7",
"size": "5012",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "dfvfs/vfs/cs_file_system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14212"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2176548"
},
{
"name": "Shell",
"bytes": "19355"
}
],
"symlink_target": ""
} |
"""HydraTK extensions implementation class
.. module:: core.extension
:platform: Unix
:synopsis: HydraTK extensions implementation class
.. moduleauthor:: Petr Czaderna <pc@hydratk.org>
"""
class Extension(object):
    """Class Extension

    Base class for HydraTK extensions. Subclasses override the metadata
    attributes below and may provide optional hooks (_init_extension,
    _check_dependencies, _do_imports, _register_actions).
    """

    _ext_id = 'Undefined'
    _ext_name = 'Undefined'
    _ext_version = 'Undefined'
    _ext_author = 'Undefined'
    _ext_year = 'Undefined'

    # MasterHead object reference
    _mh = None

    def __getattr__(self, name):
        """Method gets required MasterHead attribute

        Subclass must have enabled attribute _wrap_hydra_attrs

        Args:
            name (str): attribute name

        Returns:
            obj: attribute value

        Raises:
            error: AttributeError
        """
        # Look the flag up without re-entering __getattr__: the previous
        # hasattr(self, '_wrap_hydra_attrs') recursed back into this method
        # whenever the flag was absent.
        wrap_enabled = self.__dict__.get(
            '_wrap_hydra_attrs',
            getattr(type(self), '_wrap_hydra_attrs', False))
        if wrap_enabled == True:
            if hasattr(self._mh, name):
                # Use getattr rather than _mh.__dict__[name]: the direct
                # __dict__ lookup raised KeyError for methods and class-level
                # attributes even though the hasattr check had succeeded.
                return getattr(self._mh, name)
        raise AttributeError(
            "'module' object has no attribute '{0}'".format(name))

    def __init__(self, core_instance=None):
        """Class constructor

        Called when object is initialized

        Set extensions metadata
        If implemented in subclass - check dependencies, import modules, register actions

        Args:
            core_instance (obj): CoreHead reference
        """
        self._mh = core_instance
        self._init_extension()
        # Run the optional subclass hooks only when actually implemented.
        for hook_name in ('_check_dependencies', '_do_imports', '_register_actions'):
            hook = getattr(self.__class__, hook_name, None)
            if callable(hook):
                getattr(self, hook_name)()

    def get_ext_name(self):
        """Method gets extension name

        Args:
            none

        Returns:
            str: name
        """
        return self._ext_name

    def get_ext_version(self):
        """Method gets extension version

        Args:
            none

        Returns:
            str: version
        """
        return self._ext_version

    def get_ext_author(self):
        """Method gets extension author

        Args:
            none

        Returns:
            str: author
        """
        return self._ext_author

    def get_ext_info(self):
        """Method gets extension summary info

        Args:
            none

        Returns:
            str: info
        """
        return self._ext_name + ' v' + self._ext_version + ' (c) [' + self._ext_year + ' ' + self._ext_author + ']'
| {
"content_hash": "36c3804185d7a69522d1fc7b54ea8286",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 119,
"avg_line_length": 23.11764705882353,
"alnum_prop": 0.5456197746274082,
"repo_name": "hydratk/hydratk",
"id": "5da82bd3ca9eedc4e4538e1f9a7af2fb4f1308c2",
"size": "2775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hydratk/core/extension.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "444574"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
import unittest
from unittest import skipUnless
from django.db import connection
from django.contrib.gis import gdal
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import (
HAS_SPATIAL_DB, no_mysql, no_oracle, no_spatialite,
mysql, oracle, postgis, spatialite)
from django.test import TestCase
from django.utils import six
if HAS_GEOS:
from django.contrib.gis.geos import (fromstr, GEOSGeometry,
Point, LineString, LinearRing, Polygon, GeometryCollection)
from .models import Country, City, PennsylvaniaCity, State, Track
if HAS_GEOS and not spatialite:
from .models import Feature, MinusOneSRID
def postgis_bug_version():
    """True when the backend reports a PostGIS version in [2.0.0, 2.0.1]."""
    version = getattr(connection.ops, "spatial_version", (0, 0, 0))
    return version and (2, 0, 0) <= version <= (2, 0, 1)
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class GeoModelTest(TestCase):
    def test_fixtures(self):
        "Testing geographic model initialization from fixtures."
        # Ensuring that data was loaded from initial data fixtures.
        # Expected counts match the app's shipped fixture data.
        self.assertEqual(2, Country.objects.count())
        self.assertEqual(8, City.objects.count())
        self.assertEqual(2, State.objects.count())
def test_proxy(self):
"Testing Lazy-Geometry support (using the GeometryProxy)."
## Testing on a Point
pnt = Point(0, 0)
nullcity = City(name='NullCity', point=pnt)
nullcity.save()
# Making sure TypeError is thrown when trying to set with an
# incompatible type.
for bad in [5, 2.0, LineString((0, 0), (1, 1))]:
try:
nullcity.point = bad
except TypeError:
pass
else:
self.fail('Should throw a TypeError')
# Now setting with a compatible GEOS Geometry, saving, and ensuring
# the save took, notice no SRID is explicitly set.
new = Point(5, 23)
nullcity.point = new
# Ensuring that the SRID is automatically set to that of the
# field after assignment, but before saving.
self.assertEqual(4326, nullcity.point.srid)
nullcity.save()
# Ensuring the point was saved correctly after saving
self.assertEqual(new, City.objects.get(name='NullCity').point)
# Setting the X and Y of the Point
nullcity.point.x = 23
nullcity.point.y = 5
# Checking assignments pre & post-save.
self.assertNotEqual(Point(23, 5), City.objects.get(name='NullCity').point)
nullcity.save()
self.assertEqual(Point(23, 5), City.objects.get(name='NullCity').point)
nullcity.delete()
## Testing on a Polygon
shell = LinearRing((0, 0), (0, 100), (100, 100), (100, 0), (0, 0))
inner = LinearRing((40, 40), (40, 60), (60, 60), (60, 40), (40, 40))
# Creating a State object using a built Polygon
ply = Polygon(shell, inner)
nullstate = State(name='NullState', poly=ply)
self.assertEqual(4326, nullstate.poly.srid) # SRID auto-set from None
nullstate.save()
ns = State.objects.get(name='NullState')
self.assertEqual(ply, ns.poly)
# Testing the `ogr` and `srs` lazy-geometry properties.
if gdal.HAS_GDAL:
self.assertEqual(True, isinstance(ns.poly.ogr, gdal.OGRGeometry))
self.assertEqual(ns.poly.wkb, ns.poly.ogr.wkb)
self.assertEqual(True, isinstance(ns.poly.srs, gdal.SpatialReference))
self.assertEqual('WGS 84', ns.poly.srs.name)
# Changing the interior ring on the poly attribute.
new_inner = LinearRing((30, 30), (30, 70), (70, 70), (70, 30), (30, 30))
ns.poly[1] = new_inner
ply[1] = new_inner
self.assertEqual(4326, ns.poly.srid)
ns.save()
self.assertEqual(ply, State.objects.get(name='NullState').poly)
ns.delete()
@no_mysql
def test_lookup_insert_transform(self):
"Testing automatic transform for lookups and inserts."
# San Antonio in 'WGS84' (SRID 4326)
sa_4326 = 'POINT (-98.493183 29.424170)'
wgs_pnt = fromstr(sa_4326, srid=4326) # Our reference point in WGS84
# Oracle doesn't have SRID 3084, using 41157.
if oracle:
# San Antonio in 'Texas 4205, Southern Zone (1983, meters)' (SRID 41157)
# Used the following Oracle SQL to get this value:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_CS.TRANSFORM(SDO_GEOMETRY('POINT (-98.493183 29.424170)', 4326), 41157)) FROM DUAL;
nad_wkt = 'POINT (300662.034646583 5416427.45974934)'
nad_srid = 41157
else:
# San Antonio in 'NAD83(HARN) / Texas Centric Lambert Conformal' (SRID 3084)
nad_wkt = 'POINT (1645978.362408288754523 6276356.025927528738976)' # Used ogr.py in gdal 1.4.1 for this transform
nad_srid = 3084
# Constructing & querying with a point from a different SRID. Oracle
# `SDO_OVERLAPBDYINTERSECT` operates differently from
# `ST_Intersects`, so contains is used instead.
nad_pnt = fromstr(nad_wkt, srid=nad_srid)
if oracle:
tx = Country.objects.get(mpoly__contains=nad_pnt)
else:
tx = Country.objects.get(mpoly__intersects=nad_pnt)
self.assertEqual('Texas', tx.name)
# Creating San Antonio. Remember the Alamo.
sa = City.objects.create(name='San Antonio', point=nad_pnt)
# Now verifying that San Antonio was transformed correctly
sa = City.objects.get(name='San Antonio')
self.assertAlmostEqual(wgs_pnt.x, sa.point.x, 6)
self.assertAlmostEqual(wgs_pnt.y, sa.point.y, 6)
# If the GeometryField SRID is -1, then we shouldn't perform any
# transformation if the SRID of the input geometry is different.
# SpatiaLite does not support missing SRID values.
if not spatialite:
m1 = MinusOneSRID(geom=Point(17, 23, srid=4326))
m1.save()
self.assertEqual(-1, m1.geom.srid)
def test_createnull(self):
"Testing creating a model instance and the geometry being None"
c = City()
self.assertEqual(c.point, None)
@no_spatialite # SpatiaLite does not support abstract geometry columns
def test_geometryfield(self):
"Testing the general GeometryField."
Feature(name='Point', geom=Point(1, 1)).save()
Feature(name='LineString', geom=LineString((0, 0), (1, 1), (5, 5))).save()
Feature(name='Polygon', geom=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)))).save()
Feature(name='GeometryCollection',
geom=GeometryCollection(Point(2, 2), LineString((0, 0), (2, 2)),
Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))).save()
f_1 = Feature.objects.get(name='Point')
self.assertEqual(True, isinstance(f_1.geom, Point))
self.assertEqual((1.0, 1.0), f_1.geom.tuple)
f_2 = Feature.objects.get(name='LineString')
self.assertEqual(True, isinstance(f_2.geom, LineString))
self.assertEqual(((0.0, 0.0), (1.0, 1.0), (5.0, 5.0)), f_2.geom.tuple)
f_3 = Feature.objects.get(name='Polygon')
self.assertEqual(True, isinstance(f_3.geom, Polygon))
f_4 = Feature.objects.get(name='GeometryCollection')
self.assertEqual(True, isinstance(f_4.geom, GeometryCollection))
self.assertEqual(f_3.geom, f_4.geom[2])
@no_mysql
def test_inherited_geofields(self):
"Test GeoQuerySet methods on inherited Geometry fields."
# Creating a Pennsylvanian city.
PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)')
# All transformation SQL will need to be performed on the
# _parent_ table.
qs = PennsylvaniaCity.objects.transform(32128)
self.assertEqual(1, qs.count())
for pc in qs:
self.assertEqual(32128, pc.point.srid)
def test_raw_sql_query(self):
"Testing raw SQL query."
cities1 = City.objects.all()
# Only PostGIS would support a 'select *' query because of its recognized
# HEXEWKB format for geometry fields
as_text = 'ST_AsText(%s)' if postgis else connection.ops.select
cities2 = City.objects.raw(
'select id, name, %s from geoapp_city' % as_text % 'point'
)
self.assertEqual(len(cities1), len(list(cities2)))
self.assertTrue(isinstance(cities2[0].point, Point))
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class GeoLookupTest(TestCase):
    """Tests the spatial lookup types (disjoint, contains, relate, etc.)."""

    @no_mysql
    def test_disjoint_lookup(self):
        "Testing the `disjoint` lookup type."
        ptown = City.objects.get(name='Pueblo')
        qs1 = City.objects.filter(point__disjoint=ptown.point)
        self.assertEqual(7, qs1.count())

        qs2 = State.objects.filter(poly__disjoint=ptown.point)
        self.assertEqual(1, qs2.count())
        self.assertEqual('Kansas', qs2[0].name)

    def test_contains_contained_lookups(self):
        "Testing the 'contained', 'contains', and 'bbcontains' lookup types."
        # Getting Texas, yes we were a country -- once ;)
        texas = Country.objects.get(name='Texas')

        # Seeing what cities are in Texas, should get Houston and Dallas,
        # and Oklahoma City because 'contained' only checks on the
        # _bounding box_ of the Geometries.
        if not oracle:
            qs = City.objects.filter(point__contained=texas.mpoly)
            self.assertEqual(3, qs.count())
            cities = ['Houston', 'Dallas', 'Oklahoma City']
            for c in qs:
                self.assertEqual(True, c.name in cities)

        # Pulling out some cities.
        houston = City.objects.get(name='Houston')
        wellington = City.objects.get(name='Wellington')
        pueblo = City.objects.get(name='Pueblo')
        okcity = City.objects.get(name='Oklahoma City')
        lawrence = City.objects.get(name='Lawrence')

        # Now testing contains on the countries using the points for
        # Houston and Wellington.
        tx = Country.objects.get(mpoly__contains=houston.point)  # Query w/GEOSGeometry
        nz = Country.objects.get(mpoly__contains=wellington.point.hex)  # Query w/EWKBHEX
        self.assertEqual('Texas', tx.name)
        self.assertEqual('New Zealand', nz.name)

        # Spatialite 2.3 thinks that Lawrence is in Puerto Rico (a NULL geometry).
        if not spatialite:
            ks = State.objects.get(poly__contains=lawrence.point)
            self.assertEqual('Kansas', ks.name)

        # Pueblo and Oklahoma City (even though OK City is within the bounding box of Texas)
        # are not contained in Texas or New Zealand.
        self.assertEqual(0, len(Country.objects.filter(mpoly__contains=pueblo.point)))  # Query w/GEOSGeometry object
        self.assertEqual((mysql and 1) or 0,
                         len(Country.objects.filter(mpoly__contains=okcity.point.wkt)))  # Query w/WKT

        # OK City is contained w/in bounding box of Texas.
        if not oracle:
            qs = Country.objects.filter(mpoly__bbcontains=okcity.point)
            self.assertEqual(1, len(qs))
            self.assertEqual('Texas', qs[0].name)

    # Only PostGIS has `left` and `right` lookup types.
    @no_mysql
    @no_oracle
    @no_spatialite
    def test_left_right_lookups(self):
        "Testing the 'left' and 'right' lookup types."
        # Left: A << B => true if xmax(A) < xmin(B)
        # Right: A >> B => true if xmin(A) > xmax(B)
        # See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in PostGIS source.

        # Getting the borders for Colorado & Kansas
        co_border = State.objects.get(name='Colorado').poly
        ks_border = State.objects.get(name='Kansas').poly

        # Note: Wellington has an 'X' value of 174, so it will not be considered
        # to the left of CO.

        # These cities should be strictly to the right of the CO border.
        cities = ['Houston', 'Dallas', 'Oklahoma City',
                  'Lawrence', 'Chicago', 'Wellington']
        qs = City.objects.filter(point__right=co_border)
        self.assertEqual(6, len(qs))
        for c in qs:
            self.assertEqual(True, c.name in cities)

        # These cities should be strictly to the right of the KS border.
        cities = ['Chicago', 'Wellington']
        qs = City.objects.filter(point__right=ks_border)
        self.assertEqual(2, len(qs))
        for c in qs:
            self.assertEqual(True, c.name in cities)

        # Note: Wellington has an 'X' value of 174, so it will not be considered
        # to the left of CO.
        vic = City.objects.get(point__left=co_border)
        self.assertEqual('Victoria', vic.name)

        cities = ['Pueblo', 'Victoria']
        qs = City.objects.filter(point__left=ks_border)
        self.assertEqual(2, len(qs))
        for c in qs:
            self.assertEqual(True, c.name in cities)

    # The left/right lookup tests are known failures on PostGIS 2.0/2.0.1
    # http://trac.osgeo.org/postgis/ticket/2035
    if postgis_bug_version():
        test_left_right_lookups = unittest.expectedFailure(test_left_right_lookups)

    def test_equals_lookups(self):
        "Testing the 'same_as' and 'equals' lookup types."
        pnt = fromstr('POINT (-95.363151 29.763374)', srid=4326)
        c1 = City.objects.get(point=pnt)
        c2 = City.objects.get(point__same_as=pnt)
        c3 = City.objects.get(point__equals=pnt)
        for c in [c1, c2, c3]:
            self.assertEqual('Houston', c.name)

    @no_mysql
    def test_null_geometries(self):
        "Testing NULL geometry support, and the `isnull` lookup type."
        # Creating a state with a NULL boundary.
        State.objects.create(name='Puerto Rico')

        # Querying for both NULL and Non-NULL values.
        nullqs = State.objects.filter(poly__isnull=True)
        validqs = State.objects.filter(poly__isnull=False)

        # Puerto Rico should be NULL (it's a commonwealth unincorporated territory)
        self.assertEqual(1, len(nullqs))
        self.assertEqual('Puerto Rico', nullqs[0].name)

        # The valid states should be Colorado & Kansas
        self.assertEqual(2, len(validqs))
        state_names = [s.name for s in validqs]
        self.assertEqual(True, 'Colorado' in state_names)
        self.assertEqual(True, 'Kansas' in state_names)

        # Saving another commonwealth w/a NULL geometry.
        nmi = State.objects.create(name='Northern Mariana Islands', poly=None)
        self.assertEqual(nmi.poly, None)

        # Assigning a geometry and saving -- then UPDATE back to NULL.
        nmi.poly = 'POLYGON((0 0,1 0,1 1,1 0,0 0))'
        nmi.save()
        State.objects.filter(name='Northern Mariana Islands').update(poly=None)
        self.assertEqual(None, State.objects.get(name='Northern Mariana Islands').poly)

    @no_mysql
    def test_relate_lookup(self):
        "Testing the 'relate' lookup type."
        # To make things more interesting, we will have our Texas reference point in
        # different SRIDs.
        pnt1 = fromstr('POINT (649287.0363174 4177429.4494686)', srid=2847)
        pnt2 = fromstr('POINT(-98.4919715741052 29.4333344025053)', srid=4326)

        # Not passing in a geometry as first param should
        # raise a type error when initializing the GeoQuerySet
        self.assertRaises(ValueError, Country.objects.filter, mpoly__relate=(23, 'foo'))

        # Making sure the right exception is raised for the given
        # bad arguments.
        for bad_args, e in [((pnt1, 0), ValueError), ((pnt2, 'T*T***FF*', 0), ValueError)]:
            qs = Country.objects.filter(mpoly__relate=bad_args)
            self.assertRaises(e, qs.count)

        # Relate works differently for the different backends.
        if postgis or spatialite:
            contains_mask = 'T*T***FF*'
            within_mask = 'T*F**F***'
            intersects_mask = 'T********'
        elif oracle:
            contains_mask = 'contains'
            within_mask = 'inside'
            # TODO: This is not quite the same as the PostGIS mask above
            intersects_mask = 'overlapbdyintersect'

        # Testing contains relation mask.
        self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, contains_mask)).name)
        self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, contains_mask)).name)

        # Testing within relation mask.
        ks = State.objects.get(name='Kansas')
        self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, within_mask)).name)

        # Testing intersection relation mask.
        if not oracle:
            self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, intersects_mask)).name)
            self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, intersects_mask)).name)
            self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, intersects_mask)).name)
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class GeoQuerySetTest(TestCase):
    """Tests the custom GeoQuerySet methods (centroid, geojson, kml, transform, etc.)."""

    # Please keep the tests in GeoQuerySet method's alphabetic order
    @no_mysql
    def test_centroid(self):
        "Testing the `centroid` GeoQuerySet method."
        qs = State.objects.exclude(poly__isnull=True).centroid()
        if oracle:
            tol = 0.1
        elif spatialite:
            tol = 0.000001
        else:
            tol = 0.000000001
        for s in qs:
            self.assertEqual(True, s.poly.centroid.equals_exact(s.centroid, tol))

    @no_mysql
    def test_diff_intersection_union(self):
        "Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods."
        geom = Point(5, 23)
        qs = Country.objects.all().difference(geom).sym_difference(geom).union(geom)

        # XXX For some reason SpatiaLite does something screwy with the Texas geometry here. Also,
        # XXX it doesn't like the null intersection.
        if spatialite:
            qs = qs.exclude(name='Texas')
        else:
            qs = qs.intersection(geom)

        for c in qs:
            if oracle:
                # Should be able to execute the queries; however, they won't be the same
                # as GEOS (because Oracle doesn't use GEOS internally like PostGIS or
                # SpatiaLite).
                pass
            else:
                self.assertEqual(c.mpoly.difference(geom), c.difference)
                if not spatialite:
                    self.assertEqual(c.mpoly.intersection(geom), c.intersection)
                self.assertEqual(c.mpoly.sym_difference(geom), c.sym_difference)
                self.assertEqual(c.mpoly.union(geom), c.union)

    @skipUnless(getattr(connection.ops, 'envelope', False), 'Database does not support envelope operation')
    def test_envelope(self):
        "Testing the `envelope` GeoQuerySet method."
        countries = Country.objects.all().envelope()
        for country in countries:
            self.assertIsInstance(country.envelope, Polygon)

    @no_mysql
    @no_spatialite  # SpatiaLite does not have an Extent function
    def test_extent(self):
        "Testing the `extent` GeoQuerySet method."
        # Reference query:
        # `SELECT ST_extent(point) FROM geoapp_city WHERE (name='Houston' or name='Dallas');`
        # => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 32.7820587158203)
        expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
        qs = City.objects.filter(name__in=('Houston', 'Dallas'))
        extent = qs.extent()
        for val, exp in zip(extent, expected):
            self.assertAlmostEqual(exp, val, 4)

    @no_mysql
    @no_oracle
    @no_spatialite
    def test_force_rhr(self):
        "Testing GeoQuerySet.force_rhr()."
        rings = (
            ((0, 0), (5, 0), (0, 5), (0, 0)),
            ((1, 1), (1, 3), (3, 1), (1, 1)),
        )
        rhr_rings = (
            ((0, 0), (0, 5), (5, 0), (0, 0)),
            ((1, 1), (3, 1), (1, 3), (1, 1)),
        )
        State.objects.create(name='Foo', poly=Polygon(*rings))
        s = State.objects.force_rhr().get(name='Foo')
        self.assertEqual(rhr_rings, s.force_rhr.coords)

    @no_mysql
    @no_oracle
    @no_spatialite
    def test_geohash(self):
        "Testing GeoQuerySet.geohash()."
        if not connection.ops.geohash:
            return
        # Reference query:
        # SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston';
        # SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston';
        ref_hash = '9vk1mfq8jx0c8e0386z6'
        h1 = City.objects.geohash().get(name='Houston')
        h2 = City.objects.geohash(precision=5).get(name='Houston')
        self.assertEqual(ref_hash, h1.geohash)
        self.assertEqual(ref_hash[:5], h2.geohash)

    def test_geojson(self):
        "Testing GeoJSON output from the database using GeoQuerySet.geojson()."
        # Only PostGIS 1.3.4+ and SpatiaLite 3.0+ support GeoJSON.
        if not connection.ops.geojson:
            self.assertRaises(NotImplementedError, Country.objects.all().geojson, field_name='mpoly')
            return

        pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}'
        houston_json = '{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}'
        victoria_json = '{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],"coordinates":[-123.305196,48.462611]}'
        chicago_json = '{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}'
        # Older backends emit slightly different JSON, so the expected
        # values are adjusted per backend/version below.
        if postgis and connection.ops.spatial_version < (1, 4, 0):
            pueblo_json = '{"type":"Point","coordinates":[-104.60925200,38.25500100]}'
            houston_json = '{"type":"Point","crs":{"type":"EPSG","properties":{"EPSG":4326}},"coordinates":[-95.36315100,29.76337400]}'
            victoria_json = '{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],"coordinates":[-123.30519600,48.46261100]}'
        elif spatialite:
            victoria_json = '{"type":"Point","bbox":[-123.305196,48.462611,-123.305196,48.462611],"coordinates":[-123.305196,48.462611]}'

        # Precision argument should only be an integer
        self.assertRaises(TypeError, City.objects.geojson, precision='foo')

        # Reference queries and values.
        # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo';
        self.assertEqual(pueblo_json, City.objects.geojson().get(name='Pueblo').geojson)

        # 1.3.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Houston';
        # 1.4.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Houston';
        # This time we want to include the CRS by using the `crs` keyword.
        self.assertEqual(houston_json, City.objects.geojson(crs=True, model_att='json').get(name='Houston').json)

        # 1.3.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Victoria';
        # 1.4.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Houston';
        # This time we include the bounding box by using the `bbox` keyword.
        self.assertEqual(victoria_json, City.objects.geojson(bbox=True).get(name='Victoria').geojson)

        # 1.(3|4).x: SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Chicago';
        # Finally, we set every available keyword.
        self.assertEqual(chicago_json, City.objects.geojson(bbox=True, crs=True, precision=5).get(name='Chicago').geojson)

    def test_gml(self):
        "Testing GML output from the database using GeoQuerySet.gml()."
        if mysql or (spatialite and not connection.ops.gml):
            self.assertRaises(NotImplementedError, Country.objects.all().gml, field_name='mpoly')
            return

        # Should throw a TypeError when trying to obtain GML from a
        # non-geometry field.
        qs = City.objects.all()
        self.assertRaises(TypeError, qs.gml, field_name='name')
        ptown1 = City.objects.gml(field_name='point', precision=9).get(name='Pueblo')
        ptown2 = City.objects.gml(precision=9).get(name='Pueblo')

        if oracle:
            # No precision parameter for Oracle :-/
            gml_regex = re.compile(r'^<gml:Point srsName="SDO:4326" xmlns:gml="http://www.opengis.net/gml"><gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ </gml:coordinates></gml:Point>')
        elif spatialite and connection.ops.spatial_version < (3, 0, 0):
            # Spatialite before 3.0 has extra colon in SrsName
            gml_regex = re.compile(r'^<gml:Point SrsName="EPSG::4326"><gml:coordinates decimal="\." cs="," ts=" ">-104.609251\d+,38.255001</gml:coordinates></gml:Point>')
        else:
            gml_regex = re.compile(r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>')

        for ptown in [ptown1, ptown2]:
            self.assertTrue(gml_regex.match(ptown.gml))

        # PostGIS < 1.5 doesn't include dimension in GMLv3 output.
        if postgis and connection.ops.spatial_version >= (1, 5, 0):
            self.assertIn('<gml:pos srsDimension="2">',
                          City.objects.gml(version=3).get(name='Pueblo').gml)

    def test_kml(self):
        "Testing KML output from the database using GeoQuerySet.kml()."
        # Only PostGIS and Spatialite (>=2.4.0-RC4) support KML serialization
        if not (postgis or (spatialite and connection.ops.kml)):
            self.assertRaises(NotImplementedError, State.objects.all().kml, field_name='poly')
            return

        # Should throw a TypeError when trying to obtain KML from a
        # non-geometry field.
        qs = City.objects.all()
        self.assertRaises(TypeError, qs.kml, 'name')

        # The reference KML depends on the version of PostGIS used
        # (the output stopped including altitude in 1.3.3).
        if connection.ops.spatial_version >= (1, 3, 3):
            ref_kml = '<Point><coordinates>-104.609252,38.255001</coordinates></Point>'
        else:
            ref_kml = '<Point><coordinates>-104.609252,38.255001,0</coordinates></Point>'

        # Ensuring the KML is as expected.
        ptown1 = City.objects.kml(field_name='point', precision=9).get(name='Pueblo')
        ptown2 = City.objects.kml(precision=9).get(name='Pueblo')
        for ptown in [ptown1, ptown2]:
            self.assertEqual(ref_kml, ptown.kml)

    # Only PostGIS has support for the MakeLine aggregate.
    @no_mysql
    @no_oracle
    @no_spatialite
    def test_make_line(self):
        "Testing the `make_line` GeoQuerySet method."
        # Ensuring that a `TypeError` is raised on models without PointFields.
        self.assertRaises(TypeError, State.objects.make_line)
        self.assertRaises(TypeError, Country.objects.make_line)
        # Reference query:
        # SELECT AsText(ST_MakeLine(geoapp_city.point)) FROM geoapp_city;
        ref_line = GEOSGeometry('LINESTRING(-95.363151 29.763374,-96.801611 32.782057,-97.521157 34.464642,174.783117 -41.315268,-104.609252 38.255001,-95.23506 38.971823,-87.650175 41.850385,-123.305196 48.462611)', srid=4326)
        self.assertEqual(ref_line, City.objects.make_line())

    @no_mysql
    def test_num_geom(self):
        "Testing the `num_geom` GeoQuerySet method."
        # Both 'countries' only have two geometries.
        for c in Country.objects.num_geom():
            self.assertEqual(2, c.num_geom)
        for c in City.objects.filter(point__isnull=False).num_geom():
            # Oracle and PostGIS 2.0+ will return 1 for the number of
            # geometries on non-collections, whereas PostGIS < 2.0.0
            # will return None.
            if postgis and connection.ops.spatial_version < (2, 0, 0):
                self.assertIsNone(c.num_geom)
            else:
                self.assertEqual(1, c.num_geom)

    @no_mysql
    @no_spatialite  # SpatiaLite can only count vertices in LineStrings
    def test_num_points(self):
        "Testing the `num_points` GeoQuerySet method."
        for c in Country.objects.num_points():
            self.assertEqual(c.mpoly.num_points, c.num_points)

        if not oracle:
            # Oracle cannot count vertices in Point geometries.
            for c in City.objects.num_points():
                self.assertEqual(1, c.num_points)

    @no_mysql
    def test_point_on_surface(self):
        "Testing the `point_on_surface` GeoQuerySet method."
        # Reference values.
        if oracle:
            # SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05)) FROM GEOAPP_COUNTRY;
            ref = {'New Zealand': fromstr('POINT (174.616364 -36.100861)', srid=4326),
                   'Texas': fromstr('POINT (-103.002434 36.500397)', srid=4326),
                   }
        elif postgis or spatialite:
            # Using GEOSGeometry to compute the reference point on surface values
            # -- since PostGIS also uses GEOS these should be the same.
            ref = {'New Zealand': Country.objects.get(name='New Zealand').mpoly.point_on_surface,
                   'Texas': Country.objects.get(name='Texas').mpoly.point_on_surface
                   }

        for c in Country.objects.point_on_surface():
            if spatialite:
                # XXX This seems to be a WKT-translation-related precision issue?
                tol = 0.00001
            else:
                tol = 0.000000001
            self.assertEqual(True, ref[c.name].equals_exact(c.point_on_surface, tol))

    @no_mysql
    @no_spatialite
    def test_reverse_geom(self):
        "Testing GeoQuerySet.reverse_geom()."
        coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
        Track.objects.create(name='Foo', line=LineString(coords))
        t = Track.objects.reverse_geom().get(name='Foo')
        coords.reverse()
        self.assertEqual(tuple(coords), t.reverse_geom.coords)
        if oracle:
            self.assertRaises(TypeError, State.objects.reverse_geom)

    @no_mysql
    @no_oracle
    def test_scale(self):
        "Testing the `scale` GeoQuerySet method."
        xfac, yfac = 2, 3
        tol = 5  # XXX The low precision tolerance is for SpatiaLite
        qs = Country.objects.scale(xfac, yfac, model_att='scaled')
        for c in qs:
            for p1, p2 in zip(c.mpoly, c.scaled):
                for r1, r2 in zip(p1, p2):
                    for c1, c2 in zip(r1.coords, r2.coords):
                        self.assertAlmostEqual(c1[0] * xfac, c2[0], tol)
                        self.assertAlmostEqual(c1[1] * yfac, c2[1], tol)

    @no_mysql
    @no_oracle
    @no_spatialite
    def test_snap_to_grid(self):
        "Testing GeoQuerySet.snap_to_grid()."
        # Let's try and break snap_to_grid() with bad combinations of arguments.
        for bad_args in ((), range(3), range(5)):
            self.assertRaises(ValueError, Country.objects.snap_to_grid, *bad_args)
        for bad_args in (('1.0',), (1.0, None), tuple(map(six.text_type, range(4)))):
            self.assertRaises(TypeError, Country.objects.snap_to_grid, *bad_args)

        # Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org
        # from the world borders dataset he provides.
        wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,'
               '12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,'
               '12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,'
               '12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,'
               '12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,'
               '12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,'
               '12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,'
               '12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))')
        Country.objects.create(name='San Marino', mpoly=fromstr(wkt))

        # Because floating-point arithmetic isn't exact, we set a tolerance
        # to pass into GEOS `equals_exact`.
        tol = 0.000000001

        # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country" WHERE "geoapp_country"."name" = 'San Marino';
        ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))')
        self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.1).get(name='San Marino').snap_to_grid, tol))

        # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country" WHERE "geoapp_country"."name" = 'San Marino';
        ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))')
        self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.05, 0.23).get(name='San Marino').snap_to_grid, tol))

        # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country" WHERE "geoapp_country"."name" = 'San Marino';
        ref = fromstr('MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))')
        self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.05, 0.23, 0.5, 0.17).get(name='San Marino').snap_to_grid, tol))

    def test_svg(self):
        "Testing SVG output using GeoQuerySet.svg()."
        if mysql or oracle:
            self.assertRaises(NotImplementedError, City.objects.svg)
            return

        self.assertRaises(TypeError, City.objects.svg, precision='foo')
        # SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo';
        svg1 = 'cx="-104.609252" cy="-38.255001"'
        # Even though relative, only one point so it's practically the same except for
        # the 'c' letter prefix on the x,y values.
        svg2 = svg1.replace('c', '')
        self.assertEqual(svg1, City.objects.svg().get(name='Pueblo').svg)
        self.assertEqual(svg2, City.objects.svg(relative=5).get(name='Pueblo').svg)

    @no_mysql
    def test_transform(self):
        "Testing the transform() GeoQuerySet method."
        # Pre-transformed points for Houston and Pueblo.
        htown = fromstr('POINT(1947516.83115183 6322297.06040572)', srid=3084)
        ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774)
        prec = 3  # Precision is low due to version variations in PROJ and GDAL.

        # Asserting the result of the transform operation with the values in
        # the pre-transformed points. Oracle does not have the 3084 SRID.
        if not oracle:
            h = City.objects.transform(htown.srid).get(name='Houston')
            self.assertEqual(3084, h.point.srid)
            self.assertAlmostEqual(htown.x, h.point.x, prec)
            self.assertAlmostEqual(htown.y, h.point.y, prec)

        p1 = City.objects.transform(ptown.srid, field_name='point').get(name='Pueblo')
        p2 = City.objects.transform(srid=ptown.srid).get(name='Pueblo')
        for p in [p1, p2]:
            self.assertEqual(2774, p.point.srid)
            self.assertAlmostEqual(ptown.x, p.point.x, prec)
            self.assertAlmostEqual(ptown.y, p.point.y, prec)

    @no_mysql
    @no_oracle
    def test_translate(self):
        "Testing the `translate` GeoQuerySet method."
        xfac, yfac = 5, -23
        qs = Country.objects.translate(xfac, yfac, model_att='translated')
        for c in qs:
            for p1, p2 in zip(c.mpoly, c.translated):
                for r1, r2 in zip(p1, p2):
                    for c1, c2 in zip(r1.coords, r2.coords):
                        # XXX The low precision is for SpatiaLite
                        self.assertAlmostEqual(c1[0] + xfac, c2[0], 5)
                        self.assertAlmostEqual(c1[1] + yfac, c2[1], 5)

    # TODO: Oracle can be made to pass if
    # union1 = union2 = fromstr('POINT (-97.5211570000000023 34.4646419999999978)')
    # but this seems unexpected and should be investigated to determine the cause.
    @no_mysql
    @no_oracle
    def test_unionagg(self):
        "Testing the `unionagg` (aggregate union) GeoQuerySet method."
        tx = Country.objects.get(name='Texas').mpoly
        # Houston, Dallas -- Oracle has different order.
        union1 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)')
        union2 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)')
        qs = City.objects.filter(point__within=tx)
        self.assertRaises(TypeError, qs.unionagg, 'name')

        # Using `field_name` keyword argument in one query and specifying an
        # order in the other (which should not be used because this is
        # an aggregate method on a spatial column)
        u1 = qs.unionagg(field_name='point')
        u2 = qs.order_by('name').unionagg()
        tol = 0.00001
        if oracle:
            union = union2
        else:
            union = union1
        self.assertEqual(True, union.equals_exact(u1, tol))
        self.assertEqual(True, union.equals_exact(u2, tol))
        qs = City.objects.filter(name='NotACity')
        self.assertEqual(None, qs.unionagg(field_name='point'))

    def test_non_concrete_field(self):
        # Regression check: iterating a queryset must not crash when a
        # model field's `column` is None (i.e. a non-concrete field).
        pkfield = City._meta.get_field_by_name('id')[0]
        orig_pkfield_col = pkfield.column
        pkfield.column = None
        try:
            list(City.objects.all())
        finally:
            # Always restore the column so other tests see the real schema.
            pkfield.column = orig_pkfield_col
| {
"content_hash": "4ac5cf576b1016b8ccbce970f5eb49d8",
"timestamp": "",
"source": "github",
"line_count": 811,
"max_line_length": 227,
"avg_line_length": 46.91861898890259,
"alnum_prop": 0.6205355969619721,
"repo_name": "Lightmatter/django-inlineformfield",
"id": "0d43f63eedbb676a8ca825b2be81b57a86b711b0",
"size": "38051",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": ".tox/py27/lib/python2.7/site-packages/django/contrib/gis/tests/geoapp/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43622"
},
{
"name": "Groff",
"bytes": "3667"
},
{
"name": "HTML",
"bytes": "108126"
},
{
"name": "JavaScript",
"bytes": "853457"
},
{
"name": "Python",
"bytes": "10506732"
},
{
"name": "Shell",
"bytes": "3801"
},
{
"name": "Smarty",
"bytes": "21023"
}
],
"symlink_target": ""
} |
'''
A flexible renderer that takes a templating engine and a data format
:maintainer: Jack Kuan <kjkuan@gmail.com>
:maturity: new
:platform: all
'''
# See http://docs.saltstack.org/en/latest/ref/renderers/all/salt.renderers.stateconf.html
# for a guide to using this module.
#
# FIXME: I really need to review and simplify this renderer, it's getting out of hand!
#
# TODO:
# - sls meta/info state: E.g.,
#
# sls_info:
# stateconf.set:
# - author: Jack Kuan
# - description: what the salt file does...
# - version: 0.1.0
#
# - version constraint for 'include'. E.g.,
#
# include:
# - apache: >= 0.1.0
#
# Import python libs
from __future__ import absolute_import
import logging
import re
import getopt
import copy
from os import path as ospath
# Import salt libs
import salt.utils
from salt.exceptions import SaltRenderError
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import StringIO # pylint: disable=import-error
__all__ = ['render']
log = logging.getLogger(__name__)
__opts__ = {
'stateconf_end_marker': r'#\s*-+\s*end of state config\s*-+',
# e.g., something like "# --- end of state config --" works by default.
'stateconf_start_state': '.start',
# name of the state id for the generated start state.
'stateconf_goal_state': '.goal',
# name of the state id for the generated goal state.
'stateconf_state_func': 'stateconf.set'
# names the state and the state function to be recognized as a special
# state from which to gather sls file context variables. It should be
# specified in the 'state.func' notation, and both the state module and
# the function must actually exist and the function should be a dummy,
# no-op state function that simply returns a
# dict(name=name, result=True, changes={}, comment='')
}
STATE_FUNC = STATE_NAME = ''
def __init__(opts):
    # Renderer-module initializer invoked by salt's loader. Caches the
    # configured stateconf state function (e.g. 'stateconf.set') and its
    # state module name for the rest of this module.
    # NOTE(review): the passed-in ``opts`` is ignored; the module-level
    # ``__opts__`` is read instead — confirm this is intentional.
    global STATE_NAME, STATE_FUNC
    STATE_FUNC = __opts__['stateconf_state_func']
    STATE_NAME = STATE_FUNC.split('.')[0]
MOD_BASENAME = ospath.basename(__file__)
# Pre-built usage error raised when the renderer shebang arguments are
# malformed. Note: the flag summary must match the getopt spec 'Gosp'
# used in render() — it previously advertised a non-existent '-S' flag
# ("[-GoSp]"); the accepted flag is lowercase '-s'.
INVALID_USAGE_ERROR = SaltRenderError(
    'Invalid use of {0} renderer!\n'
    '''Usage: #!{1} [-Gosp] [<data_renderer> [options] . <template_renderer> [options]]
where an example <data_renderer> would be yaml and a <template_renderer> might
be jinja. Each renderer can be passed its renderer specific options.
Options(for this renderer):
  -G Do not generate the goal state that requires all other states in the sls.
  -o Indirectly order the states by adding requires such that they will be
     executed in the order they are defined in the sls. Implies using yaml -o.
  -s Generate the start state that gets inserted as the first state in
     the sls. This only makes sense if your high state data dict is ordered.
  -p Assume high state input. This option allows you to pipe high state data
     through this renderer. With this option, the use of stateconf.set state
     in the sls will have no effect, but other features of the renderer still
     apply.
  '''.format(MOD_BASENAME, MOD_BASENAME)
)
def render(input, saltenv='base', sls='', argline='', **kws):
    '''
    Render ``input`` (a file path or file-like object, or — with ``-p`` —
    already-rendered high state data) with the configured data and template
    renderers, then pre-process the resulting high state: stateconf
    extraction, implicit ordering, start/goal state generation and local
    state-id renaming.
    '''
    gen_start_state = False
    no_goal_state = False
    implicit_require = False

    def process_sls_data(data, context=None, extract=False):
        # Render the template pass, then the data pass, then post-process.
        sls_dir = ospath.dirname(sls.replace('.', ospath.sep)) if '.' in sls else sls
        ctx = dict(sls_dir=sls_dir if sls_dir else '.')
        if context:
            ctx.update(context)
        tmplout = render_template(
            StringIO(data), saltenv, sls, context=ctx,
            argline=rt_argline.strip(), **kws
        )
        high = render_data(tmplout, saltenv, sls, argline=rd_argline.strip())
        return process_high_data(high, extract)

    def process_high_data(high, extract):
        # make a copy so that the original, un-preprocessed highstate data
        # structure can be used later for error checking if anything goes
        # wrong during the preprocessing.
        data = copy.deepcopy(high)
        try:
            rewrite_single_shorthand_state_decl(data)
            rewrite_sls_includes_excludes(data, sls, saltenv)
            if not extract and implicit_require:
                sid = has_names_decls(data)
                if sid:
                    raise SaltRenderError(
                        '\'names\' declaration(found in state id: {0}) is '
                        'not supported with implicitly ordered states! You '
                        'should generate the states in a template for-loop '
                        'instead.'.format(sid)
                    )
                add_implicit_requires(data)
            if gen_start_state:
                add_start_state(data, sls)
            if not extract and not no_goal_state:
                add_goal_state(data)
            rename_state_ids(data, sls)
            # We must extract no matter what so extending a stateconf sls file
            # works!
            extract_state_confs(data)
        except SaltRenderError:
            raise
        except Exception as err:
            log.exception(
                'Error found while pre-processing the salt file '
                '{0}:\n{1}'.format(sls, err)
            )
            from salt.state import State
            state = State(__opts__)
            errors = state.verify_high(high)
            if errors:
                raise SaltRenderError('\n'.join(errors))
            raise SaltRenderError('sls preprocessing/rendering failed!')
        return data
    # ----------------------
    renderers = kws['renderers']
    opts, args = getopt.getopt(argline.split(), 'Gosp')
    argline = ' '.join(args) if args else 'yaml . jinja'
    if ('-G', '') in opts:
        no_goal_state = True
    if ('-o', '') in opts:
        implicit_require = True
    if ('-s', '') in opts:
        gen_start_state = True
    if ('-p', '') in opts:
        data = process_high_data(input, extract=False)
    else:
        # Split on the first dot surrounded by spaces but not preceded by a
        # backslash. A backslash preceded dot will be replaced with just dot.
        args = [
            arg.strip().replace('\\.', '.')
            for arg in re.split(r'\s+(?<!\\)\.\s+', argline, 1)
        ]
        try:
            name, rd_argline = (args[0] + ' ').split(' ', 1)
            render_data = renderers[name]  # e.g., the yaml renderer
            if implicit_require:
                if name == 'yaml':
                    rd_argline = '-o ' + rd_argline
                else:
                    raise SaltRenderError(
                        'Implicit ordering is only supported if the yaml renderer '
                        'is used!'
                    )
            name, rt_argline = (args[1] + ' ').split(' ', 1)
            render_template = renderers[name]  # e.g., the mako renderer
        except KeyError as err:
            raise SaltRenderError('Renderer: {0} is not available!'.format(err))
        except IndexError:
            raise INVALID_USAGE_ERROR
        if isinstance(input, six.string_types):
            with salt.utils.fopen(input, 'r') as ifile:
                sls_templ = ifile.read()
        else:  # assume file-like
            sls_templ = input.read()
        # first pass to extract the state configuration
        match = re.search(__opts__['stateconf_end_marker'], sls_templ)
        if match:
            process_sls_data(sls_templ[:match.start()], extract=True)
        # if some config has been extracted then remove the sls-name prefix
        # of the keys in the extracted stateconf.set context to make them easier
        # to use in the salt file.
        if STATE_CONF:
            tmplctx = STATE_CONF.copy()
            if tmplctx:
                prefix = sls + '::'
                # Iterate over a snapshot of the keys: the loop body inserts
                # and deletes keys, and mutating a dict while iterating a live
                # key iterator raises RuntimeError.
                for k in list(six.iterkeys(tmplctx)):
                    if k.startswith(prefix):
                        tmplctx[k[len(prefix):]] = tmplctx[k]
                        del tmplctx[k]
        else:
            tmplctx = {}
        # do a second pass that provides the extracted conf as template context
        data = process_sls_data(sls_templ, tmplctx)
    if log.isEnabledFor(logging.DEBUG):
        import pprint  # FIXME: pprint OrderedDict
        log.debug('Rendered sls: {0}'.format(pprint.pformat(data)))
    return data
def has_names_decls(data):
    '''
    Return the id of the first state (excluding ``extend``) that uses a
    ``names`` declaration, or ``None`` when no state does.
    '''
    for state_id, _, _, state_args in statelist(data):
        if state_id == 'extend':
            continue
        if next(nvlist(state_args, ['names']), None) is not None:
            return state_id
def rewrite_single_shorthand_state_decl(data):  # pylint: disable=C0103
    '''
    Expand shorthand state declarations of the form::
        state_id_decl:
          state.func
    into their equivalent full form::
        state_id_decl:
          state.func: []
    '''
    for state_id, decl in six.iteritems(data):
        if isinstance(decl, six.string_types):
            data[state_id] = {decl: []}
def rewrite_sls_includes_excludes(data, sls, saltenv):
    '''
    Resolve relative (leading-dot) sls references in ``include`` and
    ``exclude`` declarations against the including/excluding sls file.
    '''
    for sid in data:
        if sid == 'include':
            includes = data[sid]
            for idx, entry in enumerate(includes):
                if isinstance(entry, dict):
                    # NOTE: popitem empties the single-key dict entry; when the
                    # ref is not relative the (now empty) entry is kept as-is.
                    env, ref = entry.popitem()
                else:
                    env, ref = saltenv, entry
                if ref.startswith('.'):
                    includes[idx] = {env: _relative_to_abs_sls(ref, sls)}
        elif sid == 'exclude':
            for excl in data[sid]:
                if 'sls' in excl and excl['sls'].startswith('.'):
                    excl['sls'] = _relative_to_abs_sls(excl['sls'], sls)
def _local_to_abs_sid(sid, sls):  # sid must start with '.'
    '''
    Turn a local (leading-dot) state id into its absolute
    ``pkg.path::id`` form, relative to ``sls``.
    '''
    if '::' in sid:
        return _relative_to_abs_sls(sid, sls)
    abs_sls = _relative_to_abs_sls(sid, sls + '.')
    return '::'.join(abs_sls.rsplit('.', 1))
def _relative_to_abs_sls(relative, sls):
'''
Convert ``relative`` sls reference into absolute, relative to ``sls``.
'''
levels, suffix = re.match(r'^(\.+)(.*)$', relative).groups()
level_count = len(levels)
p_comps = sls.split('.')
if level_count > len(p_comps):
raise SaltRenderError(
'Attempted relative include goes beyond top level package'
)
return '.'.join(p_comps[:-level_count] + [suffix])
def nvlist(thelist, names=None):
    '''
    Iterate a list of state arguments such as::
        - whatever
        - name1: value1
        - name2:
          - key: value
    yielding an ``(item, key, value)`` tuple for every single-key dict item
    whose key is in ``names`` (or for every dict item when ``names`` is
    None). Non-dict items are skipped; ``item`` is the single-key dict
    itself, so callers may mutate it in place.
    '''
    for entry in thelist:
        if not isinstance(entry, dict):
            continue
        key, value = next(iter(entry.items()))
        if names is None or key in names:
            yield entry, key, value
def nvlist2(thelist, names=None):
    '''
    Apply :func:`nvlist` one level deeper: for every matching name-value
    item in ``thelist``, iterate the name-value items of its value. Given
    state args like::
        - require:
          - file: test.sh
    ``nvlist2(args, ['require'])`` yields
    ``({'file': 'test.sh'}, 'file', 'test.sh')``.
    '''
    for _, _, inner in nvlist(thelist, names):
        for item in nvlist(inner):
            yield item
def statelist(states_dict, sid_excludes=frozenset(['include', 'exclude'])):
    '''
    Yield ``(sid, states, sname, args)`` for every state declaration in
    ``states_dict``, skipping excluded ids and any dunder-prefixed
    bookkeeping keys at either level.
    '''
    for sid, states in states_dict.items():
        if sid.startswith('__') or sid in sid_excludes:
            continue
        for sname, args in states.items():
            if sname.startswith('__'):
                continue
            yield sid, states, sname, args
REQUISITES = set([
'require', 'require_in', 'watch', 'watch_in', 'use', 'use_in', 'listen', 'listen_in'
])
def rename_state_ids(data, sls, is_extend=False):
    '''
    Rename local state ids (those starting with a dot) defined in the sls
    ``salt://my/salt/file.sls`` to "my.salt.file::<id-without-leading-dot>",
    and rewrite local requisite references to the renamed states.
    '''
    if 'extend' in data and not is_extend:
        rename_state_ids(data['extend'], sls, True)
    # First rewrite local (leading-dot) requisite references. The inner loop
    # variable is named rsid (not sid) to avoid shadowing the outer state id.
    for _sid, _, _, args in statelist(data):
        for req, rname, rsid in nvlist2(args, REQUISITES):
            if rsid.startswith('.'):
                req[rname] = _local_to_abs_sid(rsid, sls)
    # Iterate over a snapshot of the keys: renaming deletes and inserts keys,
    # and mutating a dict while iterating it raises RuntimeError on Python 3.
    for sid in list(data):
        if not sid.startswith('.'):
            continue
        newsid = _local_to_abs_sid(sid, sls)
        if newsid in data:
            raise SaltRenderError(
                'Can\'t rename state id({0}) into {1} because the later '
                'already exists!'.format(sid, newsid)
            )
        # add a '- name: sid' to those states without '- name'.
        for sname, args in six.iteritems(data[sid]):
            if state_name(sname) == STATE_NAME:
                continue
            for arg in args:
                if isinstance(arg, dict) and next(iter(arg)) == 'name':
                    break
            else:
                # then no '- name: ...' is defined in the state args
                # add the sid without the leading dot as the name.
                args.insert(0, dict(name=sid[1:]))
        data[newsid] = data[sid]
        del data[sid]
REQUIRE = set(['require', 'watch', 'listen'])
REQUIRE_IN = set(['require_in', 'watch_in', 'listen_in'])
EXTENDED_REQUIRE = {}
EXTENDED_REQUIRE_IN = {}
from itertools import chain
# To avoid cycles among states when each state requires the one before it:
# explicit require/watch/listen can only contain states before it
# explicit require_in/watch_in/listen_in can only contain states after it
def add_implicit_requires(data):
    '''
    Chain the states in ``data`` in declaration order by adding a require
    on the previous state to each state, while validating that explicit
    require/watch/listen only reference earlier states and the *_in forms
    only reference later ones (this prevents requisite cycles). Also
    records requisites found under ``extend:`` into the module-level
    EXTENDED_REQUIRE / EXTENDED_REQUIRE_IN maps.
    '''
    def T(sid, state):  # pylint: disable=C0103
        # Tag a state as "sid:state_module" for membership tracking.
        return '{0}:{1}'.format(sid, state_name(state))
    states_before = set()
    states_after = set()
    # Initially every state is "after"; each is moved to "before" as visited.
    for sid in data:
        for state in data[sid]:
            states_after.add(T(sid, state))
    prev_state = (None, None)  # (state_name, sid)
    for sid, states, sname, args in statelist(data):
        if sid == 'extend':
            # Collect requisites declared under extend: so the checks below
            # also account for them; don't chain the extend pseudo-state.
            for esid, _, _, eargs in statelist(states):
                for _, rstate, rsid in nvlist2(eargs, REQUIRE):
                    EXTENDED_REQUIRE.setdefault(
                        T(esid, rstate), []).append((None, rstate, rsid))
                for _, rstate, rsid in nvlist2(eargs, REQUIRE_IN):
                    EXTENDED_REQUIRE_IN.setdefault(
                        T(esid, rstate), []).append((None, rstate, rsid))
            continue
        tag = T(sid, sname)
        states_after.remove(tag)
        # require/watch/listen may only reference states declared earlier.
        reqs = nvlist2(args, REQUIRE)
        if tag in EXTENDED_REQUIRE:
            reqs = chain(reqs, EXTENDED_REQUIRE[tag])
        for _, rstate, rsid in reqs:
            if T(rsid, rstate) in states_after:
                raise SaltRenderError(
                    'State({0}) can\'t require/watch/listen a state({1}) defined '
                    'after it!'.format(tag, T(rsid, rstate))
                )
        # require_in/watch_in/listen_in may only reference later states.
        reqs = nvlist2(args, REQUIRE_IN)
        if tag in EXTENDED_REQUIRE_IN:
            reqs = chain(reqs, EXTENDED_REQUIRE_IN[tag])
        for _, rstate, rsid in reqs:
            if T(rsid, rstate) in states_before:
                raise SaltRenderError(
                    'State({0}) can\'t require_in/watch_in/listen_in a state({1}) '
                    'defined before it!'.format(tag, T(rsid, rstate))
                )
        # add a (- state: sid) item, at the beginning of the require of this
        # state if there's a state before this one.
        if prev_state[0] is not None:
            try:
                next(nvlist(args, ['require']))[2].insert(0, dict([prev_state]))
            except StopIteration:  # i.e., there's no require
                args.append(dict(require=[dict([prev_state])]))
        states_before.add(tag)
        prev_state = (state_name(sname), sid)
def add_start_state(data, sls):
    '''
    Insert the generated start state (id from ``stateconf_start_state``)
    that require_in's the first real state of this sls file, so an ordered
    high state has a well-defined entry point.
    '''
    start_sid = __opts__['stateconf_start_state']
    if start_sid in data:
        raise SaltRenderError(
            'Can\'t generate start state({0})! The same state id already '
            'exists!'.format(start_sid)
        )
    if not data:
        return
    # the start state is either the first state whose id declaration has
    # no __sls__, or it's the first state whose id declaration has a
    # __sls__ == sls.
    # NOTE: relies on dict iteration order reflecting declaration order
    # (ordered high state data).
    non_sids = set(['include', 'exclude', 'extend'])
    for sid, states in six.iteritems(data):
        if sid in non_sids or sid.startswith('__'):
            continue
        if '__sls__' not in states or states['__sls__'] == sls:
            break
    else:
        raise SaltRenderError('Can\'t determine the first state in the sls file!')
    # require_in the first declared state function of the found state id.
    reqin = {state_name(next(six.iterkeys(data[sid]))): sid}
    data[start_sid] = {STATE_FUNC: [{'require_in': [reqin]}]}
def add_goal_state(data):
    '''
    Append the generated goal state (id from ``stateconf_goal_state``)
    that requires every state declared directly in this sls file.
    '''
    goal_sid = __opts__['stateconf_goal_state']
    if goal_sid in data:
        raise SaltRenderError(
            'Can\'t generate goal state({0})! The same state id already '
            'exists!'.format(goal_sid)
        )
    requires = []
    for sid, states, sname, _ in \
            statelist(data, set(['include', 'exclude', 'extend'])):
        if '__sls__' in states:
            # The id declaration was included from an already-rendered sls
            # (currently only possible with pydsl's high state output).
            continue
        requires.append({state_name(sname): sid})
    data[goal_sid] = {STATE_FUNC: [dict(require=requires)]}
def state_name(sname):
    '''
    Return only the state-module portion of ``sname``, which may be either
    a bare state name or a dotted ``state.func`` reference.
    '''
    return sname.partition('.')[0]
# Quick and dirty way to get attribute access for dictionary keys.
# So, we can do: ${apache.port} instead of ${apache['port']} when possible.
class Bunch(dict):
    '''
    Dict subclass that also exposes its keys as attributes, so templates
    can write ${apache.port} instead of ${apache['port']}.
    '''
    def __getattr__(self, name):
        return self[name]
# With sls:
#
# state_id:
# stateconf.set:
# - name1: value1
#
# STATE_CONF is:
# { state_id => {name1: value1} }
#
STATE_CONF = {} # stateconf.set
STATE_CONF_EXT = {} # stateconf.set under extend: ...
def extract_state_confs(data, is_extend=False):
    '''
    Collect the name/value args of every stateconf.set state into the
    module-level STATE_CONF map (STATE_CONF_EXT for states under
    ``extend:``), keyed by state id, then merge any extend: overrides
    back into STATE_CONF.
    '''
    for state_id, state_dict in six.iteritems(data):
        if state_id == 'extend' and not is_extend:
            extract_state_confs(state_dict, True)
            continue
        if STATE_NAME in state_dict:
            args_key = STATE_NAME
        elif STATE_FUNC in state_dict:
            args_key = STATE_FUNC
        else:
            continue
        target = STATE_CONF_EXT if is_extend else STATE_CONF
        conf = target.setdefault(state_id, Bunch())
        for arg in state_dict[args_key]:
            if isinstance(arg, dict):
                name, value = next(six.iteritems(arg))
                conf[name] = value
        if not is_extend and state_id in STATE_CONF_EXT:
            extend = STATE_CONF_EXT[state_id]
            # Requisite lists are concatenated rather than replaced.
            for requisite in ('require', 'watch', 'listen'):
                if requisite in extend:
                    extend[requisite] += target[state_id].get(requisite, [])
            target[state_id].update(STATE_CONF_EXT[state_id])
| {
"content_hash": "cc2740f3d71bc4b509ceb6fdefa7d1aa",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 89,
"avg_line_length": 34.510416666666664,
"alnum_prop": 0.5700774725827548,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "c306950e21c0c06e2a0d31cfab528cc1cfe8e203",
"size": "19902",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.3/salt/renderers/stateconf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
} |
import warnings
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django import forms
from django.core import exceptions
from django.forms.widgets import HiddenInput, MultiWidget
from django.utils.deprecation import RemovedInDjango31Warning
from django.utils.translation import gettext_lazy as _
__all__ = [
'BaseRangeField', 'IntegerRangeField', 'DecimalRangeField',
'DateTimeRangeField', 'DateRangeField', 'FloatRangeField',
'HiddenRangeWidget', 'RangeWidget',
]
class RangeWidget(MultiWidget):
    """Render a range value as two copies of the same base widget."""

    def __init__(self, base_widget, attrs=None):
        super().__init__((base_widget, base_widget), attrs)

    def decompress(self, value):
        if not value:
            return (None, None)
        return (value.lower, value.upper)
class HiddenRangeWidget(RangeWidget):
    """RangeWidget rendering both bounds as <input type="hidden"> inputs."""

    def __init__(self, attrs=None):
        super().__init__(HiddenInput, attrs)
class BaseRangeField(forms.MultiValueField):
    """Common machinery for form fields mapping to psycopg2 range types.

    Subclasses supply ``base_field`` (the per-bound form field) and
    ``range_type`` (the psycopg2 range class to produce).
    """
    default_error_messages = {
        'invalid': _('Enter two valid values.'),
        'bound_ordering': _('The start of the range must not exceed the end of the range.'),
    }
    hidden_widget = HiddenRangeWidget

    def __init__(self, **kwargs):
        # Both bounds are optional by default; the widget/fields pair is
        # only built when the caller didn't provide one.
        if 'widget' not in kwargs:
            kwargs['widget'] = RangeWidget(self.base_field.widget)
        if 'fields' not in kwargs:
            kwargs['fields'] = [
                self.base_field(required=False),
                self.base_field(required=False),
            ]
        kwargs.setdefault('required', False)
        kwargs.setdefault('require_all_fields', False)
        super().__init__(**kwargs)

    def prepare_value(self, value):
        lower_field, upper_field = self.fields
        if isinstance(value, self.range_type):
            bounds = (value.lower, value.upper)
        elif value is None:
            bounds = (None, None)
        else:
            return value
        return [
            lower_field.prepare_value(bounds[0]),
            upper_field.prepare_value(bounds[1]),
        ]

    def compress(self, values):
        if not values:
            return None
        lower, upper = values
        if lower is not None and upper is not None and lower > upper:
            raise exceptions.ValidationError(
                self.error_messages['bound_ordering'],
                code='bound_ordering',
            )
        try:
            return self.range_type(lower, upper)
        except TypeError:
            # The range type rejected the bound values.
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
            )
class IntegerRangeField(BaseRangeField):
    """Form field for PostgreSQL integer ranges (NumericRange)."""
    default_error_messages = {'invalid': _('Enter two whole numbers.')}
    base_field = forms.IntegerField
    range_type = NumericRange
class DecimalRangeField(BaseRangeField):
    """Form field for PostgreSQL numeric ranges (NumericRange)."""
    default_error_messages = {'invalid': _('Enter two numbers.')}
    base_field = forms.DecimalField
    range_type = NumericRange
class FloatRangeField(DecimalRangeField):
    """Deprecated float-based variant of DecimalRangeField."""
    base_field = forms.FloatField

    def __init__(self, **kwargs):
        # Emit the deprecation warning at the caller's frame (stacklevel=2).
        warnings.warn(
            'FloatRangeField is deprecated in favor of DecimalRangeField.',
            RemovedInDjango31Warning, stacklevel=2,
        )
        super().__init__(**kwargs)
class DateTimeRangeField(BaseRangeField):
    """Form field for PostgreSQL timestamptz ranges (DateTimeTZRange)."""
    default_error_messages = {'invalid': _('Enter two valid date/times.')}
    base_field = forms.DateTimeField
    range_type = DateTimeTZRange
class DateRangeField(BaseRangeField):
    """Form field for PostgreSQL date ranges (DateRange)."""
    default_error_messages = {'invalid': _('Enter two valid dates.')}
    base_field = forms.DateField
    range_type = DateRange
| {
"content_hash": "aa2c7fed9fd4fb00b65c189f7b40f35e",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 97,
"avg_line_length": 32.0940170940171,
"alnum_prop": 0.6314247669773635,
"repo_name": "mdworks2016/work_development",
"id": "1e3011caad34f6f8bbe53414b39ce7fa670cc1d3",
"size": "3755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/20_Third_Certification/venv/lib/python3.7/site-packages/django/contrib/postgres/forms/ranges.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "142"
},
{
"name": "Kotlin",
"bytes": "68744"
},
{
"name": "Python",
"bytes": "1080"
}
],
"symlink_target": ""
} |
import fcntl
import hashlib
import os
import shutil
import subprocess
import sys
# Allow use of this module even if termcolor is missing. There are many
# standalone python scripts in build_tools that can be run directly without
# PYTHONPATH set (i.e. not via build/python_wrapper that adds this path).
# TODO(sbc): we should probably just assume that all the module dependencies
# are present.
try:
import termcolor
except ImportError:
termcolor = None
from naclports import error, paths
GS_URL = 'http://storage.googleapis.com/'
GS_BUCKET = 'naclports'
GS_MIRROR_URL = '%s%s/mirror' % (GS_URL, GS_BUCKET)
# Require the latest version of the NaCl SDK. naclports is built
# and tested against the pepper_canary release. To build against older
# versions of the SDK use the one of the pepper_XX branches (or use
# --skip-sdk-version-check).
MIN_SDK_VERSION = 43
arch_to_pkgarch = {
'x86_64': 'x86-64',
'i686': 'i686',
'arm': 'arm',
'pnacl': 'pnacl',
'emscripten': 'emscripten',
}
# Inverse of arch_to_pkgarch
pkgarch_to_arch = {v:k for k, v in arch_to_pkgarch.items()}
LOG_ERROR = 0
LOG_WARN = 1
LOG_INFO = 2
LOG_VERBOSE = 3
LOG_TRACE = 4
log_level = LOG_INFO
color_mode = 'auto'
def Color(message, color):
  """Wrap message in terminal color codes when coloring is enabled.

  Falls back to the plain message when termcolor is unavailable or color
  output is disabled (Color.enabled, set by CheckStdoutForColorSupport).
  """
  if not (termcolor and Color.enabled):
    return message
  return termcolor.colored(message, color)
def CheckStdoutForColorSupport():
  # Resolve color_mode == 'auto' by sniffing whether stdout is a TTY.
  # NOTE(review): other color_mode values leave Color.enabled untouched —
  # presumably they are handled elsewhere; confirm.
  if color_mode == 'auto':
    Color.enabled = sys.stdout.isatty()
def Memoize(f):
  """Cache a function's results keyed by its (hashable) positional args."""
  class Memo(dict):
    def __init__(self, func):
      super(Memo, self).__init__()
      self.f = func

    def __call__(self, *args):
      # dict lookup; __missing__ computes and stores on a cache miss.
      return self[args]

    def __missing__(self, key):
      value = self.f(*key)
      self[key] = value
      return value

  return Memo(f)
def SetVerbose(enabled):
  """Toggle between verbose logging and the default info level."""
  SetLogLevel(LOG_VERBOSE if enabled else LOG_INFO)
def SetLogLevel(verbosity):
  """Set the global logging threshold consulted by Log()."""
  global log_level
  log_level = verbosity
def Log(message, verbosity=LOG_INFO):
  """Log a message to the console (stdout) unless filtered by log level."""
  if verbosity > log_level:
    return
  sys.stdout.write('%s\n' % message)
  sys.stdout.flush()
def LogHeading(message, suffix=''):
  """Log a highlighted heading, with an optional suffix."""
  if Color.enabled:
    Log(Color(message, 'green') + suffix)
    return
  if log_level > LOG_WARN:
    # In verbose mode, make headings stand out with banner lines.
    Log('###################################################################')
    Log(message + suffix)
    Log('###################################################################')
  else:
    Log(message + suffix)
def Warn(message):
  """Log message at warning level with a 'warning: ' prefix."""
  Log('warning: ' + message, LOG_WARN)
def Trace(message):
  """Log message at trace (most verbose) level."""
  Log(message, LOG_TRACE)
def LogVerbose(message):
  """Log message at verbose level."""
  Log(message, LOG_VERBOSE)
def FindInPath(command_name):
  """Search user's PATH for a given executable.

  Returns:
    Full path to executable.
  """
  # On Windows, also try the common executable extensions when none given.
  if os.name == 'nt' and not os.path.splitext(command_name)[1]:
    extensions = ('.bat', '.com', '.exe')
  else:
    extensions = ('',)
  for directory in os.environ.get('PATH', '').split(os.pathsep):
    for extension in extensions:
      candidate = os.path.join(directory, command_name + extension)
      if os.path.isfile(candidate):
        return candidate
  raise error.Error('command not found: %s' % command_name)
def DownloadFile(filename, url):
  """Download a URL to a local file using curl.

  Downloads to a '.partial' temp file first, renaming it into place only
  on success so an interrupted download never leaves a truncated file.

  Args:
    filename: the name of the file to download the URL to.
    url: then URL to fetch.
  """
  temp_filename = filename + '.partial'
  # Fail early if curl is missing from the user's PATH.
  FindInPath('curl')
  cmd = ['curl', '--fail', '--location', '--stderr', '-',
         '-o', temp_filename]
  if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()):
    # Add --progress-bar but only if stdout is a TTY device.
    cmd.append('--progress-bar')
  else:
    # otherwise suppress status output, since curl always assumes its
    # talking to a TTY and writes \r and \b characters. But add
    # --show-error so that when curl fails it at least prints something.
    cmd += ['--silent', '--show-error']
  cmd.append(url)
  if log_level > LOG_WARN:
    Log('Downloading: %s [%s]' % (url, filename))
  else:
    Log('Downloading: %s' % url.replace(GS_URL, ''))
  try:
    subprocess.check_call(cmd)
  except subprocess.CalledProcessError as e:
    raise error.Error('Error downloading file: %s' % str(e))
  os.rename(temp_filename, filename)
def CheckStamp(filename, contents=None):
  """Check that a given stamp file is up-to-date.

  Returns:
    False if the file does not exist, or if it does not start with the
    given contents. True otherwise.
  """
  if not os.path.exists(filename):
    return False
  if contents is None:
    return True
  with open(filename) as f:
    return f.read().startswith(contents)
@Memoize
def GetSDKRoot():
  """Return the root of the currently configured Native Client SDK.

  Prefers $NACL_SDK_ROOT, falling back to the SDK checked out under
  OUT_DIR; raises error.Error when neither is available.
  """
  root = os.environ.get('NACL_SDK_ROOT')
  if root is None:
    fallback = os.path.join(paths.OUT_DIR, 'nacl_sdk')
    if not os.path.exists(fallback):
      raise error.Error('$NACL_SDK_ROOT not set')
    root = fallback
  if sys.platform == "cygwin":
    # Normalize Windows-style separators for cygwin tools.
    root = root.replace('\\', '/')
  return root
@Memoize
def GetEmscriptenRoot():
  """Return the Emscripten root from $EMSCRIPTEN or the local emsdk checkout."""
  emscripten = os.environ.get('EMSCRIPTEN')
  if emscripten is None:
    candidate = os.path.join(paths.OUT_DIR, 'emsdk_portable', 'emscripten',
                             'master')
    if not os.path.exists(candidate):
      raise error.Error('$EMSCRIPTEN not set')
    emscripten = candidate
  if not os.path.isdir(emscripten):
    raise error.Error('$EMSCRIPTEN environment variable does not point'
                      ' to a directory: %s' % emscripten)
  return emscripten
@Memoize
def GetSDKVersion():
  """Returns the version (as a string) of the current SDK."""
  # Delegates to the SDK's own getos.py tool.
  getos = os.path.join(GetSDKRoot(), 'tools', 'getos.py')
  version = subprocess.check_output([getos, '--sdk-version']).strip()
  return version
def CheckSDKVersion(version):
  """Returns True if the currently configured SDK is 'version' or above."""
  # Both sides are normalized to int so string inputs compare numerically.
  return int(GetSDKVersion()) >= int(version)
@Memoize
def GetSDKRevision():
  """Returns the revision of the currently configured Native Client SDK."""
  getos = os.path.join(GetSDKRoot(), 'tools', 'getos.py')
  version = subprocess.check_output([getos, '--sdk-revision']).strip()
  return int(version)
@Memoize
def GetPlatform():
  """Returns the current platform name according getos.py."""
  getos = os.path.join(GetSDKRoot(), 'tools', 'getos.py')
  platform = subprocess.check_output([getos]).strip()
  return platform
@Memoize
def GetInstallRoot(config):
  """Return the install prefix ('usr' dir) inside the toolchain for config."""
  if config.toolchain == 'emscripten':
    return os.path.join(GetEmscriptenRoot(), 'system', 'local')

  platform = GetPlatform()
  if config.toolchain == 'pnacl':
    tc_dir = os.path.join('%s_pnacl' % platform, 'le32-nacl')
  else:
    # Map the config architecture onto the SDK's toolchain directory arch.
    arch_part = {'arm': 'arm', 'i686': 'x86', 'x86_64': 'x86'}[config.arch]
    if config.toolchain == 'clang-newlib':
      base = '%s_pnacl' % platform
    else:
      base = '%s_%s_%s' % (platform, arch_part, config.toolchain)
    tc_dir = os.path.join(base, '%s-nacl' % config.arch)
  return os.path.join(GetSDKRoot(), 'toolchain', tc_dir, 'usr')
@Memoize
def GetInstallStampRoot(config):
  """Returns the installation metadata folder for the give configuration."""
  tc_root = GetInstallRoot(config)
  return os.path.join(tc_root, 'var', 'lib', 'npkg')
def GetInstallStamp(package_name, config):
  """Return the install stamp filename for a given package.

  The stamp is written at install time and holds metadata about the
  installed package.
  """
  return os.path.join(GetInstallStampRoot(config), package_name + '.info')
def GetListFile(package_name, config):
  """Return the filename listing a package's installed files.

  The list file is written at install time alongside the install stamp.
  """
  return os.path.join(GetInstallStampRoot(config), package_name + '.list')
def IsInstalled(package_name, config, stamp_content=None):
  """Return True if the given package is installed for the configuration."""
  return CheckStamp(GetInstallStamp(package_name, config), stamp_content)
def CheckSDKRoot():
  """Check validity of NACL_SDK_ROOT."""
  root = GetSDKRoot()
  if not os.path.isdir(root):
    raise error.Error('$NACL_SDK_ROOT does not exist: %s' % root)
  # getos.py serves as a landmark proving this really is an SDK checkout.
  landmark = os.path.join(root, 'tools', 'getos.py')
  if not os.path.exists(landmark):
    raise error.Error("$NACL_SDK_ROOT (%s) doesn't look right. "
                      "Couldn't find landmark file (%s)" % (root, landmark))
  # Finally enforce the minimum supported SDK version.
  if not CheckSDKVersion(MIN_SDK_VERSION):
    raise error.Error(
        'This version of naclports requires at least version %s of\n'
        'the NaCl SDK. The version in $NACL_SDK_ROOT is %s. If you want\n'
        'to use naclports with an older version of the SDK please checkout\n'
        'one of the pepper_XX branches (or run with\n'
        '--skip-sdk-version-check).' % (MIN_SDK_VERSION, GetSDKVersion()))
def HashFile(filename):
  """Return the SHA1 (in hex format) of the contents of the given file."""
  block_size = 100 * 1024
  sha1 = hashlib.sha1()
  # Open in binary mode: text mode would translate newlines on Windows
  # (corrupting the digest) and, on Python 3, yields str chunks that
  # hashlib refuses to digest.
  with open(filename, 'rb') as f:
    while True:
      data = f.read(block_size)
      if not data:
        break
      sha1.update(data)
  return sha1.hexdigest()
class HashVerificationError(error.Error):
  """Raised when a file's SHA1 does not match the expected checksum."""
  pass
def VerifyHash(filename, sha1):
  """Raise HashVerificationError unless filename's SHA1 matches sha1."""
  actual = HashFile(filename)
  if actual != sha1:
    raise HashVerificationError(
        'verification failed: %s\nExpected: %s\nActual: %s' %
        (filename, sha1, actual))
def RemoveTree(directory):
  """Recursively remove a directory and its contents."""
  if not os.path.exists(directory):
    return
  if not os.path.isdir(directory):
    # Interpolate with '%' — previously the argument was passed as a second
    # positional arg and the '%s' was never filled in.
    raise error.Error('RemoveTree: not a directory: %s' % directory)
  shutil.rmtree(directory)
def RelPath(filename):
  """Return a pathname relative to the root the naclports src tree.
  This is used mostly to make output more readable when printing filenames."""
  return os.path.relpath(filename, paths.NACLPORTS_ROOT)
def Makedirs(directory):
  """Create directory (including parents); no-op when it already exists."""
  if os.path.isdir(directory):
    return
  if os.path.exists(directory):
    # A non-directory is squatting on the path; creating would fail anyway.
    raise error.Error('mkdir: File exists and is not a directory: %s'
                      % directory)
  Trace("mkdir: %s" % directory)
  os.makedirs(directory)
class Lock(object):
  """Per-directory flock()-based context manager
  This class will raise an exception if another process already holds the
  lock for the given directory.
  """
  def __init__(self, lock_dir):
    if not os.path.exists(lock_dir):
      Makedirs(lock_dir)
    self.file_name = os.path.join(lock_dir, 'naclports.lock')
    self.fd = open(self.file_name, 'w')
  def __enter__(self):
    try:
      # LOCK_NB makes this non-blocking: fail immediately instead of
      # waiting for the other process to release the lock.
      fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except Exception:
      raise error.Error("Unable to acquire lock (%s): Is naclports already "
                        "running?" % self.file_name)
  def __exit__(self, exc_type, exc_val, exc_tb):
    # NOTE(review): the lock file is removed before self.fd is closed (which
    # is what releases the flock); a competing process could open/lock the
    # just-unlinked file in between — confirm this ordering is intentional.
    os.remove(self.file_name)
    self.fd.close()
class BuildLock(Lock):
  """Lock used when building a package (essentially a lock on OUT_DIR)"""
  def __init__(self):
    # One global build lock: all builds share OUT_DIR.
    super(BuildLock, self).__init__(paths.OUT_DIR)
class InstallLock(Lock):
  """Lock used when installing/uninstalling package"""
  def __init__(self, config):
    # Per-toolchain lock: installs for different configs may run in parallel.
    root = GetInstallRoot(config)
    super(InstallLock, self).__init__(root)
CheckStdoutForColorSupport()
| {
"content_hash": "d1b3c7842de58ad85f8951c7786a4922",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 80,
"avg_line_length": 27.91294117647059,
"alnum_prop": 0.6584337857203069,
"repo_name": "Schibum/naclports",
"id": "ec5d5b1ee7f31dab983a5a8b4fed40220d09f0ef",
"size": "12031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/naclports/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "526058"
},
{
"name": "C++",
"bytes": "126079"
},
{
"name": "CMake",
"bytes": "1541"
},
{
"name": "CSS",
"bytes": "1787"
},
{
"name": "Emacs Lisp",
"bytes": "265"
},
{
"name": "HTML",
"bytes": "32873"
},
{
"name": "JavaScript",
"bytes": "220544"
},
{
"name": "Makefile",
"bytes": "43950"
},
{
"name": "Python",
"bytes": "214656"
},
{
"name": "Shell",
"bytes": "323967"
}
],
"symlink_target": ""
} |
"""
Talon.One API
Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) are used to integrate with our platform - Use the operation in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerSession](https://docs.talon.one/integration-api/#operation/updateCustomerSessionV2) endpoint is `https://mycompany.talon.one/v2/customer_sessions/{Id}` # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class UpdateCatalog(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> OpenAPI type of the attribute.
    openapi_types = {
        'description': 'str',
        'name': 'str',
        'subscribed_applications_ids': 'list[int]'
    }

    # attribute_map: attribute name -> JSON key in the API definition.
    attribute_map = {
        'description': 'description',
        'name': 'name',
        'subscribed_applications_ids': 'subscribedApplicationsIds'
    }

    def __init__(self, description=None, name=None, subscribed_applications_ids=None, local_vars_configuration=None):  # noqa: E501
        """UpdateCatalog - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._description = None
        self._name = None
        self._subscribed_applications_ids = None
        self.discriminator = None

        # Route each supplied value through its property setter; omitted
        # (None) values leave the private attribute at None.
        for field, supplied in (('description', description),
                                ('name', name),
                                ('subscribed_applications_ids', subscribed_applications_ids)):
            if supplied is not None:
                setattr(self, field, supplied)

    @property
    def description(self):
        """Gets the description of this UpdateCatalog.  # noqa: E501

        A description of this cart item catalog.  # noqa: E501

        :return: The description of this UpdateCatalog.  # noqa: E501
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this UpdateCatalog.

        A description of this cart item catalog.  # noqa: E501

        :param description: The description of this UpdateCatalog.  # noqa: E501
        :type: str
        """
        self._description = description

    @property
    def name(self):
        """Gets the name of this UpdateCatalog.  # noqa: E501

        Name of this cart item catalog.  # noqa: E501

        :return: The name of this UpdateCatalog.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this UpdateCatalog.

        Name of this cart item catalog.  # noqa: E501

        :param name: The name of this UpdateCatalog.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def subscribed_applications_ids(self):
        """Gets the subscribed_applications_ids of this UpdateCatalog.  # noqa: E501

        A list of the IDs of the applications that are subscribed to this catalog.  # noqa: E501

        :return: The subscribed_applications_ids of this UpdateCatalog.  # noqa: E501
        :rtype: list[int]
        """
        return self._subscribed_applications_ids

    @subscribed_applications_ids.setter
    def subscribed_applications_ids(self, subscribed_applications_ids):
        """Sets the subscribed_applications_ids of this UpdateCatalog.

        A list of the IDs of the applications that are subscribed to this catalog.  # noqa: E501

        :param subscribed_applications_ids: The subscribed_applications_ids of this UpdateCatalog.  # noqa: E501
        :type: list[int]
        """
        self._subscribed_applications_ids = subscribed_applications_ids

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _serialize(value):
            # One-level conversion, mirroring the generated serializer:
            # lists and dict values are converted via to_dict() when present.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return dict((key, val.to_dict() if hasattr(val, "to_dict") else val)
                            for key, val in value.items())
            return value

        return dict((attr, _serialize(getattr(self, attr)))
                    for attr in self.openapi_types)

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, UpdateCatalog) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not isinstance(other, UpdateCatalog) or self.to_dict() != other.to_dict()
| {
"content_hash": "ddbbf4dec89cbcc052f4472667d5932a",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 732,
"avg_line_length": 34.4375,
"alnum_prop": 0.6140900841445306,
"repo_name": "talon-one/talon_one.py",
"id": "d9a7b8465a68776b99e7aeba4526c03a8b121219",
"size": "6078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "talon_one/models/update_catalog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "5139586"
},
{
"name": "Shell",
"bytes": "1826"
}
],
"symlink_target": ""
} |
#coding:utf-8
__author__ = 'Jerry'
import sys
prefix = 'uro_'
"""
title comment
"""
def fileTitleComment():
return """ /**
*author:Jerry
*/\n"""
"""
help
"""
def showHelp():
print """help\n
example:
1:createObject.py obj:CWDemoObject 1 s:sName s:sTitle i:nID d:dBirtyday
2:createObject.py obj:CWDemoObject 0 s:sName s:sTitle i:nID d:dBirtyday
3:createObject.py obj:CWDemoObject s:sName s:sTitle i:nID d:dBirtyday
4:createObject.py CWDemoObject s:sName s:sTitle i:nID d:dBirtyday
obj:name (name means file name and object name.)
1: open arc flag ^-^
s:NSString
i:NSInteger
d:NSDate"""
"""
@property (nonatomic, strong) NSString* sCode;
"""
def writeObj(obj_string, fileHandle):
try:
(obj, name) = obj_string.strip().split(':')
if obj == 's':
fileHandle.write('@property (nonatomic, strong) NSString*\t\t\t%s;\n' %(name))
elif obj == 'i':
fileHandle.write('@property (nonatomic, assign) NSInteger\t\t\t%s;\n' %(name))
elif obj == 'd':
fileHandle.write('@property (nonatomic, strong) NSDate*\t\t\t%s;\n' %(name))
except ValueError as err:
print ('error %s' % (err))
"""
@synthesize _sCode;
"""
def writeSynthesize(obj_string, fileHandle):
try:
(obj, name) = obj_string.strip().split(':')
fileHandle.write('@synthesize _%s;\n' %(name))
except ValueError as err:
print ('error %s' % (err))
"""
[object release];
"""
def writeObjectRelease(obj_string, fileHandle):
try:
(obj, name) = obj_string.strip().split(':')
fileHandle.write('\n\t[_%s release];' % (name))
except ValueError as err:
print ('error %s' % (err))
def createModel(*args):
    """Generate Objective-C @property declarations from a MySQL dump file.

    args[0] is an argv-style list; its second element names a ``<name>.sql``
    file containing a ``CREATE TABLE`` dump whose column lines start with
    two spaces and a backtick.  Progress is printed (as before) and the
    generated interface lines are returned as a list of strings.
    """
    # MySQL column type -> Objective-C property class.
    typeDict = {'int': 'NSNumber',
                'tinyint': 'NSNumber',
                'varchar': 'NSString',
                'text': 'NSString',
                'decimal': 'NSDecimalNumber',
                'datetime': 'NSString'}
    try:
        args = args[0]
        modelName = '@interface %s : PropertyObject' % args[1][4:]
        interfaceResult = ['#import "PropertyObject.h"', modelName, '']
        print(args[1])
        fn = open("%s.sql" % (args[1]), 'r')
        try:
            line = fn.readline()
            while line:
                if line[0:3] == '  `':
                    item = line.split(' ')
                    print(item[1][0:3])
                    column = str(item[2]).replace('`', '')
                    sqlType = item[3]
                    if sqlType[0:3] == 'int' or sqlType[0:7] == 'tinyint':
                        objcType = typeDict['int']
                    elif sqlType[0:7] == 'varchar' or sqlType[0:4] == 'text':
                        objcType = typeDict['varchar']
                    elif sqlType[0:7] == 'decimal':
                        # BUG FIX: the decimal/datetime templates were missing
                        # the closing ')' after "readonly" in the original.
                        objcType = typeDict['decimal']
                    elif sqlType[0:8] == 'datetime':
                        objcType = typeDict['datetime']
                    else:
                        objcType = None
                    if objcType is not None:
                        interfaceResult.append('@property (nonatomic, readonly) %s *%s;' % (objcType, column))
                # BUG FIX: the original advanced with fn.next(), which raises
                # "ValueError: Mixing iteration and read methods" in Python 2
                # right after the initial readline(), and an uncaught
                # StopIteration at EOF; readline() returns '' at EOF instead.
                line = fn.readline()
                print(line)
        finally:
            # BUG FIX: the file handle was never closed.
            fn.close()
        print('over')
        return interfaceResult
    except ValueError as err:
        print("error : %s" % (err))
if __name__ == '__main__':
    # Entry point: pass the raw argv list through to createModel.
    createModel(sys.argv)
| {
"content_hash": "0259eaa96a01fa5e9ae2214be6b1f4fa",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 139,
"avg_line_length": 31.944099378881987,
"alnum_prop": 0.5012638537818394,
"repo_name": "Joey-Lee/Automation-Task-by-Ruby",
"id": "297c33a7aca167f6db8e60df47f3a4fca12898aa",
"size": "5143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "createModel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5143"
},
{
"name": "Ruby",
"bytes": "3166"
}
],
"symlink_target": ""
} |
import unittest
from rename import process_file
class TestProcessFile(unittest.TestCase):
    """Tests for rename.process_file's handling of binary files."""

    # Path of the throwaway binary fixture created for each test.
    binary_file_path = 'tests/tests_files/binary_file'

    def setUp(self):
        """Create the binary fixture before every test in this class."""
        self.create_binary_file()

    def tearDown(self):
        """Remove the binary fixture after every test in this class."""
        self.remove_binary_file()

    def create_binary_file(self):
        """Write a small file of raw non-text bytes to test with."""
        with open(self.binary_file_path, 'wb') as handle:
            handle.write(bytearray([1, 2, 3]))

    def remove_binary_file(self):
        """Delete the binary fixture created by create_binary_file."""
        import os
        os.remove(self.binary_file_path)

    def test_process_file_should_skip_binary_file(self):
        """process_file should return None (i.e. skip) for a binary file."""
        result = process_file('src', 'dest', 'word_option',
                              self.binary_file_path,
                              'diff', 'text_only')
        self.assertIsNone(result)
| {
"content_hash": "5a63057847ee863be323608beaff1bcb",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 68,
"avg_line_length": 27.08108108108108,
"alnum_prop": 0.6067864271457086,
"repo_name": "dkrikun/rename",
"id": "d6f8c980cf3a1309b8ee9a222c86022d84800970",
"size": "1026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18492"
}
],
"symlink_target": ""
} |
from django.contrib.auth import authenticate
from rest_framework import serializers
class AuthTokenSerializer(serializers.Serializer):
    """Validates a username/password pair and exposes the matched user."""

    username = serializers.CharField()
    password = serializers.CharField()

    def validate(self, attrs):
        """Authenticate the credentials; attach the user to attrs on success.

        Raises ValidationError when either field is missing, the credentials
        do not match, or the matched account is disabled.
        """
        username = attrs.get('username')
        password = attrs.get('password')

        # Guard clauses replace the original nested if/else pyramid.
        if not (username and password):
            raise serializers.ValidationError('Must include "username" and "password"')

        user = authenticate(username=username, password=password)
        if not user:
            raise serializers.ValidationError('Unable to login with provided credentials.')
        if not user.is_active:
            raise serializers.ValidationError('User account is disabled.')

        attrs['user'] = user
        return attrs
| {
"content_hash": "5b17f063d6a36c645df3e7c92df142d1",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 95,
"avg_line_length": 36.833333333333336,
"alnum_prop": 0.6176470588235294,
"repo_name": "hfercc/mese2014",
"id": "d1296a26080caced238256bb5f2299d5d407c933",
"size": "884",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/rest_framework/authtoken/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "103122"
},
{
"name": "JavaScript",
"bytes": "1054910"
},
{
"name": "Python",
"bytes": "1121791"
},
{
"name": "Shell",
"bytes": "2381"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import json
import logging
from datetime import timedelta, datetime
import unityapiclient
from unityapiclient.client import UnityApiClient
from b2accessdeprovisioning.configparser import config
from b2accessdeprovisioning.user import User
from b2accessdeprovisioning.notifier import MailNotifier
import b2accessdeprovisioning.util as util
# Defaults applied when the corresponding key is absent from the config.
# BUG FIX: eight of these constants were defined twice with identical
# values; the redundant second group has been removed.
DEFAULT_API_PATH = 'rest-admin'
DEFAULT_API_VERSION = 'v1'
DEFAULT_API_CERT_VERIFY = True
DEFAULT_ATTR_WHITELIST = []
DEFAULT_RETENTION_PERIOD = 365  # days to keep a disabled account before removal
DEFAULT_NOTIFICATION_EMAIL_HOST = 'localhost'
DEFAULT_NOTIFICATION_EMAIL_PORT = 25
DEFAULT_NOTIFICATION_EMAIL_USE_TLS = False
DEFAULT_NOTIFICATION_EMAIL_SUBJECT = 'Deprovisioned B2ACCESS accounts'
DEFAULT_NOTIFICATION_EMAIL_INTRO_TEXT = 'See attachment for details of deprovisioned B2ACCESS accounts.\n\nNote: This is an automated email, please don\'t reply.'
DEFAULT_LOG_LEVEL = 'WARNING'
DEFAULT_DRY_RUN = False
# Module-level logger; level comes from config or falls back to the default.
logger = logging.getLogger(__name__)
logging.basicConfig(level=(logging.getLevelName(util.safeget(config, 'log_level')) or DEFAULT_LOG_LEVEL))
# Unity (B2ACCESS) admin API client, configured from the 'api' config section.
b2access = UnityApiClient(
    config['api']['base_url'],
    rest_admin_path=(util.safeget(config, 'api', 'path') or DEFAULT_API_PATH),
    api_version=(util.safeget(config, 'api', 'version') or DEFAULT_API_VERSION),
    auth=(config['api']['user'], config['api']['password']),
    cert_verify=(util.safeget(config, 'api', 'cert_verify') or DEFAULT_API_CERT_VERIFY))
# SMTP notifier used to mail the deprovisioning report.
notifier = MailNotifier(
    host=(util.safeget(config, 'notifications', 'email', 'host') or DEFAULT_NOTIFICATION_EMAIL_HOST),
    port=(util.safeget(config, 'notifications', 'email', 'port') or DEFAULT_NOTIFICATION_EMAIL_PORT),
    use_tls=(util.safeget(config, 'notifications', 'email', 'use_tls') or DEFAULT_NOTIFICATION_EMAIL_USE_TLS),
    user=util.safeget(config, 'notifications', 'email', 'user'),
    password=util.safeget(config, 'notifications', 'email', 'password'))
# When dry_run is truthy, no changes are written and no mail is sent.
dry_run = (util.safeget(config, 'dry_run') or DEFAULT_DRY_RUN)
email_from = config['notifications']['email']['from']
email_to = config['notifications']['email']['to']
def main():
    """Deprovision disabled accounts that are not yet scheduled for removal."""
    group = b2access.get_group(util.safeget(config, 'user_group'))

    candidates = []
    for member_id in group['members']:
        entity = b2access.get_entity(member_id)
        info = entity['entityInformation']
        # Only disabled accounts without a pending REMOVE operation qualify.
        if info['state'] != 'disabled' or info['scheduledOperation'] == 'REMOVE':
            continue

        user = User(internal_id=member_id)
        candidates.append(user)

        # Shared (persistent) identifier, when the entity has one.
        for identity in entity['identities']:
            if identity['typeId'] == 'persistent':
                user.shared_id = identity['value']
                break

        # Email attribute values ([] when the entity has no email attribute).
        user.email = next(
            (attr['values']
             for attr in b2access.get_entity_attrs(member_id, effective=False)
             if 'name' in attr and attr['name'] == 'email'),
            [])

    for user in candidates:
        _remove_user_attrs(user)
        _schedule_user_removal(user)
    if candidates:
        _send_notification(candidates)
def _remove_user_attrs(user):
    """Strip all fully-visible, non-whitelisted attributes from the entity."""
    whitelist = (util.safeget(config, 'attr_whitelist') or DEFAULT_ATTR_WHITELIST)
    for attr in b2access.get_entity_attrs(user.internal_id, effective=False):
        if 'name' not in attr:
            continue
        if attr['name'] in whitelist or attr['visibility'] != 'full':
            continue
        logger.debug("removing attribute '%s' from entity '%s'",
                     attr['name'], user.internal_id)
        if not dry_run:
            b2access.remove_entity_attr(user.internal_id, attr['name'])
def _schedule_user_removal(user):
    """Schedule permanent entity removal after the retention period (days)."""
    retention_days = util.safeget(config, 'retention_period') or DEFAULT_RETENTION_PERIOD
    when = datetime.utcnow() + timedelta(days=retention_days)
    logger.debug("scheduling removal of entity '%s' at '%s'",
                 user.internal_id, when)
    if dry_run:
        return
    b2access.schedule_operation(user.internal_id, operation='REMOVE', when=when)
def _send_notification(users=None):
    """Email a JSON summary of the deprovisioned accounts to the operators.

    :param users: list of User objects; users without a shared_id are
        omitted from the report.  Nothing is sent when the report is empty
        or when dry_run is enabled (the message is still logged).

    BUG FIX: the original signature used a mutable default (users=[]),
    a classic Python pitfall; None is the safe, equivalent default.
    """
    if users is None:
        users = []
    account_details = [
        {'id': user.shared_id, 'email': user.email}
        for user in users
        if user.shared_id is not None
    ]
    if not account_details:
        return

    attachment = {
        'filename': 'users.json',
        'message': json.dumps(account_details, sort_keys=True,
                              indent=4, separators=(',', ': ')),
    }
    attachments = [attachment]

    subject = (util.safeget(config, 'notifications', 'email', 'subject') or DEFAULT_NOTIFICATION_EMAIL_SUBJECT)
    logger.debug("sending email notification from address '%s' to '%s' "
                 "with subject '%s' and attachment users.json:\n%s",
                 email_from,
                 email_to,
                 subject,
                 attachment['message'])
    if not dry_run:
        notifier.send(email_from,
                      email_to,
                      subject,
                      (util.safeget(config, 'notifications', 'email', 'intro_text') or DEFAULT_NOTIFICATION_EMAIL_INTRO_TEXT),
                      attachments)
if __name__ == "__main__":
main()
| {
"content_hash": "185dbbef4bebd7bfce546f8ebeb29190",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 162,
"avg_line_length": 39.310344827586206,
"alnum_prop": 0.6457894736842106,
"repo_name": "EUDAT-B2ACCESS/b2access-deprovisioning",
"id": "dae7555f4acdfd81b95f727c6ac33c51a02981c7",
"size": "5700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "b2accessdeprovisioning/monitor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12218"
}
],
"symlink_target": ""
} |
import uuid
# generate
def generate():
    """Return a new time-based UUID as an uppercase, brace-wrapped GUID."""
    return '{%s}' % str(uuid.uuid1()).upper()
"content_hash": "cb90937839640a3a364614a4dfdf3ab8",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 47,
"avg_line_length": 17.4,
"alnum_prop": 0.5977011494252874,
"repo_name": "dmsovetov/pygling",
"id": "32ade63f74c5f398f771247a22b60120ac7dbe4a",
"size": "1404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pygling/Generator/VisualStudio/VCX/ID.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5335"
},
{
"name": "Python",
"bytes": "231527"
},
{
"name": "Shell",
"bytes": "2859"
}
],
"symlink_target": ""
} |
"""Supplies test data for the Seeing Monitor
History:
2010-09-24
2010-09-29 ROwen modified to use RO.Alg.RandomWalk
2010-10-18 ROwen Added guide offset information.
2012-07-09 ROwen Modified to use RO.TkUtil.Timer.
"""
import math
import RO.Alg.RandomWalk
from RO.TkUtil import Timer
import TUI.Base.TestDispatcher
# Dispatcher that fakes keyword data from the "tcc" actor for offline tests.
testDispatcher = TUI.Base.TestDispatcher.TestDispatcher("tcc")
tuiModel = testDispatcher.tuiModel
Alt = 45.0  # simulated telescope altitude (deg)

class GuideOffInfo(object):
    """Constrained random walks for the az/alt/rot guide offsets."""
    def __init__(self):
        # On-sky az offsets scale as 1/cos(alt).
        azScale = 1.0 / math.cos(Alt * RO.PhysConst.RadPerDeg)
        lim = 10.0 / RO.PhysConst.ArcSecPerDeg
        mean = 0.0 / RO.PhysConst.ArcSecPerDeg
        sigma = 2.0 / RO.PhysConst.ArcSecPerDeg
        def makeWalk(scale=1.0):
            return RO.Alg.RandomWalk.ConstrainedGaussianRandomWalk(
                mean * scale, sigma * scale, -lim * scale, lim * scale)
        self.randomValueDict = dict(
            azOff = makeWalk(azScale),
            altOff = makeWalk(),
            rotOff = makeWalk(),
        )

    def update(self):
        """Advance every random walker one step.
        """
        for randWalk in self.randomValueDict.itervalues():
            next(randWalk)

    def getValueDict(self):
        """Return a dictionary of value name: current value.
        """
        return dict((name, randWalk.value)
            for name, randWalk in self.randomValueDict.iteritems())

    def getKeyVarStr(self):
        """Format the current offsets as a tcc GuideOff keyword string.

        Fields are az off/vel/time, alt off/vel/time, rot off/vel/time;
        offsets are in degrees and assumed constant, so the velocities and
        times are fixed dummy values.
        """
        return "GuideOff=%(azOff)0.5f, 0.0, 100.0, %(altOff)0.5f, 0.0, 100.0, %(rotOff)0.5f, 0.0, 100.0" % \
            self.getValueDict()
class StarInfo(object):
    """Constrained random walks for a simulated guide-star measurement."""
    def __init__(self):
        newWalk = RO.Alg.RandomWalk.ConstrainedGaussianRandomWalk
        self.randomValueDict = dict(
            fwhm = newWalk(0.5, 0.1, 0.3, 1.2),
            amplitude = newWalk(10000, 100, 5000, 32000),
            xCenter = newWalk(0, 10, -500, 500),
            yCenter = newWalk(0, 10, -500, 500),
        )

    def update(self):
        """Advance every random walker one step.
        """
        for randWalk in self.randomValueDict.itervalues():
            next(randWalk)

    def getValueDict(self):
        """Return name: value dict plus derived brightness and background.
        """
        valDict = dict((name, randWalk.value)
            for name, randWalk in self.randomValueDict.iteritems())
        valDict["brightness"] = valDict["fwhm"] * valDict["amplitude"]
        valDict["background"] = 1200.0
        return valDict

    def getKeyVarStr(self):
        """Format the current star as a gcam Star keyword string.

        Field layout (lengths/positions in binned pixels, intensities in
        ADU): type, index, x/yCenter, x/yError, radius, asymmetry, FWHM
        major/minor, ellipse major-axis angle, chiSq, counts, background,
        amplitude; "g" (guide) stars additionally append predicted x,y.
        """
        return "Star=c, 0, %(xCenter)0.1f, %(yCenter)0.1f, 10.0, -7.0, 5, 100.0, %(fwhm)0.2f, %(fwhm)0.2f, 0, 10, %(brightness)0.1f, %(background)0.1f, %(amplitude)0.1f" % \
            self.getValueDict()
def runTest():
    """Start the simulated data streams.

    Dispatches an initial tcc AxePos, then kicks off the self-rescheduling
    generators for guide offsets, guide stars, secondary focus and
    secondary piston (periods of 2, 5, 6 and 3 seconds respectively).
    """
    testDispatcher.dispatch("AxePos=0.0, %0.3f, 0" % (Alt,), actor="tcc")
    _nextGuideOffset(GuideOffInfo(), 2)
    _nextStar(StarInfo(), 5)
    _nextSecFocus(RO.Alg.RandomWalk.ConstrainedGaussianRandomWalk(0, 10, -500, 500), 6)
    _nextSecPiston(RO.Alg.RandomWalk.ConstrainedGaussianRandomWalk(100, 25, -2000, 2000), 3)
def _nextGuideOffset(guideOffInfo, delaySec):
    """Dispatch the next simulated guide offset, then reschedule itself."""
    guideOffInfo.update()
    testDispatcher.dispatch(guideOffInfo.getKeyVarStr(), actor="tcc")
    Timer(delaySec, _nextGuideOffset, guideOffInfo, delaySec)
def _nextStar(starInfo, delaySec):
    """Dispatch the next simulated guide star, then reschedule itself."""
    starInfo.update()
    testDispatcher.dispatch(starInfo.getKeyVarStr(), actor="gcam")
    Timer(delaySec, _nextStar, starInfo, delaySec)
def _nextSecFocus(secFocus, delaySec):
    """Dispatch the next simulated SecFocus value, then reschedule itself."""
    testDispatcher.dispatch("SecFocus=%0.1f" % (next(secFocus),), actor="tcc")
    Timer(delaySec, _nextSecFocus, secFocus, delaySec)
def _nextSecPiston(secPiston, delaySec):
    """Dispatch the next simulated SecOrient piston, then reschedule itself."""
    testDispatcher.dispatch("SecOrient=%0.1f, 0, 0, 0, 0" % (next(secPiston),), actor="tcc")
    Timer(delaySec, _nextSecPiston, secPiston, delaySec)
| {
"content_hash": "2606d1985265c8965647548ac926cda6",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 173,
"avg_line_length": 41.83206106870229,
"alnum_prop": 0.6682481751824818,
"repo_name": "r-owen/TUI",
"id": "a98fd9198a75e3cf830a0aca848903825b9c197a",
"size": "5502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TUI/Guide/GuideMonitor/TestData.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "412255"
},
{
"name": "Python",
"bytes": "1443987"
}
],
"symlink_target": ""
} |
import os
import re
import logging
import datetime
import mimetypes
import time
import jinja2
import email.Utils
from Cookie import BaseCookie
from routes import url_for
from google.appengine.ext.webapp import Response
from google.appengine.api import memcache, users
from drydrop.lib.json import json_encode
from drydrop_handler import DRY_ROOT, APP_ROOT, APP_ID, VER_ID, LOCAL
from drydrop.app.models import *
from drydrop.app.core.appceptions import *
from drydrop.lib.utils import *
from drydrop.lib.jinja_loaders import InternalTemplateLoader
from drydrop.app.helpers.buster import cache_buster
class AbstractController(object):
def __init__(self, request, response, handler):
self.request = request
self.response = response
self.handler = handler
self.view = {'params': request.params }
self.params = request.params
self.emited = False
self.cookies = request.cookies
def render(self, template_name):
env = jinja2.Environment(loader = InternalTemplateLoader(os.path.join(DRY_ROOT, 'app', 'views')))
try:
template = env.get_template(template_name)
except jinja2.TemplateNotFound:
raise jinja2.TemplateNotFound(template_name)
content = template.render(self.view)
if LOCAL:
content = cache_buster(content)
self.response.out.write(content)
def before_action(self):
pass
def after_action(self):
pass
def render_view(self, file_name, params = None):
if params:
self.view.update(params)
self.response.headers['Content-Type'] = 'text/html'
self.render(file_name)
self.emited = True
def render_text(self, text):
self.response.headers['Content-Type'] = 'text/html'
if LOCAL:
text = cache_buster(text)
self.response.out.write(text)
self.emited = True
def render_html(self, html, params = None):
if params:
self.view.update(params)
if LOCAL:
html = cache_buster(html)
self.response.out.write(html)
self.emited = True
def render_xml(self, xml):
self.response.headers['Content-Type'] = 'text/xml'
self.render(file_name)
self.emited = True
def render_json(self, json):
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json)
self.emited = True
def redirect_to(self, url):
"""Redirects to a specified url"""
# self.handler.redirect(url)
# self.emited = True
# raise PageRedirect, (url)
# mrizka delala problemy pri claimovani openid
m = re.match(r'^(.*)#.*?$', url)
if m: url = m.group(1)
logging.info("Redirecting to: %s" % url)
# send the redirect! we use a meta because appengine bombs out sometimes with long redirect urls
self.response.out.write("<html><head><meta http-equiv=\"refresh\" content=\"0;url=%s\"></head><body></body></html>" % (url,))
self.emited = True
raise PageRedirect, (url)
def notfound(self, code, message = None):
self.response.set_status(code, str(message))
if message is None: message = Response.http_status_message(code)
self.view['message'] = message
self.view['code'] = code
self.render_view('system/notfound.html')
def error(self, code, message = None):
self.response.set_status(code, str(message))
if message is None: message = Response.http_status_message(code)
self.view['message'] = message
self.view['code'] = code
self.render_view('system/error.html')
class CookieController(AbstractController):
    """Mixin controller with helpers for adding and removing response cookies."""

    def set_cookie(self, key, value='', max_age=None,
            path='/', domain=None, secure=None, httponly=False,
            version=None, comment=None):
        """
        Set (add) a cookie for the response

        max_age (seconds) is also reused as the 'expires' value; leave it
        None for a browser-session cookie.
        """
        cookies = BaseCookie()
        cookies[key] = value
        for var_name, var_value in [
            ('max-age', max_age),
            ('path', path),
            ('domain', domain),
            ('secure', secure),
            ('HttpOnly', httponly),
            ('version', version),
            ('comment', comment),
            ]:
            # Skip unset flags; False means "omit the attribute entirely".
            if var_value is not None and var_value is not False:
                cookies[key][var_name] = str(var_value)
        if max_age is not None:
            cookies[key]['expires'] = max_age
        # NOTE: appends to the private header list directly so multiple
        # Set-Cookie headers can coexist (dict-style assignment would
        # overwrite any cookie set earlier in this response).
        header_value = cookies[key].output(header='').lstrip()
        self.response.headers._headers.append(('Set-Cookie', header_value))

    def delete_cookie(self, key, path='/', domain=None):
        """
        Delete a cookie from the client. Note that path and domain must match
        how the cookie was originally set.

        This sets the cookie to the empty string, and max_age=0 so
        that it should expire immediately.
        """
        self.set_cookie(key, '', path=path, domain=domain, max_age=0)

    def unset_cookie(self, key):
        """
        Unset a cookie with the given name (remove it from the
        response). If there are multiple cookies (e.g., two cookies
        with the same name and different paths or domains), all such
        cookies will be deleted.

        Raises KeyError when no Set-Cookie header exists or none match key.
        """
        existing = self.response.headers.get_all('Set-Cookie')
        if not existing:
            raise KeyError("No cookies at all have been set")
        # Drop all Set-Cookie headers, then re-add every cookie except `key`.
        del self.response.headers['Set-Cookie']
        found = False
        for header in existing:
            cookies = BaseCookie()
            cookies.load(header)
            if key in cookies:
                found = True
                del cookies[key]
            header = cookies.output(header='').lstrip()
            if header:
                self.response.headers.add('Set-Cookie', header)
        if not found:
            raise KeyError("No cookie has been set with the name %r" % key)
class BaseController(CookieController):
    """Controller with static-file serving, cache headers and JSON helpers."""

    SESSION_MEMCACHE_TIMEOUT = 0  # 0 = no expiry
    CACHE_TIMEOUT = 7200  # seconds of client-side caching for static files

    def serve_static_file(self, base_path, path, more = None, more_placeholder = None, filter=None):
        """Serve base_path/path with caching headers; respond 404 when missing.

        `more` is extra content, either substituted for `more_placeholder`
        inside the file or appended after it; `filter` may transform the
        raw content first.
        """
        full_path = os.path.join(base_path, path)
        try:
            logging.debug('Serving static file %s', full_path)
            content = universal_read(full_path)
            if filter:
                content = filter(content, base_path, path)
            content_type, _encoding = mimetypes.guess_type(path)
            self.response.headers['Content-Type'] = content_type
            self.set_caching_headers(self.CACHE_TIMEOUT)
            if more and more_placeholder:
                content = content.replace(more_placeholder, more)
            self.response.out.write(content)
            if more and not more_placeholder:
                self.response.out.write(more)
        except IOError:
            return self.error(404, '404 File %s Not Found' % path)

    def set_caching_headers(self, max_age, public = True):
        """Emit Expires and Cache-Control headers for max_age seconds."""
        self.response.headers['Expires'] = email.Utils.formatdate(time.time() + max_age, usegmt=True)
        directives = (['public'] if public else []) + ['max-age=%d' % max_age]
        self.response.headers['Cache-Control'] = ', '.join(directives)

    def render_json_response(self, data):
        """Render data as JSON, honoring the ?test= and JSONP ?callback= params."""
        json = json_encode(data, nice=LOCAL)
        if self.params.get('test'):
            # testing aid: show the JSON inside an HTML <pre> block
            return self.render_html("<html><body><pre>%s</pre></body></html>" % json)
        if self.params.get('callback'):
            # JSONP style
            self.render_text("__callback__(%s);" % json)
        else:
            # classic style
            self.render_json(json)

    def format_json_response(self, message, code=1):
        """Build the standard status/message response envelope."""
        return {
            "status": code,
            "message": message,
        }

    def json_error(self, message, code=1):
        """Render an error envelope (non-zero status, default 1)."""
        self.render_json_response(self.format_json_response(message, code))

    def json_ok(self, message = "OK"):
        """Render a success envelope (status 0)."""
        self.render_json_response(self.format_json_response(message, 0))
class SessionController(BaseController):
    """Controller persisting a Session via cookie + memcache + datastore."""

    SESSION_KEY = 'session'  # name of both the request param and the cookie
    SESSION_COOKIE_TIMEOUT_IN_SECONDS = 60*60*24*14  # two weeks
    session = None

    def _session_memcache_id(self, session_id):
        # Namespace the raw session id for use as a memcache key.
        return "session-"+session_id

    def create_session(self, user_id):
        """Create and persist a fresh Session for user_id."""
        self.session = Session(user_id=user_id)
        self.session.save()
        logging.debug("Created session: %s", self.session.get_id())

    def load_session(self):
        """Resolve the current session; returns the Session or None.

        Lookup order: cached self.session, request param / cookie for the
        id, then memcache, then the datastore (re-priming memcache on hit).
        """
        if self.session: return self.session
        logging.debug("Loading session ...")
        # look for session id in request and cookies
        session_id = self.request.get(self.SESSION_KEY)
        if not session_id: session_id = self.cookies.get(self.SESSION_KEY)
        if not session_id:
            logging.debug("session_id not found in %s", self.cookies)
            return None
        # hit memcache first
        cache_id = self._session_memcache_id(session_id)
        self.session = memcache.get(cache_id)
        if self.session:
            logging.debug("Session found in memcache %s", self.session)
            return self.session
        # hit database if not in memcache
        self.session = Session.get(session_id)
        if self.session:
            logging.debug("Session loaded from store %s", self.session)
            memcache.set(cache_id, self.session, self.SESSION_MEMCACHE_TIMEOUT)
            return self.session
        # session not found
        return None

    def store_session(self):
        """Persist self.session to the cookie, memcache and the datastore."""
        assert self.session
        cache_id = self._session_memcache_id(self.session.get_id())
        logging.debug("Storing session (%s) into memcache as %s" % (self.session, cache_id))
        self.set_cookie(self.SESSION_KEY,
            str(self.session.key()),
            max_age=self.SESSION_COOKIE_TIMEOUT_IN_SECONDS
        )
        memcache.set(cache_id, self.session, self.SESSION_MEMCACHE_TIMEOUT)
        self.session.save()

    def clear_session_cookie(self):
        """Delete the session cookie on the client."""
        logging.debug("Clearing session cookie (%s)" % self.SESSION_KEY)
        self.delete_cookie(self.SESSION_KEY)

    def clear_session(self):
        """Remove the current session from memcache and the datastore."""
        if not self.session:
            if not self.load_session(): return
        logging.debug("Clearing session %s", self.session)
        cache_id = self._session_memcache_id(self.session.get_id())
        memcache.delete(cache_id)
        self.session.delete()
class AuthenticatedController(SessionController):
    """Controller that requires a signed-in Google user before every action."""

    def __init__(self, *arguments, **keywords):
        super(AuthenticatedController, self).__init__(*arguments, **keywords)
        self.user = None  # populated by authenticate_user()

    def authenticate_user(self, url=None):
        """Load the current user; redirect to the login page when absent."""
        self.user = users.get_current_user()
        if not self.user:
            destination = url or self.request.url
            return self.redirect_to(users.create_login_url(destination))
        logging.info('Authenticated as user %s', self.user)

    def before_action(self, *arguments, **keywords):
        """Let parent hooks run (and possibly abort) before authenticating."""
        if super(AuthenticatedController, self).before_action(*arguments, **keywords):
            return True
        return self.authenticate_user()
"content_hash": "505808462fcb78cfeb0e826d7cc9effa",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 133,
"avg_line_length": 37.636963696369634,
"alnum_prop": 0.6011925640126271,
"repo_name": "harperreed/drydrop",
"id": "7a4b5859452cd763453b2b9dff1d5bf6863ce69e",
"size": "11442",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dryapp/drydrop/app/core/controller.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "30124"
},
{
"name": "Python",
"bytes": "916500"
}
],
"symlink_target": ""
} |
import re
import urlparse
from hubcheck.pageobjects.basepagewidget import BasePageWidget
from hubcheck.pageobjects.basepageelement import Link
from selenium.webdriver.common.action_chains import ActionChains
# from hubcheck.pageobjects.widgets.search import Search
class Header(BasePageWidget):
    """Hub page header widget exposing the login, register, logout and
    my-account links, plus helpers built on top of them."""

    def __init__(self, owner, locatordict=None):
        """Initialize the header widget.

        Args:
            owner: page object that owns this widget.
            locatordict: optional locator overrides. Defaults to None
                (treated as an empty dict) instead of a mutable ``{}``
                default, which would be shared across all instances.
        """
        if locatordict is None:
            locatordict = {}
        super(Header, self).__init__(owner, locatordict)

        # load hub's classes
        Header_Locators = self.load_class('Header_Locators')

        # update this object's locator
        self.locators.update(Header_Locators.locators)

        # update the locators with those from the owner
        self.update_locators_from_owner()

        # setup page object's components
        self.login = Link(self, {'base': 'login'})
        self.register = Link(self, {'base': 'register'})
        self.logout = Link(self, {'base': 'logout'})
        self.myaccount = Link(self, {'base': 'myaccount'})
        # self.search = Search(self,{'base':'search'})

        # update the component's locators with this objects overrides
        self._updateLocators()

    def _checkLocatorsLoggedOut(self):
        widgets = [self.login, self.register]
        self._checkLocators(widgets=widgets, cltype='LoggedOut')

    def _checkLocatorsLoggedIn(self):
        widgets = [self.logout, self.myaccount]
        self._checkLocators(widgets=widgets, cltype='LoggedIn')

    def goto_login(self):
        """click the login link"""
        return self.login.click()

    def goto_register(self):
        """click the register link"""
        return self.register.click()

    def goto_logout(self):
        """click the logout link and wait for it to disappear"""
        self.logout.click()
        message = 'logout button visible while trying to logout'
        self.logout.wait_until_invisible(message)
        return

    def goto_myaccount(self):
        """click the link to go to the member's myaccount page"""
        return self.myaccount.click()

    def is_logged_in(self):
        """check if user is logged in, returns True or False"""
        return self.logout.is_displayed()

    def get_account_number(self):
        """return the user's account number based on the "My Account" url"""
        url = self.myaccount.get_attribute('href')
        if not url:
            raise RuntimeError("link '%s' has no href" % (self.myaccount.locator))
        path = urlparse.urlsplit(url)[2]
        if not path:
            raise RuntimeError("url '%s' has no path" % (url))
        # raw string so \d is a regex digit class, not a (deprecated)
        # python string escape sequence
        matches = re.search(r"/members/(\d+)", path)
        if matches is None:
            raise RuntimeError("path '%s' does not contain an account number" % (path))
        account_number = matches.group(1)
        return account_number
class Header_Locators_Base(object):
    """Default locator strings for the Header widget."""

    locators = {
        "base": "css=#header",
        "login": "css=#login a",
        "register": "css=#register a",
        "logout": "css=#logout a",
        "myaccount": "css=#myaccount a",
        "search": "css=#searchform",
    }
class Header_Locators_Base_2(object):
    """Locator strings for the Header widget.

    Variant where both account links live under #logout
    (e.g. https://manufacturinghub.org/login).
    """

    locators = {
        "base": "css=#header",
        "login": "css=#login a",
        "register": "css=#register a",
        "logout": "css=#logout a:nth-child(1)",
        "myaccount": "css=#logout a:nth-child(2)",
        "search": "css=#searchform",
    }
class Header_Locators_Base_3(object):
    """Locator strings for the Header widget, as used on polytechhub."""

    locators = {
        "base": "css=#top",
        "login": "css=#account-login",
        "register": "css=#account-login",
        "logout": "css=#account-logout",
        "myaccount": "css=#account-info",
        "search": "css=#searchword",
    }
class Header_Locators_Base_4(object):
    """Locator strings for the Header widget, as used on nanohub."""

    locators = {
        "base": "css=#header",
        "login": "css=#login",
        "register": "css=#register",
        "logout": "css=#logout",
        "myaccount": "css=#usersname",
        "search": "css=#searchword",
    }
class Header1(Header):
    """Header variant that adds a link to the member's profile page."""

    def __init__(self, owner, locatordict=None):
        """Initialize the widget.

        Args:
            owner: page object that owns this widget.
            locatordict: optional locator overrides. Defaults to None
                (treated as an empty dict) to avoid the shared
                mutable-default pitfall of ``locatordict={}``.
        """
        if locatordict is None:
            locatordict = {}
        super(Header1, self).__init__(owner, locatordict)

        # setup page object's additional components
        self.profile = Link(self, {'base': 'profile'})

        # update the component's locators with this objects overrides
        self._updateLocators()

    def _checkLocatorsLoggedIn(self):
        widgets = [self.logout, self.myaccount, self.profile]
        self._checkLocators(widgets=widgets, cltype='LoggedIn')

    def goto_profile(self):
        """click the link to go to the member's profile page"""
        return self.profile.click()
class Header1_Locators_Base(object):
    """Default locator strings for the Header1 widget."""

    locators = {
        "base": "css=#header",
        "login": "css=#login a",
        "register": "css=#register a",
        "logout": "css=#logout a",
        "myaccount": "css=#myaccount a",
        "profile": "css=#username a",
        "search": "css=#searchform",
    }
class Header1_Locators_Base_2(object):
    """Locator strings for the Header1 widget (account-* id scheme)."""

    locators = {
        "base": "css=#header",
        "login": "css=#account-login",
        "register": "css=#account-register",
        "logout": "css=#account-logout",
        "myaccount": "css=#account-dashboard",
        "profile": "css=#account-profile",
        "search": "css=#searchform",
    }
class Header1_Locators_Base_4(object):
    """Locator strings for the Header1 widget on geoshareproject."""

    locators = {
        "base": "css=#header",
        "login": "css=#searchlogin > p > a:nth-child(1)",
        "register": "css=#searchlogin > p > a:nth-child(2)",
        "logout": "css=#searchlogin > p > a:nth-child(3)",
        "myaccount": "css=#searchlogin > p > a:nth-child(2)",
        "profile": "css=#searchlogin > p > a:nth-child(1)",
        "search": "css=#searchForm",
    }
class Header1_Locators_Base_5(object):
    """Locator strings for the Header1 widget.

    Variant where login and register share the same link.
    """

    locators = {
        "base": "css=#header",
        "login": "css=#register a",
        "register": "css=#register a",
        "logout": "css=#logout a",
        "myaccount": "css=#myaccount a",
        "profile": "css=#username a",
        "search": "css=#searchform",
    }
class Header2(BasePageWidget):
    """
    represents header on hubs that use a javascripty dropdown
    menu to hold account links for dashboard, profile, messages
    and logout.
    """

    def __init__(self, owner, locatordict=None):
        """Initialize the widget.

        Args:
            owner: page object that owns this widget.
            locatordict: optional locator overrides. Defaults to None
                (treated as an empty dict) to avoid the shared
                mutable-default pitfall of ``locatordict={}``.
        """
        if locatordict is None:
            locatordict = {}
        super(Header2, self).__init__(owner, locatordict)

        # load hub's classes
        Header_Locators = self.load_class('Header_Locators')

        # update this object's locator
        self.locators.update(Header_Locators.locators)

        # update the locators with those from the owner
        self.update_locators_from_owner()

        # setup page object's components
        self.login = Link(self, {'base': 'login'})
        self.register = Link(self, {'base': 'register'})
        self.logout = Link(self, {'base': 'logout'})
        self.details = Link(self, {'base': 'details'})
        self.dashboard = Link(self, {'base': 'dashboard'})
        self.messages = Link(self, {'base': 'messages'})
        self.profile = Link(self, {'base': 'profile'})
        # self.search = Search(self,'search')

        # names of the entries available in the account dropdown menu
        self._links = ['details', 'dashboard', 'messages', 'profile', 'logout']

        # update the component's locators with this objects overrides
        self._updateLocators()

    def _checkLocatorsLoggedOut(self, widgets=None, cltype=""):
        widgets = [self.login]
        self._checkLocators(widgets=widgets, cltype='LoggedOut')

    def _checkLocatorsLoggedIn(self, widgets=None, cltype=""):
        widgets = [self.logout, self.dashboard,
                   self.messages, self.profile]
        base = self.owner.find_element(self.locators['acctbase'])
        # hover mouse over the group manager toolbar to expand it
        actionProvider = ActionChains(self.owner._browser)\
            .move_to_element(base)
        actionProvider.perform()
        # check for locators
        self._checkLocators(widgets=widgets, cltype='LoggedIn')

    def get_options_items(self):
        """return the names of the account dropdown menu entries"""
        return self._links

    def goto_options_item(self, link):
        """this function does selenium specific stuff"""
        if link not in self._links:
            # Bug fix: previously the message was never %-formatted (the
            # link name was passed as a second ValueError argument).
            raise ValueError("invalid link name: '%s'" % (link,))
        # hover mouse over the account toolbar to expand it
        # move the mouse to the correct link and click it
        menu = self.find_element(self.locators['acctbase'])
        loc = self.locators[link]
        menu_item = self.find_element(loc)
        self.logger.debug("moving mouse over account dropdown")
        self.logger.debug("clicking drowdown menu option '%s': %s" % (link, loc))
        actionProvider = ActionChains(self.owner._browser)\
            .move_to_element(menu)\
            .move_to_element(menu_item)\
            .click()
        actionProvider.perform()

    def goto_login(self):
        """click the login link"""
        return self.login.click()

    def goto_register(self):
        """click the register link"""
        return self.register.click()

    def goto_logout(self):
        """click the logout menu entry and wait for the menu to close"""
        lockey = 'logout'
        self.goto_options_item(lockey)
        # wait until the element is no longer visible (ie. the menu has closed)
        # before proceeding to the next task
        loc = self.locators[lockey]
        self.wait_until_not_present(locator=loc)

    def goto_myaccount(self):
        # deprecated function, use goto_dashboard() instead
        return self.goto_options_item('dashboard')

    def goto_dashboard(self):
        return self.goto_options_item('dashboard')

    def goto_messages(self):
        return self.goto_options_item('messages')

    def goto_profile(self):
        return self.goto_options_item('profile')

    def is_logged_in(self):
        """check if user is logged in, returns True or False"""
        # return not self.login.is_displayed()
        return self.logout.is_present()

    def get_account_number(self):
        """return the user's account number based on the "Username" url"""
        # use dashboard instead of details because some hubs (like catalyzecare)
        # don't make details a link.
        url = self.dashboard.get_attribute('href')
        if url is None:
            raise RuntimeError("link '%s' has no href"
                               % (self.details.locators['base']))
        path = urlparse.urlsplit(url)[2]
        if not path:
            raise RuntimeError("url '%s' has no path" % (url))
        # the url looks something like:
        # https://hubname.org/members/1234/dashboard
        # raw string so \d is a regex digit class, not a string escape
        matches = re.search(r"/members/(\d+)", path)
        if matches is None:
            raise RuntimeError("path '%s' does not contain an account number"
                               % (path))
        account_number = matches.group(1)
        return account_number
class Header2_Locators_Base(object):
    """Default locator strings for the Header2 dropdown widget."""

    locators = {
        "base": "css=#header",
        "acctbase": "css=#account",
        "login": "css=#account-login",
        "register": "css=#account-register",
        "logout": "css=#account-logout",
        "details": "css=#account-details",
        "dashboard": "css=#account-dashboard",
        "messages": "css=#account-messages",
        "profile": "css=#account-profile",
        "search": "css=#searchform",
    }
class Header2_Locators_Base_2(object):
    """Locator strings for the Header2 widget (masthead base, class-based
    details selector)."""

    locators = {
        "base": "css=#masthead",
        "acctbase": "css=#account",
        "login": "css=#account-login",
        "register": "css=#account-register",
        "logout": "css=#account-logout",
        "details": "css=.account-details",
        "dashboard": "css=#account-dashboard",
        "messages": "css=#account-messages",
        "profile": "css=#account-profile",
        "search": "css=#searchform",
    }
class Header2_Locators_Base_3(object):
    """Locator strings for the Header2 widget, as used on afrl."""

    locators = {
        "base": "css=#utilities",
        "acctbase": "css=#account",
        "login": "css=#account-login",
        "register": "css=#account-register",
        "logout": "css=#account-logout",
        "details": "css=.account-details",
        "dashboard": "css=#account-dashboard",
        "messages": "css=#account-messages",
        "profile": "css=#account-profile",
        "search": "css=#searchform",
    }
class Header2_Locators_Base_4(object):
    """Locator strings for the Header2 widget (short #login/#register
    ids for the logged-out links)."""

    locators = {
        "base": "css=#masthead",
        "acctbase": "css=#account",
        "login": "css=#login",
        "register": "css=#register",
        "logout": "css=#account-logout",
        "details": "css=.account-details",
        "dashboard": "css=#account-dashboard",
        "messages": "css=#account-messages",
        "profile": "css=#account-profile",
        "search": "css=#searchform",
    }
class Header2_Locators_Base_5(object):
    """Locator strings for the Header2 widget.

    Variant where login and register are one link.
    """

    locators = {
        "base": "css=#masthead",
        "acctbase": "css=#account",
        "login": "css=#account-login",
        "register": "css=#account-login",
        "logout": "css=#account-logout",
        "details": "css=.account-details",
        "dashboard": "css=#account-dashboard",
        "messages": "css=#account-messages",
        "profile": "css=#account-profile",
        "search": "css=#searchform",
    }
class Header2_Locators_Base_6(object):
    """Locator strings for the Header2 widget.

    Login and register are one link; menu-entry locators include the
    anchor element.
    """

    locators = {
        "base": "css=#masthead",
        "acctbase": "css=#account",
        "login": "css=#account-login",
        "register": "css=#account-login",
        "logout": "css=#account-logout a",
        "details": "css=.account-details",
        "dashboard": "css=#account-dashboard a",
        "messages": "css=#account-messages a",
        "profile": "css=#account-profile a",
        "search": "css=#searchform",
    }
class Header2_Locators_Base_7(object):
    """Locator strings for the Header2 widget.

    Separate login and register links; menu-entry locators include the
    anchor element.
    """

    locators = {
        "base": "css=#masthead",
        "acctbase": "css=#account",
        "login": "css=#login",
        "register": "css=#register",
        "logout": "css=#account-logout a",
        "details": "css=.account-details",
        "dashboard": "css=#account-dashboard a",
        "messages": "css=#account-messages a",
        "profile": "css=#account-profile a",
        "search": "css=#searchform",
    }
class Header3(Header):
    """
    represents header on hubs where the username and my account links
    lead to the my account/dashboard page, and there is no profile link.
    generally found in older templates. here we use the username link
    to get the account number
    """

    def __init__(self, owner, locatordict=None):
        """Initialize the widget.

        Args:
            owner: page object that owns this widget.
            locatordict: optional locator overrides. Defaults to None
                (treated as an empty dict) to avoid the shared
                mutable-default pitfall of ``locatordict={}``.
        """
        if locatordict is None:
            locatordict = {}
        super(Header3, self).__init__(owner, locatordict)

        # setup page object's additional components
        self.username = Link(self, {'base': 'username'})

        # update the component's locators with this objects overrides
        self._updateLocators()

    def _checkLocatorsLoggedIn(self):
        widgets = [self.logout, self.myaccount, self.username]
        self._checkLocators(widgets=widgets, cltype='LoggedIn')

    def goto_username(self):
        """click the username link to go to the member's account page"""
        return self.username.click()

    def get_account_number(self):
        """return the user's account number based on the "Username" link"""
        url = self.username.get_attribute('href')
        if not url:
            raise RuntimeError("link '%s' has no href" % (self.username.locator))
        path = urlparse.urlsplit(url)[2]
        if not path:
            raise RuntimeError("url '%s' has no path" % (url))
        # raw string so \d is a regex digit class, not a string escape
        matches = re.search(r"/members/(\d+)", path)
        if matches is None:
            raise RuntimeError("path '%s' does not contain an account number" % (path))
        account_number = matches.group(1)
        return account_number
class Header3_Locators_Base_1(object):
    """Default locator strings for the Header3 widget."""

    locators = {
        "base": "css=#header",
        "login": "css=#login a",
        "register": "css=#register a",
        "logout": "css=#logout a",
        "myaccount": "css=#myaccount a",
        "username": "css=#username a",
        "search": "css=#searchform",
    }
class Header3_Locators_Base_2(object):
    """Locator strings for the Header3 widget (#usersname username id)."""

    locators = {
        "base": "css=#header",
        "login": "css=#login a",
        "register": "css=#register a",
        "logout": "css=#logout a",
        "myaccount": "css=#myaccount a",
        "username": "css=#usersname a",
        "search": "css=#searchform",
    }
class Header3_Locators_Base_3(object):
    """Locator strings for the Header3 widget (positional #who/#account
    anchors)."""

    locators = {
        "base": "css=#header",
        "login": "css=#who > a:nth-child(1)",
        "register": "css=#who > a:nth-child(2)",
        "logout": "css=#account > a:nth-child(1)",
        "myaccount": "css=#account > a:nth-child(2)",
        "username": "css=#who > a:nth-child(1)",
        "search": "css=#sitesearch",
    }
class Header3_Locators_Base_4(object):
    """Locator strings for the Header3 widget (account-* id scheme)."""

    locators = {
        "base": "css=#header",
        "login": "css=#account-login",
        "register": "css=#account-register",
        "logout": "css=#account-logout",
        "myaccount": "css=#account-dashboard",
        "username": "css=#username",
        "search": "css=#sitesearch",
    }
| {
"content_hash": "7aa8d48db6f403aafd26c6425229427f",
"timestamp": "",
"source": "github",
"line_count": 634,
"max_line_length": 87,
"avg_line_length": 30.238170347003155,
"alnum_prop": 0.5495279328151896,
"repo_name": "codedsk/hubcheck",
"id": "762b5bc9fe619556a4fb502ff295f0eea8359d46",
"size": "19171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hubcheck/pageobjects/widgets/header.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1248"
},
{
"name": "Makefile",
"bytes": "846"
},
{
"name": "Python",
"bytes": "1355360"
},
{
"name": "Shell",
"bytes": "1483"
}
],
"symlink_target": ""
} |
"""Base class for classes that need modular database access."""
from oslo_utils import importutils
from oslo_db.sqlalchemy.session import *
import nova.conf
CONF = nova.conf.CONF
class Base(object):
    """Base class whose DB driver module is injected at construction time."""

    def __init__(self, db_driver=None):
        super(Base, self).__init__()
        # Fall back to the configured driver when none was supplied.
        driver_name = db_driver or CONF.db_driver
        self.db = importutils.import_module(driver_name)

    def session(self):
        # Hand out a fresh SQLAlchemy session per call.
        return Session()
| {
"content_hash": "7288b2005f2317fbf0e548231747417c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 63,
"avg_line_length": 24.454545454545453,
"alnum_prop": 0.6394052044609665,
"repo_name": "xuweiliang/Codelibrary",
"id": "a18b2a2637263ba3aff836669f200574f968d6e6",
"size": "1270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/db/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "134284"
},
{
"name": "HTML",
"bytes": "830844"
},
{
"name": "JavaScript",
"bytes": "2421484"
},
{
"name": "Makefile",
"bytes": "4934"
},
{
"name": "Python",
"bytes": "17185807"
},
{
"name": "Shell",
"bytes": "9144"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# Sphinx build configuration for the DeepOBS documentation.
# Sections below follow the standard sphinx-quickstart layout:
# project metadata, general options, then per-output-format settings.
# -- Project information -----------------------------------------------------
project = u'DeepOBS'
copyright = u'2019, Frank Schneider'
author = u'Frank Schneider, Lukas Balles & Philipp Hennig'
# The short X.Y version
version = u'1.1'
# The full version, including alpha/beta/rc tags
release = u'1.1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'sphinxarg.ext'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# Theme options
html_theme_options = {
    'collapse_navigation': False, # Collapse navigation
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
    'css_files': [
        '_static/theme_overrides.css',  # override wide tables in RTD theme
    ],
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DeepOBSdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'DeepOBS.tex', u'DeepOBS Documentation',
     u'Frank Schneider', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'deepobs', u'DeepOBS Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'DeepOBS', u'DeepOBS Documentation',
     author, 'DeepOBS', 'Documentation for the DeepOBS package.',
     'Frank Schneider'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# Disable smart-quote transformation so literal quotes/dashes in the
# docs are rendered exactly as written.
smartquotes = False
| {
"content_hash": "f79acee059172be8123a9f1cf07e9471",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 78,
"avg_line_length": 28.872832369942195,
"alnum_prop": 0.6412412412412413,
"repo_name": "fsschneider/DeepOBS",
"id": "fc003343e2c1d3900e10c4170540611062370d7a",
"size": "5559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "368026"
},
{
"name": "Shell",
"bytes": "8516"
}
],
"symlink_target": ""
} |
"""Utility to generate Redfish accessor classes.
This binary does not accept any command line arguments. The accessors are
generated based entirely upon embedded data.
"""
import argparse
import sys
from typing import Sequence
import jinja2
from ecclesia.lib.jinja2 import loader
from ecclesia.lib.redfish.toolchain.internal import descriptor_pb2
# Directory relative to ecclesia/.
_PACKAGE_DIR = 'ecclesia/lib/redfish/toolchain/internal/accessors'
# Map of descriptor primitive types to their C++ equivalent.
# NOTE(review): GUID maps to uint64_t even though GUIDs are 128-bit;
# presumably a deliberate downstream representation -- confirm.
_PRIMITIVE_TYPE_MAP = {
    descriptor_pb2.Property.Type.PrimitiveType.BOOLEAN: 'bool',
    descriptor_pb2.Property.Type.PrimitiveType.INT64: 'int64_t',
    descriptor_pb2.Property.Type.PrimitiveType.STRING: 'std::string',
    descriptor_pb2.Property.Type.PrimitiveType.DECIMAL: 'double',
    descriptor_pb2.Property.Type.PrimitiveType.DOUBLE: 'double',
    descriptor_pb2.Property.Type.PrimitiveType.DATE_TIME_OFFSET: 'absl::Time',
    descriptor_pb2.Property.Type.PrimitiveType.DURATION: 'absl::Duration',
    descriptor_pb2.Property.Type.PrimitiveType.GUID: 'uint64_t',
}
class ProfileDescriptor:
  """Wraps a descriptor protobuf and provides some common member transforms."""

  def __init__(self, pb: descriptor_pb2.Profile):
    self.pb = pb
    # Profile name with spaces removed, usable as a code identifier.
    name = pb.profile_name
    self.sanitized_profile_name = name.replace(' ', '')
def type_to_string(ptype: descriptor_pb2.Property.Type) -> str:
  """Converts a Property.Type to a C++ string.

  Args:
    ptype: property type to convert.

  Returns:
    string of the C++ type, or '' when no oneof field is set.

  Raises:
    NotImplementedError if the translation cannot be completed.
  """
  which = ptype.WhichOneof('type')
  if not which:
    return ''
  value = getattr(ptype, which)
  if which == 'primitive':
    if value in _PRIMITIVE_TYPE_MAP:
      return _PRIMITIVE_TYPE_MAP[value]
    raise NotImplementedError(
        f'type_to_string() cannot map primitive type "{value}" into a C++ type')
  if isinstance(value, (type(ptype.reference), type(ptype.collection))):
    # References and collections are both surfaced as RedfishVariant
    # (trivial placeholder).
    return 'RedfishVariant'
  raise NotImplementedError(
      f'type_to_string() cannot map oneof "{which}" into a C++ type')
def main(argv: Sequence[str]) -> None:
  """Parse flags, load the compiled Profile proto and render the accessors.

  Args:
    argv: the full process argv; argv[0] (the program name) is skipped.
  """
  parser = argparse.ArgumentParser(
      description='Generate profile based accessors.')
  parser.add_argument(
      '--proto_path_in',
      type=str,
      help='filepath of the compiled proto file for input')
  parser.add_argument('--h_path', type=str, help='filepath of the .h file')
  parser.add_argument(
      '--h_include', type=str, help='filepath for including header file')
  parser.add_argument('--cc_path', type=str, help='filepath of the .cc file')
  args = parser.parse_args(argv[1:])
  # Binary mode: the input is a serialized protobuf, not text.
  with open(args.proto_path_in, 'rb') as f:
    pb = descriptor_pb2.Profile.FromString(f.read())
  render_dict = {
      'profiles': [ProfileDescriptor(pb)],
      'header_filepath': args.h_include
  }
  # Use the constructed environment to render the template.
  # StrictUndefined makes the render fail loudly on any missing variable.
  jinja_env = jinja2.Environment(
      loader=loader.ResourceLoader(_PACKAGE_DIR, 'ecclesia'),
      undefined=jinja2.StrictUndefined)
  # Expose the type translator so templates can emit C++ type names.
  jinja_env.globals['type_to_string'] = type_to_string
  with open(args.cc_path, 'w') as f:
    jinja_template = jinja_env.get_template('accessors.cc.jinja2')
    f.write(jinja_template.render(**render_dict))
  with open(args.h_path, 'w') as f:
    jinja_template = jinja_env.get_template('accessors.h.jinja2')
    f.write(jinja_template.render(**render_dict))
# Script entry point: forward the raw argv to main().
if __name__ == '__main__':
  main(sys.argv)
| {
"content_hash": "427476a7f33f5f72098b9c262d0e9737",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 79,
"avg_line_length": 32.7962962962963,
"alnum_prop": 0.7049689440993789,
"repo_name": "google/ecclesia-machine-management",
"id": "f925390ca80d338e8776850b46db5ba3b803a008",
"size": "3542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ecclesia/lib/redfish/toolchain/internal/accessors/generate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASL",
"bytes": "12564"
},
{
"name": "C++",
"bytes": "1834745"
},
{
"name": "Jinja",
"bytes": "1748"
},
{
"name": "Python",
"bytes": "34199"
},
{
"name": "Shell",
"bytes": "1152"
},
{
"name": "Starlark",
"bytes": "169738"
}
],
"symlink_target": ""
} |
class MyResponse:
    """Minimal stand-in for an HTTP response object used by the fakes.

    Attributes mirror the real response: content, status_code, headers,
    final_url, plus a content_was_truncated flag (always False here).
    """

    def __init__(self, content, status_code=200, headers=None,
                 final_url='http://www.google.ca/'):
        self.content = content
        self.content_was_truncated = False
        # Bug fix: status_code was previously hard-coded to 200,
        # silently ignoring the constructor argument.
        self.status_code = status_code
        # None default instead of a mutable {} shared across instances.
        self.headers = {} if headers is None else headers
        self.final_url = final_url
class MyOpener:
    """Fake URL opener that replays a scripted sequence of responses."""

    def __init__(self, *responses):
        # Normalise: wrap any bare payload into a MyResponse.
        self.responses = [
            item if isinstance(item, MyResponse) else MyResponse(item)
            for item in responses
        ]

    def __call__(self, url):
        # Record the request for later inspection, then serve the next
        # scripted response in order.
        self.last_request = {'url': url}
        return self.responses.pop(0)
class MyXisbnWebService:
    """In-memory fake of the xISBN edition-lookup web service."""

    def __init__(self):
        # Maps a known ISBN to its full edition list (the ISBN itself first).
        self.edition_map = {}

    def __setitem__(self, isbn, editions):
        # The queried ISBN is always the first entry of its edition list.
        self.edition_map[isbn] = [isbn] + editions

    def get_editions(self, isbn):
        # Unknown ISBNs have only themselves as an edition.
        return self.edition_map.get(isbn, [isbn])

    def to13(self, isbn10):
        return isbn10 + '147'

    def to10(self, isbn13):
        return isbn13[3:]
class MyCache:
    """Dictionary-backed fake of a cache client."""

    def __init__(self):
        self.cache = {}

    def get(self, key):
        # Mirror a real cache: a missing key yields None, not KeyError.
        return self.cache.get(key)

    def set(self, key, value, *args):
        # Extra positional args (e.g. a timeout) are accepted and ignored.
        self.cache[key] = value
| {
"content_hash": "90d79c4e843534f16eb69539df139f07",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 96,
"avg_line_length": 26,
"alnum_prop": 0.5791420118343196,
"repo_name": "blairconrad/LibraryLookup",
"id": "0b0df18ca38668b114072c2431cc04a3b0a1950f",
"size": "1352",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Tests/fakes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "693"
},
{
"name": "HTML",
"bytes": "302305"
},
{
"name": "JavaScript",
"bytes": "13245"
},
{
"name": "Python",
"bytes": "24909"
}
],
"symlink_target": ""
} |
import mock
from rally.plugins.openstack.scenarios.magnum import utils
from tests.unit import test
class MagnumScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for the Magnum scenario utility helpers."""

    def setUp(self):
        super(MagnumScenarioTestCase, self).setUp()
        self.baymodel = mock.Mock()
        self.scenario = utils.MagnumScenario(self.context)

    def test_list_baymodels(self):
        scenario = utils.MagnumScenario(self.context)
        expected = [self.baymodel]
        self.clients("magnum").baymodels.list.return_value = expected
        # The helper must return exactly what the client listed.
        listed = scenario._list_baymodels()
        self.assertEqual(expected, listed)
        self.clients("magnum").baymodels.list.assert_called_once_with()
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "magnum.list_baymodels")

    def test_create_baymodel(self):
        self.scenario.generate_random_name = mock.Mock(
            return_value="generated_name")
        self.clients("magnum").baymodels.create.return_value = self.baymodel
        created = self.scenario._create_baymodel(
            image="test_image",
            keypair="test_key",
            external_network="public",
            dns_nameserver="8.8.8.8",
            flavor="m1.large",
            docker_volume_size=50,
            network_driver="docker",
            coe="swarm")
        self.assertEqual(self.baymodel, created)
        # The scenario must pass the generated name through to the client.
        _, kwargs = self.clients("magnum").baymodels.create.call_args
        self.assertEqual("generated_name", kwargs["name"])
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "magnum.create_baymodel")
| {
"content_hash": "7ab35269db328c23eaa0be67f846a09c",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 38.58695652173913,
"alnum_prop": 0.632112676056338,
"repo_name": "vganapath/rally",
"id": "747a1edd4eb9f8abd9faa9285ad7462ed80f70bb",
"size": "2373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/openstack/scenarios/magnum/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "52008"
},
{
"name": "JavaScript",
"bytes": "8550"
},
{
"name": "Mako",
"bytes": "18645"
},
{
"name": "Python",
"bytes": "3621510"
},
{
"name": "Shell",
"bytes": "43808"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from fileupload.models import File
class FileAdmin(admin.ModelAdmin):
    """Admin options for File: list with a thumbnail, keep the tag read-only."""
    list_display = ('file', 'file_tag_thumb', 'created')
    readonly_fields = ('file_tag',)
admin.site.register(File, FileAdmin)
| {
"content_hash": "b9113020d0597b453925e13ee574febe",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 56,
"avg_line_length": 23.7,
"alnum_prop": 0.7257383966244726,
"repo_name": "stahlnow/stahlnow",
"id": "413a4014b82243c051a8524f7eb4dcf41e0db2aa",
"size": "237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/apps/fileupload/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31910"
},
{
"name": "HTML",
"bytes": "25569"
},
{
"name": "JavaScript",
"bytes": "77863"
},
{
"name": "Python",
"bytes": "59788"
}
],
"symlink_target": ""
} |
from colander import MappingSchema, SchemaNode, String, Integer, Boolean
from colander import Length, DateTime, instantiate
from colander import SequenceSchema, OneOf, drop
import colander
from ode.models import TAG_MAX_LENGTH, SAFE_MAX_LENGTH
def default_schema_node():
    """Build an optional string node: empty when absent, length-bounded."""
    node = SchemaNode(
        String(),
        missing='',
        validator=Length(1, SAFE_MAX_LENGTH),
    )
    return node
class MediaSchema(MappingSchema):
    """Schema for one media item (video/sound/image) attached to an event."""
    # Optional free-text licence; empty string when absent.
    license = default_schema_node()
    # Required; must be a well-formed URL.
    url = SchemaNode(String(), validator=colander.url)
def remove_timezone(dt):
    """Preparer stripping tzinfo from a datetime; colander.null passes through."""
    return dt if dt is colander.null else dt.replace(tzinfo=None)
class EventSchema(MappingSchema):
    """Validation schema for a single event.

    NOTE(review): the original class body defined ``email``, ``firstname``,
    ``lastname`` and ``telephone`` twice; later class-body assignments
    overwrite earlier ones, so only the second definitions ever took effect
    (including a *required* ``email`` variant that was silently shadowed by
    the optional one below). The dead first definitions have been removed;
    the effective schema is unchanged.
    """
    id = SchemaNode(String(), missing=drop,
                    validator=Length(1, SAFE_MAX_LENGTH))
    provider_id = default_schema_node()
    title = SchemaNode(String(), missing='',
                       validator=Length(1, SAFE_MAX_LENGTH))
    description = SchemaNode(String(), missing='')
    event_id = default_schema_node()
    email = SchemaNode(String(), missing='', validator=colander.Email())
    firstname = default_schema_node()
    language = default_schema_node()
    lastname = default_schema_node()
    latlong = default_schema_node()
    price_information = default_schema_node()
    organiser = default_schema_node()
    performers = default_schema_node()
    press_url = SchemaNode(String(), missing='', validator=colander.url)
    source_id = default_schema_node()
    source = default_schema_node()
    target = default_schema_node()
    telephone = default_schema_node()
    url = SchemaNode(String(), missing='', validator=colander.url)
    location_name = default_schema_node()
    location_address = default_schema_node()
    location_post_code = default_schema_node()
    location_town = default_schema_node()
    location_capacity = default_schema_node()
    location_country = default_schema_node()
    # Datetimes are normalised to naive (timezone-stripped) values.
    start_time = SchemaNode(DateTime(default_tzinfo=None),
                            preparer=remove_timezone)
    end_time = SchemaNode(DateTime(default_tzinfo=None), missing=None,
                          preparer=remove_timezone)
    publication_start = SchemaNode(DateTime(default_tzinfo=None), missing=None,
                                   preparer=remove_timezone)
    publication_end = SchemaNode(DateTime(default_tzinfo=None), missing=None,
                                 preparer=remove_timezone)
    press_contact_email = SchemaNode(String(), missing='',
                                     validator=colander.Email())
    press_contact_name = default_schema_node()
    press_contact_phone_number = default_schema_node()
    ticket_contact_email = SchemaNode(String(), missing='',
                                      validator=colander.Email())
    ticket_contact_name = default_schema_node()
    ticket_contact_phone_number = default_schema_node()

    @instantiate(missing=[])
    class videos(SequenceSchema):
        video = MediaSchema()

    @instantiate(missing=[])
    class sounds(SequenceSchema):
        sound = MediaSchema()

    @instantiate(missing=[])
    class images(SequenceSchema):
        image = MediaSchema()

    @instantiate(missing=[])
    class tags(SequenceSchema):
        name = SchemaNode(String(), validator=Length(1, TAG_MAX_LENGTH))

    @instantiate(missing=[])
    class categories(SequenceSchema):
        name = SchemaNode(String(), validator=Length(1, TAG_MAX_LENGTH))
class EventCollectionSchema(MappingSchema):
    """Envelope for a list of events: {"items": [{"data": {...event...}}]}."""
    @instantiate()
    class items(SequenceSchema):
        @instantiate()
        class item(MappingSchema):
            data = EventSchema()
class SourceSchema(MappingSchema):
    """An event feed source: its URL and whether it is polled."""
    # Required; must be a well-formed URL.
    url = SchemaNode(String(), validator=colander.url)
    # Defaults to False when omitted.
    active = SchemaNode(Boolean(), missing=False)
class SourceCollectionSchema(MappingSchema):
    """Envelope for a list of sources: {"items": [{"data": {...source...}}]}."""
    @instantiate()
    class items(SequenceSchema):
        @instantiate()
        class item(MappingSchema):
            data = SourceSchema()
# Hard ceiling on the page size accepted by collection endpoints.
COLLECTION_MAX_LENGTH = 100


class QueryStringSchema(MappingSchema):
    """Validates pagination/sorting/filter query-string parameters."""
    limit = SchemaNode(Integer(), missing=drop,
                       validator=colander.Range(0, COLLECTION_MAX_LENGTH))
    offset = SchemaNode(Integer(), missing=drop)
    sort_by = SchemaNode(String(), missing=drop)
    sort_direction = SchemaNode(String(), missing='asc',
                                validator=OneOf(['asc', 'desc']))
    provider_id = SchemaNode(String(), missing=drop,
                             validator=Length(1, SAFE_MAX_LENGTH))
    start_time = SchemaNode(DateTime(default_tzinfo=None), missing=drop)
    end_time = SchemaNode(DateTime(default_tzinfo=None), missing=drop)
| {
"content_hash": "0bf149e56b70f9adcb779c21947c6f41",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 35.08759124087591,
"alnum_prop": 0.6527980029124194,
"repo_name": "makinacorpus/ODE",
"id": "51206f18ca7bfbbbcaba3445e2f595be60404ddc",
"size": "4807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ode/validation/schema.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6015"
},
{
"name": "Python",
"bytes": "115251"
}
],
"symlink_target": ""
} |
'''
Created on Mar 7, 2017
@author: Leo Zhong
'''
import csv
import random
import math
import operator
def loadDataset(filename, split, trainingSet=None, testSet=None):
    """Read an iris-style CSV and randomly split rows into train/test sets.

    :param filename: path to a CSV file whose first four columns are numeric
        features and whose last column is the class label
    :param split: probability that a row lands in ``trainingSet``
    :param trainingSet: optional output list, mutated in place
    :param testSet: optional output list, mutated in place
    :return: (trainingSet, testSet)
    """
    # Fix for the shared-mutable-default bug: the original used
    # ``trainingSet=[]`` / ``testSet=[]``, so rows accumulated across calls.
    # Callers that pass their own lists still see them mutated in place.
    if trainingSet is None:
        trainingSet = []
    if testSet is None:
        testSet = []
    with open(filename, 'r') as csvfile:
        dataset = list(csv.reader(csvfile))
    # NOTE(review): the original skipped the final row (range(len-1)),
    # presumably to guard against a trailing blank line; preserved here.
    for x in range(len(dataset) - 1):
        for y in range(4):
            dataset[x][y] = float(dataset[x][y])
        if random.random() < split:
            trainingSet.append(dataset[x])
        else:
            testSet.append(dataset[x])
    return trainingSet, testSet
def euclideanDistance(instance1, instance2, length):
    """Euclidean distance over the first *length* attributes of two rows."""
    squared = sum((instance1[i] - instance2[i]) ** 2 for i in range(length))
    return math.sqrt(squared)
def getNeighbors(trainingSet, testInstance, k):
    """Return the k training rows nearest to *testInstance*.

    The last column of ``testInstance`` is assumed to be the class label
    and is excluded from the distance computation.
    """
    feature_count = len(testInstance) - 1
    ranked = sorted(
        trainingSet,
        key=lambda row: euclideanDistance(testInstance, row, feature_count))
    # Indexing (rather than slicing) preserves the original IndexError
    # when k exceeds the number of training rows.
    return [ranked[rank] for rank in range(k)]
def getResponse(neighbors):
    """Majority-vote the class labels (last column) of the neighbour rows."""
    votes = {}
    for row in neighbors:
        label = row[-1]
        votes[label] = votes.get(label, 0) + 1
    # max() yields the first label reaching the top count in insertion
    # order, matching the stable-sort tie-breaking of the original.
    return max(votes, key=votes.get)
def getAccuracy(testSet, predictions):
    """Percentage of rows whose label (last column) matches the prediction."""
    hits = sum(
        1 for i, row in enumerate(testSet) if row[-1] == predictions[i])
    return (hits / float(len(testSet))) * 100.0
def main():
    """Driver: load the iris data, split ~2:1, classify each test row with
    3-NN and print per-row predictions plus overall accuracy."""
    #prepare data
    trainingSet = []
    testSet = []
    split = 0.67
    # NOTE(review): hard-coded Windows path; adjust for your environment.
    loadDataset(r'F:\MachineLearning\irisdata.txt', split, trainingSet, testSet)
    print ('Train set: ' + repr(len(trainingSet)))
    print ('Test set: ' + repr(len(testSet)))
    #generate predictions
    predictions = []
    k = 3
    for x in range(len(testSet)):
        # classify testSet[x] against the training rows
        neighbors = getNeighbors(trainingSet, testSet[x], k)
        result = getResponse(neighbors)
        predictions.append(result)
        print ('>predicted=' + repr(result) + ', actual=' + repr(testSet[x][-1]))
    print ('predictions: ' + repr(predictions))
    accuracy = getAccuracy(testSet, predictions)
    print('Accuracy: ' + repr(accuracy) + '%')

if __name__ == '__main__':
    main()
| {
"content_hash": "a0bd4bf1b65ac38ca930fcf3e76b504f",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 86,
"avg_line_length": 28.182795698924732,
"alnum_prop": 0.6104540251812285,
"repo_name": "LeoZ123/Machine-Learning-Practice",
"id": "f60fee354adc372bf852485c52dd2f3c02e2e316",
"size": "2621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "K_Nearest_Neighbor(KNN)/KNN_Implementation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30188"
}
],
"symlink_target": ""
} |
from .lda import Lda
from .ldx import Ldx
from .ldy import Ldy
from .sta import Sta
from .stx import Stx
from .sty import Sty
| {
"content_hash": "9f751f036c7e6c685d79f160917e565b",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 20,
"avg_line_length": 21,
"alnum_prop": 0.7619047619047619,
"repo_name": "Hexadorsimal/pynes",
"id": "cda716764cff64533e1bec2229850c45b4b46e35",
"size": "126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nes/processors/cpu/instructions/load_store/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42305"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class systemglobal_authenticationpolicy_binding(base_resource) :
    """ Binding class showing the authenticationpolicy that can be bound to systemglobal.
    """
    def __init__(self) :
        # Fields mirror the NITRO wire format; ___count (triple underscore)
        # receives the record count returned by count queries.
        self._policyname = ""
        self._priority = 0
        self._builtin = []
        self.___count = 0

    @property
    def priority(self) :
        """The priority of the command policy.
        """
        try :
            return self._priority
        except Exception as e:
            raise e

    @priority.setter
    def priority(self, priority) :
        """The priority of the command policy.
        """
        try :
            self._priority = priority
        except Exception as e:
            raise e

    @property
    def builtin(self) :
        """Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL.
        """
        try :
            return self._builtin
        except Exception as e:
            raise e

    @builtin.setter
    def builtin(self, builtin) :
        """Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL
        """
        try :
            self._builtin = builtin
        except Exception as e:
            raise e

    @property
    def policyname(self) :
        """The name of the command policy.
        """
        try :
            return self._policyname
        except Exception as e:
            raise e

    @policyname.setter
    def policyname(self, policyname) :
        """The name of the command policy.
        """
        try :
            self._policyname = policyname
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(systemglobal_authenticationpolicy_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # errorcode 444 means the session is no longer valid; drop it.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.systemglobal_authenticationpolicy_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        try :
            return None
        except Exception as e :
            raise e

    @classmethod
    def add(cls, client, resource) :
        # Bindings are created via an update call on the NITRO API,
        # keyed on the bound policy name.
        try :
            if resource and type(resource) is not list :
                updateresource = systemglobal_authenticationpolicy_binding()
                updateresource.policyname = resource.policyname
                return updateresource.update_resource(client)
            else :
                if resource and len(resource) > 0 :
                    updateresources = [systemglobal_authenticationpolicy_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].policyname = resource[i].policyname
                return cls.update_bulk_request(client, updateresources)
        except Exception as e :
            raise e

    @classmethod
    def delete(cls, client, resource) :
        # Unbinding uses a delete call keyed on the bound policy name.
        try :
            if resource and type(resource) is not list :
                deleteresource = systemglobal_authenticationpolicy_binding()
                deleteresource.policyname = resource.policyname
                return deleteresource.delete_resource(client)
            else :
                if resource and len(resource) > 0 :
                    deleteresources = [systemglobal_authenticationpolicy_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].policyname = resource[i].policyname
                return cls.delete_bulk_request(client, deleteresources)
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service) :
        """ Use this API to fetch a systemglobal_authenticationpolicy_binding resources.
        """
        try :
            obj = systemglobal_authenticationpolicy_binding()
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, filter_) :
        """ Use this API to fetch filtered set of systemglobal_authenticationpolicy_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = systemglobal_authenticationpolicy_binding()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service) :
        """ Use this API to count systemglobal_authenticationpolicy_binding resources configued on NetScaler.
        """
        try :
            obj = systemglobal_authenticationpolicy_binding()
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, filter_) :
        """ Use this API to count the filtered set of systemglobal_authenticationpolicy_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = systemglobal_authenticationpolicy_binding()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    class Builtin:
        # Allowed values for the 'builtin' attribute.
        MODIFIABLE = "MODIFIABLE"
        DELETABLE = "DELETABLE"
        IMMUTABLE = "IMMUTABLE"
        PARTITION_ALL = "PARTITION_ALL"
class systemglobal_authenticationpolicy_binding_response(base_response) :
    """ Response envelope deserialised from NITRO GET replies. """
    def __init__(self, length=1) :
        self.systemglobal_authenticationpolicy_binding = []
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate `length` empty binding objects for the formatter to fill.
        self.systemglobal_authenticationpolicy_binding = [systemglobal_authenticationpolicy_binding() for _ in range(length)]
| {
"content_hash": "060ae32815e89f85ada5fc3713cee9d0",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 143,
"avg_line_length": 30.621890547263682,
"alnum_prop": 0.714378554021121,
"repo_name": "mahabs/nitro",
"id": "5f8a30e6f8bbb493b0b151cb5e188e8c5695b720",
"size": "6769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/system/systemglobal_authenticationpolicy_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "10647176"
}
],
"symlink_target": ""
} |
"""
Converts ClinVar XML into
RDF triples to be ingested by SciGraph.
These triples conform to the core of the
SEPIO Evidence & Provenance model
We also use the clinvar curated gene to disease
mappings to discern the functional consequence of
a variant on a gene in cases where this is ambiguous.
For example, some variants are located in two
genes overlapping on different strands, and may
only have a functional consequence on one gene.
This is suboptimal and we should look for a source
that directly provides this.
creating a test set.
get a full dataset default ClinVarFullRelease_00-latest.xml.gz
get the mapping file default gene_condition_source_id
get a list of RCV default CV_test_RCV.txt
put the input files the raw directory
write the test set back to the raw directory
./scripts/ClinVarXML_Subset.sh | gzip > raw/clinvar/ClinVarTestSet.xml.gz
parsing a test set (Skolemizing blank nodes i.e. for Protege)
dipper/sources/ClinVar.py -f ClinVarTestSet.xml.gz -o ClinVarTestSet_`datestamp`.nt
For while we are still required to redundantly conflate the owl properties
in with the data files.
python3 ./scripts/add-properties2turtle.py \
--input ./out/ClinVarTestSet_`datestamp`.nt \
--output ./out/ClinVarTestSet_`datestamp`.nt --format nt
"""
import os
import re
import gzip
import csv
import hashlib
import logging
import argparse
import xml.etree.ElementTree as ElementTree
from typing import List, Dict
import yaml
from dipper.models.ClinVarRecord import ClinVarRecord, Gene,\
Variant, Allele, Condition, Genotype
from dipper import curie_map
from dipper.models.BiolinkVocabulary import BioLinkVocabulary as blv
LOG = logging.getLogger(__name__)

# The name of the ingest we are doing
# (derived from this file's path; INAME is the lowercase module name)
IPATH = re.split(r'/', os.path.realpath(__file__))
(INAME, DOTPY) = re.split(r'\.', IPATH[-1].lower())
RPATH = '/' + '/'.join(IPATH[1:-3])
GLOBAL_TT_PATH = RPATH + '/translationtable/GLOBAL_TERMS.yaml'
LOCAL_TT_PATH = RPATH + '/translationtable/' + INAME + '.yaml'

CV_FTP = 'ftp://ftp.ncbi.nlm.nih.gov/pub/clinvar'

# Global translation table
# Translate labels found in ontologies
# to the terms they are for
GLOBALTT = {}
with open(GLOBAL_TT_PATH) as fh:
    GLOBALTT = yaml.safe_load(fh)

# Local translation table
# Translate external strings found in datasets
# to specific labels found in ontologies
LOCALTT = {}
with open(LOCAL_TT_PATH) as fh:
    LOCALTT = yaml.safe_load(fh)

CURIEMAP = curie_map.get()
# bnode prefix used for skolemized blank nodes
CURIEMAP['_'] = 'https://monarchinitiative.org/.well-known/genid/'

# regular expression to limit what is found in the CURIE identifier
# it is ascii centric and may(will) not pass some valid utf8 curies
CURIERE = re.compile(r'^.*:[A-Za-z0-9_][A-Za-z0-9_.]*[A-Za-z0-9_]*$')
def make_spo(sub, prd, obj, subject_category=None, object_category=None):
    """
    Decorates the three given strings as a line of ntriples
    (also writes a triple for subj biolink:category and
    obj biolink:category)

    :param sub: str, subject CURIE (or bnode)
    :param prd: str, predicate CURIE, or 'a' as shorthand for rdf:type
    :param obj: str, object CURIE, bnode or literal; must not be None/empty
    :raises ValueError: when sub/prd are not CURIEs or their prefixes are
        not in CURIEMAP
    :return: str, one ntriples line (plus optional category triples)
    """
    # To establish string as a curie and expand,
    # we use a global curie_map(.yaml)
    # sub are always uri (unless a bnode)
    # prd are always uri (unless prd is 'a')
    # should fail loudly if curie does not exist
    if prd == 'a':
        prd = 'rdf:type'

    try:
        (subcuri, subid) = sub.split(r':')
    except Exception:
        LOG.error("not a Subject Curie '%s'", sub)
        raise ValueError
    try:
        (prdcuri, prdid) = prd.split(r':')
    except Exception:
        LOG.error("not a Predicate Curie '%s'", prd)
        raise ValueError
    objt = ''
    subjt = ''
    # object is a curie or bnode or literal [string|number] NOT None.
    assert (obj is not None), '"None" object for subject ' + sub + ' & pred ' + prd
    # object is NOT empty.
    assert (obj != ''), 'EMPTY object for subject ' + sub + ' & pred ' + prd
    # NOTE(review): the two guards below are unreachable after the asserts
    # above (and sub.split would already have raised); kept byte-identical.
    if sub is None:
        LOG.error("make_spo() was passed sub of None!")
        return ""
    if obj is None or obj == '':
        LOG.error("make_spo() was passed obj of None/empty")
        return ""
    objcuri = None
    match = re.match(CURIERE, obj)
    if match is not None:
        try:
            (objcuri, objid) = re.split(r':', obj)
        except ValueError:
            match = None
    if match is not None and objcuri in CURIEMAP:
        objt = CURIEMAP[objcuri] + objid.strip()
        # allow unexpanded bnodes in object
        if objcuri != '_' or CURIEMAP[objcuri] != '_:b':
            objt = '<' + objt + '>'
    elif obj.isdigit():
        objt = '"' + obj + '"^^<http://www.w3.org/2001/XMLSchema#integer>'
    elif obj.isnumeric():
        objt = '"' + obj + '"^^<http://www.w3.org/2001/XMLSchema#double>'
    else:
        # Literals may not contain the characters ", LF, CR '\'
        # except in their escaped forms. internal quotes as well.
        # for downstream sanity any control chars should be escaped
        obj = obj.strip('"').replace('\\', '\\\\').replace('"', '\'')
        obj = obj.replace('\n', '\\n').replace('\r', '\\r').replace('\t', '\\t')
        objt = '"' + obj + '"'

    # allow unexpanded bnodes in subject
    if subcuri is not None and subcuri in CURIEMAP and \
            prdcuri is not None and prdcuri in CURIEMAP:
        subjt = CURIEMAP[subcuri] + subid.strip()
        if subcuri != '_' or CURIEMAP[subcuri] != '_:b':
            subjt = '<' + subjt + '>'
    else:
        raise ValueError(
            "Can not work with: <{}> {} , <{}> {}, {}".format(
                subcuri, subid, prdcuri, prdid, objt))
    triples = subjt + ' <' + CURIEMAP[prdcuri] + prdid.strip() + '> ' + objt + " .\n"
    if subject_category is not None:
        triples = triples + make_biolink_category_triple(subjt, subject_category)
    if object_category is not None:
        triples = triples + make_biolink_category_triple(objt, object_category)
    return triples
def is_literal(thing):
    """
    Heuristic type inference: *thing* is a literal unless it matches the
    CURIE regex or starts with a known URI scheme.
    :return: bool, True when *thing* should be serialised as a literal
    """
    looks_like_curie = re.match(CURIERE, thing) is not None
    scheme = thing.split(':')[0].lower()
    return not (looks_like_curie or scheme in ('http', 'https', 'ftp'))
def make_biolink_category_triple(subj, cat):
    """Return an ntriples line asserting *subj*'s biolink category, or ''
    when *subj* is a literal or the expansion fails."""
    if is_literal(subj):
        return ''
    try:
        return " ".join(
            [subj, expand_curie(blv.terms['category']), expand_curie(cat), " .\n"])
    except ValueError:
        return ''
def expand_curie(this_curie):
    """Expand a CURIE into an angle-bracketed IRI via CURIEMAP; anything
    that is not an expandable CURIE falls through to an escaped literal
    (numerics are quoted as-is, bnodes are left unbracketed)."""
    match = re.match(CURIERE, this_curie)
    if match is not None:
        try:
            (curie_prefix, this_id) = re.split(r':', this_curie)
        except ValueError:
            match = None
    if match is not None and curie_prefix in CURIEMAP:
        iri = CURIEMAP[curie_prefix] + this_id.strip()
        # allow unexpanded bnodes in object
        if curie_prefix != '_' or CURIEMAP[curie_prefix] != '_:b':
            iri = '<' + iri + '>'
    elif this_curie.isnumeric():
        iri = '"' + this_curie + '"'
    else:
        # Literals may not contain the characters ", LF, CR '\'
        # except in their escaped forms. internal quotes as well.
        this_curie = this_curie.strip('"').replace('\\', '\\\\').replace('"', '\'')
        this_curie = this_curie.replace('\n', '\\n').replace('\r', '\\r')
        iri = '"' + this_curie + '"'
    return iri
def write_spo(sub, prd, obj, triples, subject_category=None, object_category=None):
    """Append one ntriples line (plus optional biolink category triples)
    to the *triples* buffer, so it can be dropped later if need be."""
    triple = make_spo(
        sub,
        prd,
        obj,
        subject_category=subject_category,
        object_category=object_category,
    )
    triples.append(triple)
def scv_link(scv_sig, rcv_trip):
    '''
    Creates links between SCVs based on their pathogenicity/significance calls.
    NOTE: consumes (pops) the entries of *scv_sig* while pairing them.

    # GENO:0000840 - GENO:0000840 --> is_equilavent_to SEPIO:0000098
    # GENO:0000841 - GENO:0000841 --> is_equilavent_to SEPIO:0000098
    # GENO:0000843 - GENO:0000843 --> is_equilavent_to SEPIO:0000098
    # GENO:0000844 - GENO:0000844 --> is_equilavent_to SEPIO:0000098
    # GENO:0000840 - GENO:0000844 --> contradicts SEPIO:0000101
    # GENO:0000841 - GENO:0000844 --> contradicts SEPIO:0000101
    # GENO:0000841 - GENO:0000843 --> contradicts SEPIO:0000101
    # GENO:0000840 - GENO:0000841 --> is_consistent_with SEPIO:0000099
    # GENO:0000843 - GENO:0000844 --> is_consistent_with SEPIO:0000099
    # GENO:0000840 - GENO:0000843 --> strongly_contradicts SEPIO:0000100
    '''
    sig = {  # 'arbitrary scoring scheme increments as powers of two'
        'GENO:0000840': 1,    # pathogenic
        'GENO:0000841': 2,    # likely pathogenic
        'GENO:0000844': 4,    # likely benign
        'GENO:0000843': 8,    # benign
        'GENO:0000845': 16,   # uncertain significance
    }

    lnk = {  # specific result from diff in 'arbitrary scoring scheme'
        0: 'SEPIO:0000098',   # is_equilavent_to
        1: 'SEPIO:0000099',   # is_consistent_with
        2: 'SEPIO:0000101',   # contradicts
        3: 'SEPIO:0000101',   # contradicts
        4: 'SEPIO:0000099',   # is_consistent_with
        6: 'SEPIO:0000101',   # contradicts
        7: 'SEPIO:0000100',   # strongly_contradicts
        8: 'SEPIO:0000126',   # is_inconsistent_with
        12: 'SEPIO:0000126',
        14: 'SEPIO:0000126',
        15: 'SEPIO:0000126',
    }
    keys = sorted(scv_sig.keys())
    for scv_a in keys:
        scv_av = scv_sig.pop(scv_a)
        # link every remaining SCV to scv_a, in both directions
        for scv_b in scv_sig.keys():
            link = lnk[abs(sig[scv_av] - sig[scv_sig[scv_b]])]
            rcv_trip.append(make_spo(scv_a, link, scv_b))
            rcv_trip.append(make_spo(scv_b, link, scv_a))
def digest_id(wordage):
    """
    Return a stable 20-character digest of the input string.

    The sha1 hex digest is truncated and prefixed with a literal 'b' so the
    identifier never starts with a digit -- not required by RDF, but handy
    when the identifier is reused in contexts that forbid a leading digit.

    :param wordage: the string to hash
    :returns: 20 hex-ish char digest beginning with 'b'
    """
    hexdigest = hashlib.sha1(wordage.encode('utf-8')).hexdigest()
    return 'b' + hexdigest[1:20]
def process_measure_set(measure_set, rcv_acc) -> Variant:
    """
    Given a MeasureSet, create a Variant object
    :param measure_set: XML object (ClinVar MeasureSet element)
    :param rcv_acc: str rcv accession (used only in log/error messages)
    :raises ValueError: on an unknown MeasureSet Type, or when a type
        cannot be inferred for a multi-allele "Variant" set
    :return: Variant object
    """
    rcv_variant_id = measure_set.get('ID')  # Short integer accession
    # rcv_variant_acc = measure_set.get('Acc') # Long namespaced-zeropadded identifier
    measure_set_type = measure_set.get('Type')

    # Create Variant object
    rcv_variant_id = 'ClinVarVariant:' + rcv_variant_id
    variant = Variant(id=rcv_variant_id)

    if measure_set_type in [
            "Haplotype",
            "Phase unknown",
            "Distinct chromosomes",
            "Haplotype, single variant",
            # "Variant", # see below
    ]:
        variant.variant_type = measure_set_type
    elif measure_set_type == "Variant":
        # We will attempt to infer the type
        # (from the single allele, at the bottom of this function)
        pass
    else:
        raise ValueError(
            rcv_acc + " UNKNOWN VARIANT SUPERTYPE / TYPE \n" + measure_set_type)

    for rcv_measure in measure_set.findall('./Measure'):
        allele_name = rcv_measure.find('./Name/ElementValue[@Type="Preferred"]')
        rcv_allele_label = None
        if allele_name is not None:
            rcv_allele_label = allele_name.text
        # else:
        #     LOG.warning(rcv_acc + " VARIANT MISSING LABEL")

        allele_type = rcv_measure.get('Type').strip()
        # Create Allele object for this Measure
        rcv_allele_id = 'ClinVarVariant:' + rcv_measure.get('ID')
        allele = Allele(
            id=rcv_allele_id,
            label=rcv_allele_label,
            variant_type=allele_type
        )
        # this xpath works but is not supported by ElementTree.
        # ./AttributeSet/Attribute[starts-with(@Type, "HGVS")]
        for synonym in rcv_measure.findall('./AttributeSet/Attribute[@Type]'):
            if synonym.get('Type') is not None and \
                    synonym.text is not None and \
                    re.match(r'^HGVS', synonym.get('Type')):
                allele.synonyms.append(synonym.text)
        # XRef[@DB="dbSNP"]/@ID
        for dbsnp in rcv_measure.findall('./XRef[@DB="dbSNP"]'):
            allele.dbsnps.append('dbSNP:' + dbsnp.get('ID'))
            allele.synonyms.append('rs' + dbsnp.get('ID'))

        # /RCV/MeasureSet/Measure/Name/ElementValue/[@Type="Preferred"]
        # /RCV/MeasureSet/Measure/MeasureRelationship[@Type]/XRef[@DB="Gene"]/@ID
        # RCV_Variant = RCV_Measure.find(
        #    './MeasureRelationship[@Type="variant in gene"]')

        # 540074 genes overlapped by variant
        # 176970 within single gene
        # 24746 within multiple genes by overlap
        # 5698 asserted, but not computed
        # 439 near gene, upstream
        # 374 variant in gene
        # 54 near gene, downstream
        rcv_allele_rels = rcv_measure.findall('./MeasureRelationship')
        if rcv_allele_rels is None:  # try letting them all through
            LOG.info(ElementTree.tostring(rcv_measure).decode('utf-8'))
        else:
            for measure in rcv_allele_rels:
                allele_rel_type = measure.get('Type').strip()
                # if rcv_variant_relationship_type is not None:
                #    LOG.warning(
                #        rcv_acc +
                #        ' rcv_variant_relationship_type ' +
                #        rcv_variant_relationship_type)

                # XRef[@DB="Gene"]/@ID
                # NOTE(review): a Gene entry is appended even when the XRef is
                # absent (id=None) -- presumably deliberate; confirm upstream.
                ncbigene_id = None
                allele_gene = measure.find('./XRef[@DB="Gene"]')
                if allele_gene is not None:
                    ncbigene_id = allele_gene.get('ID')
                allele.genes.append(Gene(
                    id=ncbigene_id,
                    association_to_allele=allele_rel_type
                ))
        variant.alleles.append(allele)
    # If a variant only has one allele
    # Infer variant type from allele type
    # and allele ID from the variant ID
    if len(variant.alleles) == 1:
        variant.alleles[0].id = variant.id
        variant.variant_type = variant.alleles[0].variant_type
    if variant.variant_type is None:
        raise ValueError("{} Unable to infer type from {}".format(
            rcv_acc, measure_set_type))
    return variant
def resolve(label):
    '''
    Composite label-to-term mapping, preferring the local table.

    Given f(x) = GLOBALTT and g(x) = LOCALTT, returns, in order of
    preference: f(g(x)) | g(x) | f(x) | x.  Unmapped labels are logged
    and returned unchanged.

    # the descendant resolve(label) function in Source.py
    # should be used instead and this f(x) removed
    :return: label's mapping (or the label itself on fall-through)
    '''
    if label is not None and label in LOCALTT:
        local_term = LOCALTT[label]
        if local_term in GLOBALTT:
            return GLOBALTT[local_term]
        LOG.warning(
            'Local translation but do not have a global term_id for %s', label)
        return local_term
    if label is not None and label in GLOBALTT:
        return GLOBALTT[label]
    LOG.error('Do not have any mapping for label: %s', label)
    return label
def allele_to_triples(allele, triples) -> None:
    """
    Process allele info such as dbsnp ids and synonyms:
    emits type/taxon/label triples plus dbSNP xrefs and exact synonyms.
    :param allele: Allele
    :param triples: List, Buffer to store the triples
    :return: None
    """
    write_spo(
        allele.id, GLOBALTT['type'], resolve(allele.variant_type), triples,
        subject_category=blv.terms['SequenceVariant'])
    write_spo(allele.id, GLOBALTT['in taxon'], GLOBALTT['Homo sapiens'], triples)

    if allele.label is not None:
        write_spo(allele.id, GLOBALTT['label'], allele.label, triples)

    # <ClinVarVariant:rcv_variant_id><OWL:hasDbXref><dbSNP:rs>
    #
    # Note that making clinvar variants and dbSNPs equivalent
    # causes clique merge bugs, so best to leave them as xrefs
    # Example: https://www.ncbi.nlm.nih.gov/clinvar/variation/31915/
    #          https://www.ncbi.nlm.nih.gov/clinvar/variation/21303/
    for dbsnp_id in allele.dbsnps:
        # sameAs or hasdbxref?
        write_spo(
            allele.id,
            GLOBALTT['database_cross_reference'],
            dbsnp_id,
            triples,
            subject_category=blv.terms['SequenceVariant'],
            object_category=blv.terms['SequenceVariant'])

    for syn in allele.synonyms:
        write_spo(allele.id, GLOBALTT['has_exact_synonym'], syn, triples,
                  subject_category=blv.terms['SequenceVariant'],
                  object_category=blv.terms['SequenceVariant'])
def record_to_triples(rcv: ClinVarRecord, triples: List, g2p_map: Dict) -> None:
    """
    Given a ClinVarRecord, adds triples to the triples list
    :param rcv: ClinVarRecord
    :param triples: List, Buffer to store the triples
    :param g2p_map: Gene to phenotype dict (gene id -> medgen condition ids)
    :raises ValueError: when rcv.genovar is neither Variant nor Genotype
    :return: None
    """
    # For all genotypes variants we add a type, label, and has_taxon human
    write_spo(
        rcv.genovar.id, GLOBALTT['type'], resolve(rcv.genovar.variant_type), triples,
        subject_category=blv.terms['SequenceVariant'])
    write_spo(rcv.genovar.id, GLOBALTT['in taxon'], GLOBALTT['Homo sapiens'], triples)

    if rcv.genovar.label is not None:
        write_spo(rcv.genovar.id, GLOBALTT['label'], rcv.genovar.label, triples)

    gene_allele = []  # List of two tuples (gene, association_to_allele)

    # Check the type of genovar
    if isinstance(rcv.genovar, Variant):
        # Multi-allele variants (haplotypes) get has_variant_part links
        if len(rcv.genovar.alleles) > 1:
            for allele in rcv.genovar.alleles:
                write_spo(
                    rcv.genovar.id, GLOBALTT['has_variant_part'], allele.id, triples,
                    subject_category=blv.terms['SequenceVariant'],
                    object_category=blv.terms['SequenceVariant'])
        for allele in rcv.genovar.alleles:
            allele_to_triples(allele, triples)
            for gene in allele.genes:
                gene_allele.append((gene.id, gene.association_to_allele))
        # Hack to determine what relationship to make between a gene and variant.
        # First look at the rcv variant gene relationship type to get the correct
        # curie, but override has_affected_feature in cases where a gene to disease
        # association has not been curated
        # TODO refactor this, the intention is to avoid
        # cases where a variant is mapped to two genes on different strands
        # and we want to connect the correct one
        # see https://github.com/monarch-initiative/monarch-app/issues/1591
        # https://github.com/monarch-initiative/dipper/issues/593
        if len([val[1] for val in gene_allele
                if LOCALTT[val[1]] == 'has_affected_feature']) == len(gene_allele):
            for gene, allele_rel in gene_allele:
                is_affected = True
                # only (likely) pathogenic calls with a curated gene-condition
                # mapping keep the has_affected_feature relation
                if not rcv.significance == GLOBALTT['pathogenic_for_condition'] \
                        and not rcv.significance == \
                        GLOBALTT['likely_pathogenic_for_condition']:
                    is_affected = False
                else:
                    for condition in rcv.conditions:
                        if condition.medgen_id is None \
                                or gene not in g2p_map \
                                or condition.medgen_id not in g2p_map[gene]:
                            is_affected = False
                            break
                if is_affected:
                    write_spo(
                        rcv.genovar.id,
                        resolve(allele_rel),
                        'NCBIGene:' + gene,
                        triples,
                        subject_category=blv.terms['SequenceVariant'],
                        object_category=blv.terms['Gene'])
                else:
                    write_spo(
                        rcv.genovar.id,
                        GLOBALTT['part_of'],
                        'NCBIGene:' + gene,
                        triples,
                        subject_category=blv.terms['SequenceVariant'],
                        object_category=blv.terms['Gene'])
        else:
            # mixed relationship types: fall back to per-allele part_of
            for allele in rcv.genovar.alleles:
                for gene in allele.genes:
                    write_spo(
                        allele.id,
                        GLOBALTT['part_of'],
                        'NCBIGene:' + gene.id,
                        triples,
                        subject_category=blv.terms['SequenceVariant'],
                        object_category=blv.terms['SequenceVariant'])
    elif isinstance(rcv.genovar, Genotype):
        for variant in rcv.genovar.variants:
            write_spo(
                rcv.genovar.id, GLOBALTT['has_variant_part'], variant.id, triples,
                subject_category=blv.terms['SequenceVariant'],
                object_category=blv.terms['SequenceVariant'])
            for allele in variant.alleles:
                allele_to_triples(allele, triples)
                for gene in allele.genes:
                    gene_allele.append((gene.id, gene.association_to_allele))
                    write_spo(
                        allele.id,
                        resolve(gene.association_to_allele),
                        'NCBIGene:' + gene.id,
                        triples,
                        subject_category=blv.terms['SequenceVariant'],
                        object_category=blv.terms['SequenceVariant'])
        # Zygosity if we can infer it from the type
        if rcv.genovar.variant_type == "CompoundHeterozygote":
            write_spo(
                rcv.genovar.id,
                GLOBALTT['has_zygosity'],
                GLOBALTT['compound heterozygous'],
                triples,
                subject_category=blv.terms['SequenceVariant'],
                object_category=blv.terms['Zygosity'])
        # If all variants are within the same single gene,
        # the genotype affects the gene
        if len([val[1] for val in gene_allele
                if val[1] in ['within single gene', 'variant in gene']
                ]) == len(gene_allele) \
                and len({val[0] for val in gene_allele}) == 1:
            write_spo(
                rcv.genovar.id,
                GLOBALTT['has_affected_feature'],
                'NCBIGene:' + gene_allele[0][0],
                triples,
                subject_category=blv.terms['SequenceVariant'],
                object_category=blv.terms['Gene'])
    else:
        raise ValueError("Invalid type for genovar in rcv {}".format(rcv.id))
def write_review_status_scores():
    """
    Build triples that attach ClinVar's "star" score to each review status.

    ClinVar grades the review status of a record with 0-4 gold stars
    (per https://www.ncbi.nlm.nih.gov/clinvar/docs/details/ Table 1,
    with changes made mid-2015):

        0  "no assertion criteria provided" / "no assertion provided"
           (no submitter gave assertion criteria, or no interpretation at all)
        1  "criteria provided, single submitter" /
           "criteria provided, conflicting interpretations"
        2  "criteria provided, multiple submitters, no conflicts"
        3  "reviewed by expert panel"
        4  "practice guideline"

    A group wishing to be recognized as an expert panel must first apply to
    ClinGen by completing the form available from their ftp site.

    :param None
    :return: list of triples, one per review status, each of the form
        <review status term> <has specified numeric value> <score>
    """
    # (status label, star score) pairs; order matches ClinVar's table
    star_ratings = (
        ("no assertion criteria provided", '0'),
        ("no assertion provided", '0'),
        ("criteria provided, single submitter", '1'),
        ("criteria provided, conflicting interpretations", '1'),
        ("criteria provided, multiple submitters, no conflicts", '2'),
        ("reviewed by expert panel", '3'),
        ("practice guideline", '4'),
    )
    return [
        make_spo(
            GLOBALTT[status],
            GLOBALTT['has specified numeric value'],
            score)
        for status, score in star_ratings]
def parse():
    """
    Main function for parsing a clinvar XML release and outputting triples

    Reads the gzipped ClinVarFullRelease XML plus a gene-to-disease mapping
    file, converts each RCV record (and its SCV submissions) into ntriple
    statements via write_spo/make_spo, and writes the de-duplicated triple
    set to the output file. Under-specified records are echoed to a reject
    file for later inspection. Paths and translation tables come from the
    command line arguments defined below.
    """
    files = {
        'f1': {
            'file': 'ClinVarFullRelease_00-latest.xml.gz',
            'url': CV_FTP + '/xml/ClinVarFullRelease_00-latest.xml.gz'
        },
        'f2': {
            'file': 'gene_condition_source_id',
            'url': CV_FTP + 'gene_condition_source_id'
        }
    }
    # handle arguments for IO
    argparser = argparse.ArgumentParser()
    # INPUT
    argparser.add_argument(
        '-f', '--filename', default=files['f1']['file'],
        help="gziped .xml input filename. default: '" + files['f1']['file'] + "'")
    argparser.add_argument(
        '-m', '--mapfile', default=files['f2']['file'],
        help="input g2d mapping file. default: '" + files['f2']['file'] + "'")
    argparser.add_argument(
        '-i', '--inputdir', default=RPATH + '/raw/' + INAME,
        help="path to input file. default: '" + RPATH + '/raw/' + INAME + "'")
    argparser.add_argument(
        '-l', "--localtt", default=LOCAL_TT_PATH,
        help="'spud'\t'potato' default: " + LOCAL_TT_PATH)
    argparser.add_argument(
        '-g', "--globaltt", default=GLOBAL_TT_PATH,
        help="'potato'\t'PREFIX:p123' default: " + GLOBAL_TT_PATH)
    # output '/dev/stdout' would be my first choice
    argparser.add_argument(
        '-d', "--destination", default=RPATH + '/out',
        help='directory to write into. default: "' + RPATH + '/out"')
    argparser.add_argument(
        '-o', "--output", default=INAME + '.nt',
        help='file name to write to. default: ' + INAME + '.nt')
    argparser.add_argument(
        '-s', '--skolemize', default=True,
        help='default: True. False keeps plain blank nodes "_:xxx"')
    args = argparser.parse_args()
    basename = re.sub(r'\.xml.gz$', '', args.filename)  #
    filename = args.inputdir + '/' + args.filename  # gziped xml input file
    mapfile = args.inputdir + '/' + args.mapfile
    # be sure I/O paths exist
    try:
        os.makedirs(args.inputdir)
    except FileExistsError:
        pass  # no problem
    try:
        os.makedirs(args.destination)
    except FileExistsError:
        pass  # no problem
    # check input exists
    # avoid clobbering existing output until we are finished
    outfile = args.destination + '/TMP_' + args.output + '_PART'
    try:
        os.remove(outfile)
    except FileNotFoundError:
        # no problem
        LOG.info("fresh start for %s", outfile)
    # append mode: the tmp file is freshly removed above, so this starts empty
    outtmp = open(outfile, 'a')
    output = args.destination + '/' + args.output
    # catch and release input for future study
    reject = RPATH + '/' + basename + '_reject.xml'
    # ignore = args.inputdir + '/' + INAME + '_ignore.txt'  # unused
    try:
        os.remove(reject)
    except FileNotFoundError:
        # no problem
        LOG.info("fresh start for %s", reject)
    # NOTE: `reject` is rebound from path string to open file handle here
    reject = open(reject, 'w')
    # default to /dev/stdout if anything amiss
    # Buffer to store the triples below a MONARCH_association
    # before we decide to whether to keep or not
    rcvtriples = []
    # Buffer to store non redundant triples between RCV sets
    releasetriple = set()
    # make triples to relate each review status to Clinvar's "score" - 0 to 4 stars
    # releasetriple.update(set(write_review_status_scores()))
    # gene symbol -> list of medgen disease ids, read from the g2d mapping file
    g2pmap = {}
    # this needs to be read first
    with open(mapfile, 'rt') as tsvfile:
        reader = csv.reader(tsvfile, delimiter="\t")
        next(reader)  # header
        for row in reader:
            if row[0] in g2pmap:
                g2pmap[row[0]].append(row[3])
            else:
                g2pmap[row[0]] = [row[3]]
    # Override default global translation table
    if args.globaltt:
        with open(args.globaltt) as globaltt_fh:
            global GLOBALTT
            GLOBALTT = yaml.safe_load(globaltt_fh)
    # Override default local translation table
    if args.localtt:
        with open(args.localtt) as localtt_fh:
            global LOCALTT
            LOCALTT = yaml.safe_load(localtt_fh)
    # Override the given Skolem IRI for our blank nodes
    # with an unresolvable alternative.
    if args.skolemize is False:
        global CURIEMAP
        CURIEMAP['_'] = '_:'
    # Seed releasetriple to avoid union with the empty set
    # <MonarchData: + args.output> <a> <owl:Ontology>
    releasetriple.add(
        make_spo('MonarchData:' + args.output, GLOBALTT['type'], GLOBALTT['ontology']))
    rjct_cnt = tot_cnt = 0
    # review status -> star score; duplicates write_review_status_scores()
    status_and_scores = {
        "no assertion criteria provided": '0',
        "no assertion provided": '0',
        "criteria provided, single submitter": '1',
        "criteria provided, conflicting interpretations": '1',
        "criteria provided, multiple submitters, no conflicts": '2',
        "reviewed by expert panel": '3',
        "practice guideline": '4',
    }
    #######################################################
    # main loop over xml
    # taken in chunks composed of ClinVarSet stanzas
    with gzip.open(filename, 'rt') as clinvar_fh:
        # w/o specifing events it defaults to 'end'
        tree = ElementTree.iterparse(clinvar_fh)
        for event, element in tree:
            if element.tag != 'ClinVarSet':
                ReleaseSet = element
                continue
            else:
                ClinVarSet = element
                tot_cnt += 1
                if ClinVarSet.find('RecordStatus').text != 'current':
                    LOG.warning(
                        "%s is not current", ClinVarSet.get('ID'))
                RCVAssertion = ClinVarSet.find('./ReferenceClinVarAssertion')
                # /ReleaseSet/ClinVarSet/ReferenceClinVarAssertion/ClinVarAccession/@Acc
                # 162,466 2016-Mar
                rcv_acc = RCVAssertion.find('./ClinVarAccession').get('Acc')
                # I do not expect we care as we shouldn't keep the RCV.
                if RCVAssertion.find('./RecordStatus').text != 'current':
                    LOG.warning(
                        "%s <is not current on>", rcv_acc)  # + rs_dated)
                ClinicalSignificance = RCVAssertion.find(
                    './ClinicalSignificance/Description').text
                significance = resolve(ClinicalSignificance)
                # # # Child elements
                #
                # /RCV/Assertion
                # /RCV/AttributeSet
                # /RCV/Citation
                # /RCV/ClinVarAccession
                # /RCV/ClinicalSignificance
                # /RCV/MeasureSet
                # /RCV/ObservedIn
                # /RCV/RecordStatus
                # /RCV/TraitSet
                RCV_ClinicalSignificance = RCVAssertion.find('./ClinicalSignificance')
                # NOTE(review): rcv_review is only (re)assigned when a ReviewStatus
                # is present; on the first record without one this raises NameError,
                # and otherwise a stale value from a prior ClinVarSet carries over
                # to the write at the bottom of the SCV loop -- confirm intended.
                if RCV_ClinicalSignificance is not None:
                    RCV_ReviewStatus = RCV_ClinicalSignificance.find('./ReviewStatus')
                    if RCV_ReviewStatus is not None:
                        rcv_review = RCV_ReviewStatus.text.strip()
                #######################################################################
                # Our Genotype/Subject is a sequence alteration / Variant
                # which apparently was Measured
                # /ReleaseSet/ClinVarSet/ReferenceClinVarAssertion/MeasureSet/@ID
                # 162,466 2016-Mar
                # 366,566 2017-Mar
                # are now >4 types
                # <GenotypeSet ID="424700" Type="CompoundHeterozygote">
                # <MeasureSet ID="242681" Type="Variant">
                # <MeasureSet ID="123456" Type="Haplotype">
                # <Measure ID="46900" Type="single nucleotide variant">
                # As of 04/2019
                # Measure is no longer a direct child of ReferenceClinVarAssertion
                # Unless a MeasureSet Type="Variant", both the MeasureSet ID and Measure IDs
                # will be resolvable, eg:
                # https://www.ncbi.nlm.nih.gov/clinvar/variation/431733/
                # https://www.ncbi.nlm.nih.gov/clinvar/variation/425238/
                # If MeasureSet Type == Variant, make the ID the child ID
                # Genotypes can have >1 MeasureSets (Variants)
                # MeasureSets can have >1 Measures (Alleles)
                # Measures (Alleles) can have >1 gene
                RCV_MeasureSet = RCVAssertion.find('./MeasureSet')
                # Note: it is a "set" but have only seen a half dozen with two,
                # all of type: copy number gain SO:0001742
                genovar = None  # Union[Genotype, Variant, None]
                if RCV_MeasureSet is None:
                    # 201705 introduced GenotypeSet a CompoundHeterozygote
                    # with multiple variants
                    RCV_GenotypeSet = RCVAssertion.find('./GenotypeSet')
                    genovar = Genotype(
                        id="ClinVarVariant:" + RCV_GenotypeSet.get('ID'),
                        label=RCV_GenotypeSet.find(
                            './Name/ElementValue[@Type="Preferred"]').text,
                        variant_type=RCV_GenotypeSet.get('Type')
                    )
                    for RCV_MeasureSet in RCV_GenotypeSet.findall('./MeasureSet'):
                        genovar.variants.append(
                            process_measure_set(RCV_MeasureSet, rcv_acc))
                else:
                    genovar = process_measure_set(RCV_MeasureSet, rcv_acc)
                # Create ClinVarRecord object
                rcv = ClinVarRecord(
                    id=RCVAssertion.get('ID'),
                    accession=rcv_acc,
                    created=RCVAssertion.get('DateCreated'),
                    updated=RCVAssertion.get('DateLastUpdated'),
                    genovar=genovar,
                    significance=significance
                )
                #######################################################################
                # the Object is the Disease, here is called a "trait"
                # reluctantly starting with the RCV disease
                # not the SCV traits as submitted due to time constraints
                for RCV_TraitSet in RCVAssertion.findall('./TraitSet'):
                    # /RCV/TraitSet/Trait[@Type="Disease"]/@ID
                    # 144,327 2016-Mar
                    # /RCV/TraitSet/Trait[@Type="Disease"]/XRef/@DB
                    #     29 Human Phenotype Ontology
                    #     82 EFO
                    #    659 Gene
                    #  53218 Orphanet
                    #  57356 OMIM
                    # 142532 MedGen
                    for RCV_Trait in RCV_TraitSet.findall('./Trait[@Type="Disease"]'):
                        # has_medgen_id = False
                        rcv_disease_db = None
                        rcv_disease_id = None
                        medgen_id = None
                        disease_label = None
                        RCV_TraitName = RCV_Trait.find(
                            './Name/ElementValue[@Type="Preferred"]')
                        if RCV_TraitName is not None:
                            disease_label = RCV_TraitName.text
                        # else:
                        #     LOG.warning(rcv_acc + " MISSING DISEASE NAME")
                        # disease xref priority: OMIM(PS) > Orphanet > MONDO > MedGen/UMLS
                        for RCV_TraitXRef in RCV_Trait.findall('./XRef[@DB="OMIM"]'):
                            rcv_disease_db = RCV_TraitXRef.get('DB')
                            rcv_disease_id = RCV_TraitXRef.get('ID')
                            if rcv_disease_id.startswith('PS'):
                                rcv_disease_db = 'OMIMPS'
                            break
                        # Accept Orphanet if no OMIM
                        if rcv_disease_db is None or rcv_disease_id is None:
                            if rcv_disease_db is not None:
                                break
                            for RCV_TraitXRef in RCV_Trait.findall(
                                    './XRef[@DB="Orphanet"]'):
                                rcv_disease_db = 'ORPHA'  # RCV_TraitXRef.get('DB')
                                rcv_disease_id = RCV_TraitXRef.get('ID')
                                break
                        # Accept MONDO if no OMIM or Orphanet  # revisit priority
                        if rcv_disease_db is None or rcv_disease_id is None:
                            if rcv_disease_db is not None:
                                break
                            for RCV_TraitXRef in RCV_Trait.findall('./XRef[@DB="MONDO"]'):
                                rcv_disease_db = 'MONDO'  # RCV_TraitXRef.get('DB')
                                rcv_disease_id = RCV_TraitXRef.get('ID')
                                break
                        # Always get medgen for g2p mapping file
                        for RCV_TraitXRef in RCV_Trait.findall('./XRef[@DB="MedGen"]'):
                            medgen_id = RCV_TraitXRef.get('ID')
                            if rcv_disease_db is None:
                                # use UMLS prefix instead of MedGen
                                # https://github.com/monarch-initiative/dipper/issues/874
                                rcv_disease_db = 'UMLS'  # RCV_TraitXRef.get('DB')
                            if rcv_disease_id is None:
                                rcv_disease_id = medgen_id
                        # See if there are any leftovers. Possibilities include:
                        # EFO, Gene, Human Phenotype Ontology
                        if rcv_disease_db is None:
                            for RCV_TraitXRef in RCV_Trait.findall('./XRef'):
                                LOG.warning(
                                    "%s has UNKNOWN DISEASE database\t %s has id %s",
                                    rcv_acc,
                                    RCV_TraitXRef.get('DB'),
                                    RCV_TraitXRef.get('ID'))
                                # 82372 MedGen
                                #    58 EFO
                                #     1 Human Phenotype Ontology
                                break
                        rcv.conditions.append(Condition(
                            id=rcv_disease_id,
                            label=disease_label,
                            database=rcv_disease_db,
                            medgen_id=medgen_id
                        ))
                # Check that we have enough info from the RCV
                # to justify parsing the related SCVs
                # check that no members of rcv.genovar are none
                # and that at least one condition has an id and db
                if [1 for member in vars(rcv.genovar) if member is None] \
                        or not [
                            1 for condition in rcv.conditions
                            if condition.id is not None and condition.database is not None]:
                    LOG.info('%s is under specified. SKIPPING', rcv_acc)
                    rjct_cnt += 1
                    # Write this Clinvar set out so we can know what we are missing
                    print(
                        # minidom.parseString(
                        #     ElementTree.tostring(
                        #         ClinVarSet)).toprettyxml(
                        #             indent="   "), file=reject)
                        # too slow. doubles time
                        ElementTree.tostring(ClinVarSet).decode('utf-8'), file=reject)
                    ClinVarSet.clear()
                    continue
                # start anew
                del rcvtriples[:]
                # At this point we should have a ClinVarRecord object with all
                # necessary data. Next convert it to triples
                record_to_triples(rcv, rcvtriples, g2pmap)
                #######################################################################
                # Descend into each SCV grouped with the current RCV
                #######################################################################
                # keep a collection of a SCV's associations and patho significance call
                # when this RCV's set is complete, interlink based on patho call
                pathocalls = {}
                for SCV_Assertion in ClinVarSet.findall('./ClinVarAssertion'):
                    # /SCV/AdditionalSubmitters
                    # /SCV/Assertion
                    # /SCV/AttributeSet
                    # /SCV/Citation
                    # /SCV/ClinVarAccession
                    # /SCV/ClinVarSubmissionID
                    # /SCV/ClinicalSignificance
                    # /SCV/Comment
                    # /SCV/CustomAssertionScore
                    # /SCV/ExternalID
                    # /SCV/MeasureSet
                    # /SCV/ObservedIn
                    # /SCV/RecordStatus
                    # /SCV/StudyDescription
                    # /SCV/StudyName
                    # /SCV/TraitSet
                    # init
                    # scv_review = scv_significance = None
                    # scv_assertcount += 1
                    # one MONARCH association per (SCV, RCV condition) pair
                    for condition in rcv.conditions:
                        if condition.database is None:
                            continue
                        if len(condition.id.split(':')) == 1:
                            rcv_disease_curie = condition.database + ':' + condition.id
                        else:
                            rcv_disease_curie = ':'.join(condition.id.split(':')[-2:])
                        scv_id = SCV_Assertion.get('ID')
                        monarch_id = digest_id(rcv.id + scv_id + condition.id)
                        monarch_assoc = 'MONARCH:' + monarch_id
                        # if we parsed a review status up above, attach this review status
                        # to this association to allow filtering of RCV by review status
                        if rcv_review is not None:
                            write_spo(
                                monarch_assoc, GLOBALTT['assertion_confidence_score'],
                                status_and_scores[rcv_review], rcvtriples)
                        ClinVarAccession = SCV_Assertion.find('./ClinVarAccession')
                        scv_acc = ClinVarAccession.get('Acc')
                        scv_accver = ClinVarAccession.get('Version')
                        scv_orgid = ClinVarAccession.get('OrgID')
                        # scv_updated = ClinVarAccession.get('DateUpdated')  # not used
                        SCV_SubmissionID = SCV_Assertion.find('./ClinVarSubmissionID')
                        if SCV_SubmissionID is not None:
                            scv_submitter = SCV_SubmissionID.get('submitter')
                        # blank node identifiers
                        _evidence_id = '_:' + digest_id(monarch_id + '_evidence')
                        write_spo(
                            _evidence_id, GLOBALTT['label'], monarch_id + '_evidence',
                            rcvtriples, subject_category=blv.terms['EvidenceType'])
                        _assertion_id = '_:' + digest_id(monarch_id + '_assertion')
                        write_spo(
                            _assertion_id, GLOBALTT['label'], monarch_id + '_assertion',
                            rcvtriples,
                            subject_category=blv.terms['InformationContentEntity'])
                        # TRIPLES
                        # <monarch_assoc><rdf:type><OBAN:association> .
                        write_spo(
                            monarch_assoc, GLOBALTT['type'], GLOBALTT['association'],
                            rcvtriples,
                            subject_category=blv.terms['Association'],
                            object_category=blv.terms['OntologyClass'])
                        # <monarch_assoc>
                        #     <OBAN:association_has_subject>
                        #         <ClinVarVariant:rcv_variant_id>
                        write_spo(
                            monarch_assoc, GLOBALTT['association has subject'],
                            rcv.genovar.id, rcvtriples,
                            subject_category=blv.terms['Association'],
                            object_category=blv.terms['SequenceVariant'])
                        # <ClinVarVariant:rcv_variant_id><rdfs:label><rcv.variant.label> .
                        # <monarch_assoc><OBAN:association_has_object><rcv_disease_curie> .
                        write_spo(
                            monarch_assoc, GLOBALTT['association has object'],
                            rcv_disease_curie, rcvtriples,
                            subject_category=blv.terms['Association'],
                            object_category=blv.terms['Disease'])
                        # <rcv_disease_curie><rdfs:label><rcv_disease_label> .
                        # medgen might not have a disease label
                        if condition.label is not None:
                            write_spo(
                                rcv_disease_curie, GLOBALTT['label'], condition.label,
                                rcvtriples, subject_category=blv.terms['Disease'])
                        # <monarch_assoc><SEPIO:0000007><:_evidence_id> .
                        write_spo(
                            monarch_assoc,
                            GLOBALTT['has_supporting_evidence_line'],
                            _evidence_id,
                            rcvtriples,
                            subject_category=blv.terms['Association'],
                            object_category=blv.terms['EvidenceType'])
                        # <monarch_assoc><SEPIO:0000015><:_assertion_id> .
                        write_spo(
                            monarch_assoc,
                            GLOBALTT['is_asserted_in'],
                            _assertion_id,
                            rcvtriples,
                            subject_category=blv.terms['Association'],
                            object_category=blv.terms['InformationContentEntity'])
                        # <:_evidence_id><rdf:type><ECO:0000000> .
                        write_spo(
                            _evidence_id, GLOBALTT['type'], GLOBALTT['evidence'],
                            rcvtriples,
                            subject_category=blv.terms['EvidenceType'],
                            object_category=blv.terms['OntologyClass'])
                        # <:_assertion_id><rdf:type><SEPIO:0000001> .
                        write_spo(
                            _assertion_id, GLOBALTT['type'], GLOBALTT['assertion'],
                            rcvtriples,
                            subject_category=blv.terms['InformationContentEntity'],
                            object_category=blv.terms['OntologyClass'])
                        # <:_assertion_id><rdfs:label><'assertion'> .
                        write_spo(
                            _assertion_id, GLOBALTT['label'], 'ClinVarAssertion_' + scv_id,
                            rcvtriples,
                            subject_category=blv.terms['InformationContentEntity'])
                        # <:_assertion_id><SEPIO_0000111><:_evidence_id>
                        write_spo(
                            _assertion_id,
                            GLOBALTT['is_assertion_supported_by_evidence'], _evidence_id,
                            rcvtriples,
                            subject_category=blv.terms['InformationContentEntity'])
                        # <:_assertion_id><dc:identifier><scv_acc + '.' + scv_accver>
                        write_spo(
                            _assertion_id,
                            GLOBALTT['identifier'], scv_acc + '.' + scv_accver, rcvtriples,
                            subject_category=blv.terms['InformationContentEntity'],
                            object_category=blv.terms['InformationContentEntity'])
                        # <:_assertion_id><SEPIO:0000018><ClinVarSubmitters:scv_orgid> .
                        write_spo(
                            _assertion_id,
                            GLOBALTT['created_by'],
                            'ClinVarSubmitters:' + scv_orgid,
                            rcvtriples,
                            subject_category=blv.terms['InformationContentEntity'],
                            object_category=blv.terms['Provider'])
                        # <ClinVarSubmitters:scv_orgid><rdf:type><foaf:organization> .
                        write_spo(
                            'ClinVarSubmitters:' + scv_orgid,
                            GLOBALTT['type'],
                            GLOBALTT['organization'],
                            rcvtriples,
                            subject_category=blv.terms['Provider'],
                            object_category=blv.terms['Provider'])
                        # <ClinVarSubmitters:scv_orgid><rdfs:label><scv_submitter> .
                        # NOTE(review): scv_submitter is only assigned when
                        # ClinVarSubmissionID was present (possibly on an earlier
                        # SCV) -- a stale or unbound value is possible; confirm.
                        write_spo(
                            'ClinVarSubmitters:' + scv_orgid, GLOBALTT['label'],
                            scv_submitter, rcvtriples,
                            subject_category=blv.terms['Provider'])
                        ################################################################
                        ClinicalSignificance = SCV_Assertion.find('./ClinicalSignificance')
                        if ClinicalSignificance is not None:
                            scv_eval_date = str(
                                ClinicalSignificance.get('DateLastEvaluated'))
                            # bummer. cannot specify xpath parent '..' targeting above .find()
                            for SCV_AttributeSet in SCV_Assertion.findall('./AttributeSet'):
                                # /SCV/AttributeSet/Attribute[@Type="AssertionMethod"]
                                SCV_Attribute = SCV_AttributeSet.find(
                                    './Attribute[@Type="AssertionMethod"]')
                                if SCV_Attribute is not None:
                                    SCV_Citation = SCV_AttributeSet.find('./Citation')
                                    # <:_assertion_id><SEPIO:0000021><scv_eval_date> .
                                    if scv_eval_date != "None":
                                        write_spo(
                                            _assertion_id,
                                            GLOBALTT['Date Created'],
                                            scv_eval_date,
                                            rcvtriples,
                                            subject_category=blv.terms[
                                                'InformationContentEntity'])
                                    scv_assert_method = SCV_Attribute.text
                                    # need to be mapped to a <sepio:100...n> curie ????
                                    # if scv_assert_method in TT:
                                    # scv_assert_id = resolve(scv_assert_method)
                                    # _assertion_method_id = '_:' + monarch_id + \
                                    #     '_assertionmethod_' + digest_id(scv_assert_method)
                                    #
                                    # changing to not include context till we have IRI
                                    # blank node, would be be nice if these were only made once
                                    _assertion_method_id = '_:' + digest_id(
                                        scv_assert_method + '_assertionmethod')
                                    write_spo(
                                        _assertion_method_id, GLOBALTT['label'],
                                        scv_assert_method + '_assertionmethod',
                                        rcvtriples,
                                        subject_category=blv.terms['Procedure'])
                                    # TRIPLES   specified_by
                                    # <:_assertion_id><SEPIO:0000041><_assertion_method_id>
                                    write_spo(
                                        _assertion_id, GLOBALTT['is_specified_by'],
                                        _assertion_method_id,
                                        rcvtriples,
                                        subject_category=blv.terms['InformationContentEntity'],
                                        object_category=blv.terms['Procedure'])
                                    # <_assertion_method_id><rdf:type><SEPIO:0000037>
                                    write_spo(
                                        _assertion_method_id,
                                        GLOBALTT['type'],
                                        GLOBALTT['assertion method'],
                                        rcvtriples,
                                        subject_category=blv.terms['Procedure'])
                                    # <_assertion_method_id><rdfs:label><scv_assert_method>
                                    write_spo(
                                        _assertion_method_id, GLOBALTT['label'],
                                        scv_assert_method, rcvtriples,
                                        subject_category=blv.terms['Procedure'])
                                    # <_assertion_method_id><ERO:0000480><scv_citation_url>
                                    if SCV_Citation is not None:
                                        SCV_Citation_URL = SCV_Citation.find('./URL')
                                        if SCV_Citation_URL is not None:
                                            write_spo(
                                                _assertion_method_id, GLOBALTT['has_url'],
                                                SCV_Citation_URL.text, rcvtriples,
                                                subject_category=blv.terms['Procedure'],
                                                object_category=blv.terms[
                                                    'InformationContentEntity'])
                            # scv_type = ClinVarAccession.get('Type')  # assert == 'SCV' ?
                            # RecordStatus  # assert =='current' ?
                            # SCV_ReviewStatus = ClinicalSignificance.find('./ReviewStatus')
                            # if SCV_ReviewStatus is not None:
                            #     scv_review = SCV_ReviewStatus.text
                            # SCV/ClinicalSignificance/Citation/ID
                            # see also:
                            # SCV/ObservedIn/ObservedData/Citation/'ID[@Source="PubMed"]
                            for SCV_Citation in ClinicalSignificance.findall(
                                    './Citation/ID[@Source="PubMed"]'):
                                scv_citation_id = SCV_Citation.text
                                # TRIPLES
                                # has_part -> has_supporting_reference
                                # <:_evidence_id><SEPIO:0000124><PMID:scv_citation_id> .
                                write_spo(
                                    _evidence_id,
                                    GLOBALTT['has_supporting_reference'],
                                    'PMID:' + scv_citation_id,
                                    rcvtriples,
                                    subject_category=blv.terms['EvidenceType'],
                                    object_category=blv.terms['Publication'])
                                # <:monarch_assoc><dc:source><PMID:scv_citation_id>
                                write_spo(
                                    monarch_assoc,
                                    GLOBALTT['Source'], 'PMID:' + scv_citation_id,
                                    rcvtriples,
                                    subject_category=blv.terms['Association'],
                                    object_category=blv.terms['Publication'])
                                # <PMID:scv_citation_id><rdf:type><IAO:0000013>
                                write_spo(
                                    'PMID:' + scv_citation_id,
                                    GLOBALTT['type'],
                                    GLOBALTT['journal article'], rcvtriples,
                                    subject_category=blv.terms['Publication'])
                                # <PMID:scv_citation_id><SEPIO:0000123><literal>
                            scv_significance = scv_geno = None
                            SCV_Description = ClinicalSignificance.find('./Description')
                            # NOTE(review): SCV_Description is an Element, so the
                            # comparison against the string 'not provided' can never
                            # match -- only the None check is effective. Probably
                            # SCV_Description.text was intended; confirm.
                            if SCV_Description not in ['not provided', None]:
                                scv_significance = SCV_Description.text.strip()
                                scv_geno = resolve(scv_significance)
                                unkwn = 'has_uncertain_significance_for_condition'
                                if scv_geno is not None and \
                                        LOCALTT[scv_significance] != unkwn and \
                                        scv_significance != 'protective':
                                    # we have the association's (SCV) pathogenicity call
                                    # and its significance is explicit
                                    ##########################################################
                                    # 2016 july.
                                    # We do not want any of the proceeding triples
                                    # unless we get here (no implicit "uncertain significance")
                                    # TRIPLES
                                    # <monarch_assoc>
                                    #     <OBAN:association_has_predicate>
                                    #         <scv_geno>
                                    write_spo(
                                        monarch_assoc,
                                        GLOBALTT['association has predicate'],
                                        scv_geno,
                                        rcvtriples,
                                        subject_category=blv.terms['Association'])
                                    # <rcv_variant_id><scv_geno><rcv_disease_db:rcv_disease_id>
                                    write_spo(
                                        genovar.id, scv_geno, rcv_disease_curie,
                                        rcvtriples,
                                        subject_category=blv.terms['SequenceVariant'],
                                        object_category=blv.terms['Disease'])
                                    # <monarch_assoc><oboInOwl:hasdbxref><ClinVar:rcv_acc> .
                                    write_spo(
                                        monarch_assoc,
                                        GLOBALTT['database_cross_reference'],
                                        'ClinVar:' + rcv_acc,
                                        rcvtriples,
                                        subject_category=blv.terms['Association'],
                                        object_category=blv.terms['InformationContentEntity'])
                                    # store association's significance to compare w/sibs
                                    pathocalls[monarch_assoc] = scv_geno
                                else:
                                    # uncertain/protective call: drop buffered triples
                                    del rcvtriples[:]
                                    continue
                            else:
                                del rcvtriples[:]
                                continue
                        # if we have deleted the triples buffer then
                        # there is no point in continuing (I don't think)
                        if not rcvtriples:
                            continue
                        # scv_assert_type = SCV_Assertion.find('./Assertion').get('Type')
                        # check scv_assert_type == 'variation to disease'?
                        # /SCV/ObservedIn/ObservedData/Citation/'ID[@Source="PubMed"]
                        for SCV_ObsIn in SCV_Assertion.findall('./ObservedIn'):
                            # /SCV/ObservedIn/Sample
                            # /SCV/ObservedIn/Method
                            for SCV_ObsData in SCV_ObsIn.findall('./ObservedData'):
                                for SCV_Citation in SCV_ObsData.findall('./Citation'):
                                    for scv_citation_id in SCV_Citation.findall(
                                            './ID[@Source="PubMed"]'):
                                        # has_supporting_reference
                                        # see also: SCV/ClinicalSignificance/Citation/ID
                                        # <_evidence_id><SEPIO:0000124><PMID:scv_citation_id>
                                        write_spo(
                                            _evidence_id,
                                            GLOBALTT['has_supporting_reference'],
                                            'PMID:' + scv_citation_id.text, rcvtriples,
                                            subject_category=blv.terms['EvidenceType'],
                                            object_category=blv.terms['Publication'])
                                        # <PMID:scv_citation_id><rdf:type><IAO:0000013>
                                        write_spo(
                                            'PMID:' + scv_citation_id.text,
                                            GLOBALTT['type'], GLOBALTT['journal article'],
                                            rcvtriples,
                                            subject_category=blv.terms['Publication'],
                                            object_category=blv.terms[
                                                'InformationContentEntity'])
                                        # <:monarch_assoc><dc:source><PMID:scv_citation_id>
                                        write_spo(
                                            monarch_assoc,
                                            GLOBALTT['Source'],
                                            'PMID:' + scv_citation_id.text, rcvtriples,
                                            subject_category=blv.terms['Association'],
                                            object_category=blv.terms['Publication'])
                                    for scv_pub_comment in SCV_Citation.findall(
                                            './Attribute[@Type="Description"]'):
                                        # <PMID:scv_citation_id><rdfs:comment><scv_pub_comment>
                                        # NOTE(review): scv_pub_comment is an Element
                                        # object passed where other calls pass .text;
                                        # confirm whether .text was intended.
                                        write_spo(
                                            'PMID:' + scv_citation_id.text,
                                            GLOBALTT['comment'], scv_pub_comment,
                                            rcvtriples,
                                            subject_category=blv.terms['Publication'])
                                # for SCV_Citation in SCV_ObsData.findall('./Citation'):
                                for SCV_Description in SCV_ObsData.findall(
                                        'Attribute[@Type="Description"]'):
                                    # <_evidence_id> <dc:description> "description"
                                    if SCV_Description.text != 'not provided':
                                        write_spo(
                                            _evidence_id,
                                            GLOBALTT['description'],
                                            SCV_Description.text,
                                            rcvtriples,
                                            subject_category=blv.terms['EvidenceType'])
                            # /SCV/ObservedIn/TraitSet
                            # /SCV/ObservedIn/Citation
                            # /SCV/ObservedIn/Co-occurrenceSet
                            # /SCV/ObservedIn/Comment
                            # /SCV/ObservedIn/XRef
                            # /SCV/Sample/Origin
                            # /SCV/Sample/Species@TaxonomyId="9606" is a constant
                            # scv_affectedstatus = \
                            #     SCV_ObsIn.find('./Sample').find('./AffectedStatus').text
                            # /SCV/ObservedIn/Method/NamePlatform
                            # /SCV/ObservedIn/Method/TypePlatform
                            # /SCV/ObservedIn/Method/Description
                            # /SCV/ObservedIn/Method/SourceType
                            # /SCV/ObservedIn/Method/MethodType
                            # /SCV/ObservedIn/Method/MethodType
                            for SCV_OIMT in SCV_ObsIn.findall('./Method/MethodType'):
                                if SCV_OIMT.text != 'not provided':
                                    scv_evidence_type = resolve(SCV_OIMT.text.strip())
                                    if scv_evidence_type is None:
                                        LOG.warning(
                                            'No mapping for scv_evidence_type: %s',
                                            SCV_OIMT.text)
                                        continue
                                    # blank node
                                    _provenance_id = '_:' + digest_id(
                                        _evidence_id + scv_evidence_type)
                                    write_spo(
                                        _provenance_id, GLOBALTT['label'],
                                        _evidence_id + scv_evidence_type, rcvtriples,
                                        subject_category=blv.terms['EvidenceType'])
                                    # TRIPLES
                                    # has_provenance -> has_supporting_study
                                    # <_evidence_id><SEPIO:0000011><_provenence_id>
                                    write_spo(
                                        _evidence_id,
                                        GLOBALTT['has_supporting_activity'],
                                        _provenance_id,
                                        rcvtriples,
                                        subject_category=blv.terms['EvidenceType'],
                                        object_category=blv.terms['EvidenceType'])
                                    # <_:provenance_id><rdf:type><scv_evidence_type>
                                    write_spo(
                                        _provenance_id, GLOBALTT['type'], scv_evidence_type,
                                        rcvtriples,
                                        subject_category=blv.terms['EvidenceType'],
                                        object_category=blv.terms['OntologyClass'])
                                    # <_:provenance_id><rdfs:label><SCV_OIMT.text>
                                    write_spo(
                                        _provenance_id, GLOBALTT['label'], SCV_OIMT.text,
                                        rcvtriples,
                                        subject_category=blv.terms['EvidenceType'])
                    # End of a SCV (a.k.a. MONARCH association)
                # End of the ClinVarSet.
                # output triples that only are known after processing sibling records
                scv_link(pathocalls, rcvtriples)
                # put this RCV's triples in the SET of all triples in this data release
                releasetriple.update(set(rcvtriples))
                del rcvtriples[:]
                ClinVarSet.clear()
        ###############################################################
        # first in is last out
        if ReleaseSet is not None and ReleaseSet.get('Type') != 'full':
            LOG.warning('Not a full release')
        rs_dated = ReleaseSet.get('Dated')  # "2016-03-01 (date_last_seen)
        releasetriple.add(
            make_spo('MonarchData:' + args.output, GLOBALTT['version_info'], rs_dated))
        # not finalized
        # releasetriple.add(
        #     make_spo(
        #         'MonarchData:' + args.output, owl:versionIRI,
        #         'MonarchArchive:' RELEASEDATE + '/ttl/' + args.output'))
    # write all remaining triples out
    print('\n'.join(list(releasetriple)), file=outtmp)
    if rjct_cnt > 0:
        LOG.warning(
            'The %i out of %i records not included are written back to \n%s',
            rjct_cnt, tot_cnt, str(reject))
    outtmp.close()
    reject.close()
    # atomically promote the finished TMP file to the real output path
    os.replace(outfile, output)
    # If the intermediate file is there it is because of a problem to fix elsewhere
    # try:
    #     os.remove(outfile)
    # except FileNotFoundError:
# Script entry point: run the full ClinVar release parse when invoked directly.
if __name__ == "__main__":
    parse()
| {
"content_hash": "20c4faff1febff92b569167fffa0bbc9",
"timestamp": "",
"source": "github",
"line_count": 1547,
"max_line_length": 91,
"avg_line_length": 43.61085972850679,
"alnum_prop": 0.5095751934307651,
"repo_name": "TomConlin/dipper",
"id": "7ad45db856e91a6b6405a70a54c322aa6a3efd20",
"size": "67491",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dipper/sources/ClinVar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "27022"
},
{
"name": "Makefile",
"bytes": "4424"
},
{
"name": "Python",
"bytes": "1340665"
},
{
"name": "Shell",
"bytes": "8549"
},
{
"name": "TSQL",
"bytes": "17023"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.schema import Index, UniqueConstraint
from freight.config import db
class Deploy(db.Model):
    """A single deploy of an app into an environment, created from a task.

    ``number`` together with ``task_id``, ``app_id`` and ``environment`` is
    enforced unique by the ``unq_deploy_number`` constraint.
    """

    __tablename__ = "deploy"
    __table_args__ = (
        Index("idx_deploy_task_id", "task_id"),
        Index("idx_deploy_app_id", "app_id"),
        # no duplicate deploy number for the same task/app/environment
        UniqueConstraint(
            "task_id", "app_id", "environment", "number", name="unq_deploy_number"
        ),
    )
    id = Column(Integer, primary_key=True)
    # rows are removed when the owning task or app is deleted (ON DELETE CASCADE)
    task_id = Column(Integer, ForeignKey("task.id", ondelete="CASCADE"), nullable=False)
    app_id = Column(Integer, ForeignKey("app.id", ondelete="CASCADE"), nullable=False)
    environment = Column(String(64), nullable=False, default="production")
    # deploy sequence number; uniqueness scope is set by unq_deploy_number above
    number = Column(Integer, nullable=False)
| {
"content_hash": "1ea904cbaee02c38688cc25855864296",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 88,
"avg_line_length": 37.23809523809524,
"alnum_prop": 0.659846547314578,
"repo_name": "getsentry/freight",
"id": "9fb8f7eeeb7a73eccc863545fcf46cef812a502f",
"size": "782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freight/models/deploy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3875"
},
{
"name": "HTML",
"bytes": "243"
},
{
"name": "JavaScript",
"bytes": "63952"
},
{
"name": "Less",
"bytes": "15455"
},
{
"name": "Makefile",
"bytes": "749"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "274562"
},
{
"name": "Ruby",
"bytes": "4941"
},
{
"name": "Shell",
"bytes": "864"
}
],
"symlink_target": ""
} |
"""The PHDI image path specification resolver helper implementation."""
from dfvfs.file_io import phdi_file_io
from dfvfs.lib import definitions
from dfvfs.resolver_helpers import manager
from dfvfs.resolver_helpers import resolver_helper
class PHDIResolverHelper(resolver_helper.ResolverHelper):
  """Resolver helper for PHDI image path specifications."""

  TYPE_INDICATOR = definitions.TYPE_INDICATOR_PHDI

  def NewFileObject(self, resolver_context, path_spec):
    """Creates a new file input/output (IO) object.

    Args:
      resolver_context (Context): resolver context.
      path_spec (PathSpec): a path specification.

    Returns:
      FileIO: file input/output (IO) object.
    """
    file_object = phdi_file_io.PHDIFile(resolver_context, path_spec)
    return file_object


manager.ResolverHelperManager.RegisterHelper(PHDIResolverHelper())
| {
"content_hash": "9efb8d74b34db19d671290626dbc98b7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 71,
"avg_line_length": 29.925925925925927,
"alnum_prop": 0.7524752475247525,
"repo_name": "joachimmetz/dfvfs",
"id": "e73457908923993c9931de6d49fabd43ca7672b6",
"size": "832",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "dfvfs/resolver_helpers/phdi_resolver_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14212"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2176548"
},
{
"name": "Shell",
"bytes": "19355"
}
],
"symlink_target": ""
} |
from tempest.lib.common.utils import data_utils
from tempest import test
from neutron.tests.tempest.api import base
class MeteringTestJSON(base.BaseAdminNetworkTest):
    """
    Tests the following operations in the Neutron API using the REST client for
    Neutron:
        List, Show, Create, Delete Metering labels
        List, Show, Create, Delete Metering labels rules
    """
    @classmethod
    @test.requires_ext(extension="metering", service="network")
    def resource_setup(cls):
        # Create one shared label and one shared rule for the read-only tests.
        super(MeteringTestJSON, cls).resource_setup()
        description = "metering label created by tempest"
        name = data_utils.rand_name("metering-label")
        cls.metering_label = cls.create_metering_label(name, description)
        # _ip_version is set by the base class (4) or the IPv6 subclass (6).
        remote_ip_prefix = ("10.0.0.0/24" if cls._ip_version == 4
                            else "fd02::/64")
        direction = "ingress"
        cls.metering_label_rule = cls.create_metering_label_rule(
            remote_ip_prefix, direction,
            metering_label_id=cls.metering_label['id'])
    def _delete_metering_label(self, metering_label_id):
        # Deletes a label and verifies if it is deleted or not
        self.admin_client.delete_metering_label(metering_label_id)
        # Asserting that the label is not found in list after deletion
        labels = self.admin_client.list_metering_labels(id=metering_label_id)
        self.assertEqual(len(labels['metering_labels']), 0)
    def _delete_metering_label_rule(self, metering_label_rule_id):
        # Deletes a rule and verifies if it is deleted or not
        self.admin_client.delete_metering_label_rule(
            metering_label_rule_id)
        # Asserting that the rule is not found in list after deletion
        rules = (self.admin_client.list_metering_label_rules(
                 id=metering_label_rule_id))
        self.assertEqual(len(rules['metering_label_rules']), 0)
    @test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
    def test_list_metering_labels(self):
        # Verify label filtering
        # id=33 is assumed not to exist, so the filtered list must be empty.
        body = self.admin_client.list_metering_labels(id=33)
        metering_labels = body['metering_labels']
        self.assertEqual(0, len(metering_labels))
    @test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
    def test_create_delete_metering_label_with_filters(self):
        # Creates a label
        name = data_utils.rand_name('metering-label-')
        description = "label created by tempest"
        body = self.admin_client.create_metering_label(name=name,
                                                       description=description)
        metering_label = body['metering_label']
        self.addCleanup(self._delete_metering_label,
                        metering_label['id'])
        # Assert whether created labels are found in labels list or fail
        # if created labels are not found in labels list
        labels = (self.admin_client.list_metering_labels(
                  id=metering_label['id']))
        self.assertEqual(len(labels['metering_labels']), 1)
    @test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
    def test_show_metering_label(self):
        # Verifies the details of a label
        body = self.admin_client.show_metering_label(self.metering_label['id'])
        metering_label = body['metering_label']
        self.assertEqual(self.metering_label['id'], metering_label['id'])
        self.assertEqual(self.metering_label['tenant_id'],
                         metering_label['tenant_id'])
        self.assertEqual(self.metering_label['name'], metering_label['name'])
        self.assertEqual(self.metering_label['description'],
                         metering_label['description'])
    @test.idempotent_id('cc832399-6681-493b-9d79-0202831a1281')
    def test_list_metering_label_rules(self):
        # Verify rule filtering
        # As above, a non-existent id must yield an empty result list.
        body = self.admin_client.list_metering_label_rules(id=33)
        metering_label_rules = body['metering_label_rules']
        self.assertEqual(0, len(metering_label_rules))
    @test.idempotent_id('f4d547cd-3aee-408f-bf36-454f8825e045')
    def test_create_delete_metering_label_rule_with_filters(self):
        # Creates a rule
        remote_ip_prefix = ("10.0.1.0/24" if self._ip_version == 4
                            else "fd03::/64")
        body = (self.admin_client.create_metering_label_rule(
                remote_ip_prefix=remote_ip_prefix,
                direction="ingress",
                metering_label_id=self.metering_label['id']))
        metering_label_rule = body['metering_label_rule']
        self.addCleanup(self._delete_metering_label_rule,
                        metering_label_rule['id'])
        # Assert whether created rules are found in rules list or fail
        # if created rules are not found in rules list
        rules = (self.admin_client.list_metering_label_rules(
                 id=metering_label_rule['id']))
        self.assertEqual(len(rules['metering_label_rules']), 1)
    @test.idempotent_id('b7354489-96ea-41f3-9452-bace120fb4a7')
    def test_show_metering_label_rule(self):
        # Verifies the details of a rule
        body = (self.admin_client.show_metering_label_rule(
                self.metering_label_rule['id']))
        metering_label_rule = body['metering_label_rule']
        self.assertEqual(self.metering_label_rule['id'],
                         metering_label_rule['id'])
        self.assertEqual(self.metering_label_rule['remote_ip_prefix'],
                         metering_label_rule['remote_ip_prefix'])
        self.assertEqual(self.metering_label_rule['direction'],
                         metering_label_rule['direction'])
        self.assertEqual(self.metering_label_rule['metering_label_id'],
                         metering_label_rule['metering_label_id'])
        self.assertFalse(metering_label_rule['excluded'])
# Re-runs the whole MeteringTestJSON suite with IPv6 prefixes
# (resource_setup branches on _ip_version).
class MeteringIpV6TestJSON(MeteringTestJSON):
    _ip_version = 6
| {
"content_hash": "95c696127068d190dc5f80ef29f222eb",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 79,
"avg_line_length": 47.416,
"alnum_prop": 0.6367470895900118,
"repo_name": "bigswitch/neutron",
"id": "42b8e489320a8cea439f3800eb0f5bf59196f331",
"size": "6534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/tempest/api/test_metering_extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "8468247"
},
{
"name": "Shell",
"bytes": "14648"
}
],
"symlink_target": ""
} |
"""
Compare the errors of some schemes.
"""
import numpy as np
import quadrature
from matplotlib import pyplot as plt
from matplotlib import style
style.use("ggplot")
def f(x):
    """Separable test integrand: exp(x[0]) * exp(x[1])."""
    first, second = x[0], x[1]
    return np.exp(first) * np.exp(second)
# Quadrature schemes whose convergence is being compared.
# (A single list literal replaces the original [a] + [b] + [c] concatenation.)
schemes = [
    quadrature.triangle.Strang(6),
    quadrature.triangle.Cubtri(),
    quadrature.triangle.LynessJespersen(6),
]
# Shrinking reference triangles with leg length a = 2**-k, k = 0..9.
sample_sizes = [0.5 ** k for k in range(10)]
errors = np.empty((len(schemes), len(sample_sizes)))
for i, scheme in enumerate(schemes):
    for j, a in enumerate(sample_sizes):
        # Right triangle with legs of length a along the coordinate axes.
        triangle = np.array([[0.0, 0.0], [a, 0.0], [0.0, a]])
        # Closed form of the integral of f over that triangle.
        exact_value = 1.0 + np.exp(a) * (a - 1.0)
        val = quadrature.triangle.integrate(f, triangle, scheme)
        # Idiomatic single-step numpy indexing instead of errors[i][j].
        errors[i, j] = abs(exact_value - val)
# One error curve per scheme on a log-log plot; legend placed outside axes.
for scheme, err in zip(schemes, errors):
    plt.loglog(sample_sizes, err, "o-", label=scheme.name)
plt.legend(loc="upper left", bbox_to_anchor=(1, 1))
plt.show()
| {
"content_hash": "1ade80109ecb17dda8380144a5a8c738",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 64,
"avg_line_length": 25.583333333333332,
"alnum_prop": 0.6406080347448425,
"repo_name": "nschloe/quadpy",
"id": "a5ee4e7da7930dab3f9f8d3596c28f1d4b353326",
"size": "921",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/compare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "992"
},
{
"name": "Python",
"bytes": "897850"
}
],
"symlink_target": ""
} |
from tg import expose, redirect, flash, config, validate, request
from tg.decorators import with_trailing_slash, without_trailing_slash
from formencode import validators as V
from pylons import c
from webob import exc
import pymongo
from allura.lib import search
from allura.app import SitemapEntry
from allura import model as M
from allura.lib.widgets import project_list as plw
from allura.controllers import BaseController
# Widget container: holds the shared widget instances used by the
# controllers below (accessed as W.project_summary).
class W:
    project_summary = plw.ProjectSummary()
class SearchController(BaseController):
    """Site-wide search endpoint."""

    @expose('jinja:allura:templates/search_index.html')
    @validate(dict(q=V.UnicodeString(),
                   history=V.StringBool(if_empty=False)))
    @with_trailing_slash
    def index(self, q=None, history=False, **kw):
        """Render results for query *q*, optionally including history docs."""
        count = 0
        results = []
        if q:
            # Restrict (or not) to historical documents via the filter query.
            results = search.search(q, fq='is_history_b:%s' % history)
            if results:
                count = results.hits
        else:
            q = ''
        return dict(q=q, history=history, results=results or [], count=count)
class ProjectBrowseController(BaseController):
    """Browse projects by (up to two levels of) category."""
    def __init__(self, category_name=None, parent_category=None):
        self.parent_category = parent_category
        self.nav_stub = '/browse/'
        # Extra query-filter terms merged into the project lookup by
        # _find_projects; subclasses/callers may populate this.
        self.additional_filters = {}
        if category_name:
            parent_id = parent_category and parent_category._id or None
            self.category = M.ProjectCategory.query.find(dict(name=category_name,parent_id=parent_id)).first()
            if not self.category:
                # Python 2 raise syntax; unknown category -> 404.
                raise exc.HTTPNotFound, request.path
        else:
            self.category = None
    def _build_title(self):
        # Page title: "All Projects", "<category>", or "<parent>: <category>".
        title = "All Projects"
        if self.category:
            title = self.category.label
        if self.parent_category:
            title = "%s: %s" % (self.parent_category.label, title)
        return title
    def _build_nav(self):
        # Sidebar: all top-level categories, expanding subcategories only
        # under the currently-selected category (or its parent).
        categories = M.ProjectCategory.query.find({'parent_id':None}).sort('name').all()
        nav = []
        for cat in categories:
            nav.append(SitemapEntry(
                cat.label,
                self.nav_stub+cat.name,
                ))
            if (self.category and self.category._id == cat._id and cat.subcategories) or (
                    self.parent_category and self.parent_category._id == cat._id):
                for subcat in cat.subcategories:
                    nav.append(SitemapEntry(
                        subcat.label,
                        self.nav_stub+cat.name+'/'+subcat.name,
                        ))
        return nav
    def _find_projects(self,sort='alpha', limit=None, start=0):
        # Returns (projects, total_count); limit/start give paging.
        if self.category:
            ids = [self.category._id]
            # warning! this is written with the assumption that categories
            # are only two levels deep like the existing site
            if self.category.subcategories:
                ids = ids + [cat._id for cat in self.category.subcategories]
            pq = M.Project.query.find(dict(category_id={'$in':ids}, deleted=False, **self.additional_filters))
        else:
            pq = M.Project.query.find(dict(deleted=False, **self.additional_filters))
        if sort=='alpha':
            pq.sort('name')
        else:
            # Any non-'alpha' sort value means newest-first.
            pq.sort('last_updated', pymongo.DESCENDING)
        count = pq.count()
        if limit:
            projects = pq.skip(start).limit(int(limit)).all()
        else:
            projects = pq.all()
        return (projects, count)
    @expose()
    def _lookup(self, category_name, *remainder):
        # URL traversal: /browse/<cat>/<subcat> nests another controller.
        return ProjectBrowseController(category_name=category_name, parent_category=self.category), remainder
    @expose('jinja:allura:templates/project_list.html')
    @without_trailing_slash
    def index(self, **kw):
        c.project_summary = W.project_summary
        projects, count = self._find_projects()
        title=self._build_title()
        c.custom_sidebar_menu = self._build_nav()
        return dict(projects=projects,title=title,text=None)
| {
"content_hash": "d38cec366d8e13f8710987b30a5bd243",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 110,
"avg_line_length": 38.009433962264154,
"alnum_prop": 0.6013899230578307,
"repo_name": "Bitergia/allura",
"id": "2b9ed25a5a782246bef029bfcd03e39a7644df31",
"size": "4029",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Allura/allura/controllers/search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "D",
"bytes": "2985957"
},
{
"name": "JavaScript",
"bytes": "647110"
},
{
"name": "Perl",
"bytes": "184"
},
{
"name": "Puppet",
"bytes": "2677"
},
{
"name": "Python",
"bytes": "1990317"
},
{
"name": "Ruby",
"bytes": "4134"
},
{
"name": "Shell",
"bytes": "6672"
}
],
"symlink_target": ""
} |
"Makes working with XML feel like you are working with JSON"
"Note: This is not our library. Kudos go to Martin Blech of https://github.com/martinblech/xmltodict"
from xml.parsers import expat
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
try: # pragma no cover
from cStringIO import StringIO
except ImportError: # pragma no cover
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try: # pragma no cover
from collections import OrderedDict
except ImportError: # pragma no cover
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
try: # pragma no cover
_basestring = basestring
except NameError: # pragma no cover
_basestring = str
try: # pragma no cover
_unicode = unicode
except NameError: # pragma no cover
_unicode = str
__author__ = 'Martin Blech'
__version__ = '0.5.1'
__license__ = 'MIT'
class ParsingInterrupted(Exception):
    """Raised internally when an item callback asks parsing to stop."""
class _DictSAXHandler(object):
    """SAX-style handler that folds expat events into nested dicts.

    Tracks the element path in ``self.path`` and a stack of partially
    built (item, cdata) pairs in ``self.stack``; ``endElement`` pops the
    stack and attaches the finished child to its parent.
    """
    def __init__(self,
                 item_depth=0,
                 item_callback=lambda *args: True,
                 xml_attribs=True,
                 attr_prefix='@',
                 cdata_key='#text',
                 force_cdata=False,
                 cdata_separator='',
                 postprocessor=None,
                 dict_constructor=OrderedDict,
                 strip_whitespace=True):
        self.path = []
        self.stack = []
        self.data = None
        self.item = None
        self.item_depth = item_depth
        self.xml_attribs = xml_attribs
        self.item_callback = item_callback
        self.attr_prefix = attr_prefix
        self.cdata_key = cdata_key
        self.force_cdata = force_cdata
        self.cdata_separator = cdata_separator
        self.postprocessor = postprocessor
        self.dict_constructor = dict_constructor
        self.strip_whitespace = strip_whitespace
    def startElement(self, name, attrs):
        # expat with ordered_attributes gives a flat [k1, v1, k2, v2, ...]
        # list; pair it back up into a dict.
        attrs = self.dict_constructor(zip(attrs[0::2], attrs[1::2]))
        self.path.append((name, attrs or None))
        if len(self.path) > self.item_depth:
            # Descend: save the parent's state and start a fresh item.
            self.stack.append((self.item, self.data))
            if self.xml_attribs:
                attrs = self.dict_constructor(
                    (self.attr_prefix+key, value)
                    for (key, value) in attrs.items())
            else:
                attrs = None
            self.item = attrs or None
            self.data = None
    def endElement(self, name):
        if len(self.path) == self.item_depth:
            # Streaming mode: report the finished item to the callback.
            item = self.item
            if item is None:
                item = self.data
            should_continue = self.item_callback(self.path, item)
            if not should_continue:
                raise ParsingInterrupted()
        if len(self.stack):
            # Pop back to the parent and attach the finished child.
            item, data = self.item, self.data
            self.item, self.data = self.stack.pop()
            if self.strip_whitespace and data is not None:
                data = data.strip() or None
            if data and self.force_cdata and item is None:
                item = self.dict_constructor()
            if item is not None:
                if data:
                    self.push_data(item, self.cdata_key, data)
                self.item = self.push_data(self.item, name, item)
            else:
                self.item = self.push_data(self.item, name, data)
        else:
            self.item = self.data = None
        self.path.pop()
    def characters(self, data):
        # Character data may arrive in several chunks; join them.
        if not self.data:
            self.data = data
        else:
            self.data += self.cdata_separator + data
    def push_data(self, item, key, data):
        # Insert (key, data) into item, converting a repeated key into a
        # list of values; the optional postprocessor may rewrite or drop it.
        if self.postprocessor is not None:
            result = self.postprocessor(self.path, key, data)
            if result is None:
                return item
            key, data = result
        if item is None:
            item = self.dict_constructor()
        try:
            value = item[key]
            if isinstance(value, list):
                value.append(data)
            else:
                item[key] = [value, data]
        except KeyError:
            item[key] = data
        return item
def parse(xml_input, encoding='utf-8', expat=expat, *args, **kwargs):
    """Parse the given XML input and convert it into a dictionary.
    `xml_input` can either be a `string` or a file-like object.
    If `xml_attribs` is `True`, element attributes are put in the dictionary
    among regular child elements, using `@` as a prefix to avoid collisions. If
    set to `False`, they are just ignored.
    Simple example::
        # >>> doc = xmltodict.parse(\"\"\"
        # ... <a prop="x">
        # ...   <b>1</b>
        # ...   <b>2</b>
        # ... </a>
        # ... \"\"\")
        # >>> doc['a']['@prop']
        # u'x'
        # >>> doc['a']['b']
        # [u'1', u'2']
    If `item_depth` is `0`, the function returns a dictionary for the root
    element (default behavior). Otherwise, it calls `item_callback` every time
    an item at the specified depth is found and returns `None` in the end
    (streaming mode).
    The callback function receives two parameters: the `path` from the document
    root to the item (name-attribs pairs), and the `item` (dict). If the
    callback's return value is false-ish, parsing will be stopped with the
    :class:`ParsingInterrupted` exception.
    Streaming example::
        # >>> def handle(path, item):
        # ...     print 'path:%s item:%s' % (path, item)
        # ...     return True
        # ...
        # >>> xmltodict.parse(\"\"\"
        # ... <a prop="x">
        # ...   <b>1</b>
        # ...   <b>2</b>
        # ... </a>\"\"\", item_depth=2, item_callback=handle)
        # path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:1
        # path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:2
    The optional argument `postprocessor` is a function that takes `path`, `key`
    and `value` as positional arguments and returns a new `(key, value)` pair
    where both `key` and `value` may have changed. Usage example::
        # >>> def postprocessor(path, key, value):
        # ...     try:
        # ...         return key + ':int', int(value)
        # ...     except (ValueError, TypeError):
        # ...         return key, value
        # >>> xmltodict.parse('<a><b>1</b><b>2</b><b>x</b></a>',
        # ...                 postprocessor=postprocessor)
        # OrderedDict([(u'a', OrderedDict([(u'b:int', [1, 2]), (u'b', u'x')]))])
    You can pass an alternate version of `expat` (such as `defusedexpat`) by
    using the `expat` parameter. E.g:
    # >>> import defusedexpat
    # >>> xmltodict.parse('<a>hello</a>', expat=defusedexpat.pyexpat)
    # OrderedDict([(u'a', u'hello')])
    """
    # All remaining positional/keyword args configure the SAX handler.
    handler = _DictSAXHandler(*args, **kwargs)
    parser = expat.ParserCreate()
    # Needed so startElement receives the flat [k, v, k, v, ...] attr list.
    parser.ordered_attributes = True
    parser.StartElementHandler = handler.startElement
    parser.EndElementHandler = handler.endElement
    parser.CharacterDataHandler = handler.characters
    try:
        # EAFP: assume a file-like object first; string input makes
        # ParseFile raise TypeError/AttributeError and we fall through.
        parser.ParseFile(xml_input)
    except (TypeError, AttributeError):
        if isinstance(xml_input, _unicode):
            xml_input = xml_input.encode(encoding)
        parser.Parse(xml_input, True)
    return handler.item
def _emit(key, value, content_handler,
          attr_prefix='@',
          cdata_key='#text',
          root=True,
          preprocessor=None):
    """Recursively write element *key* with content *value* to the SAX
    *content_handler* (used by :func:`unparse`)."""
    if preprocessor is not None:
        result = preprocessor(key, value)
        if result is None:
            # Preprocessor returning None drops this element entirely.
            return
        key, value = result
    if not isinstance(value, (list, tuple)):
        value = [value]
    if root and len(value) > 1:
        raise ValueError('document with multiple roots')
    for v in value:
        # Normalize every value into a dict so one emission path below
        # handles attributes, cdata and children uniformly.
        if v is None:
            v = OrderedDict()
        elif not isinstance(v, dict):
            v = _unicode(v)
        if isinstance(v, _basestring):
            v = OrderedDict(((cdata_key, v),))
        cdata = None
        attrs = OrderedDict()
        children = []
        for ik, iv in v.items():
            if ik == cdata_key:
                cdata = iv
                continue
            if ik.startswith(attr_prefix):
                # Strip the prefix back off to get the real attribute name.
                attrs[ik[len(attr_prefix):]] = iv
                continue
            children.append((ik, iv))
        content_handler.startElement(key, AttributesImpl(attrs))
        for child_key, child_value in children:
            _emit(child_key, child_value, content_handler,
                  attr_prefix, cdata_key, False, preprocessor)
        if cdata is not None:
            content_handler.characters(cdata)
        content_handler.endElement(key)
def unparse(item, output=None, encoding='utf-8', **kwargs):
    """Serialize a dictionary back into an XML document (inverse of `parse`).

    `item` must contain exactly one key/value pair: the root element. If
    `output` (a file-like object) is given, the XML is written there and
    nothing is returned; otherwise the document is returned as a string.
    Extra keyword arguments are forwarded to the emitter.
    """
    ((key, value),) = item.items()
    must_return = False
    if output is None:  # fixed: identity comparison to None, not `== None`
        output = StringIO()
        must_return = True
    content_handler = XMLGenerator(output, encoding)
    content_handler.startDocument()
    _emit(key, value, content_handler, **kwargs)
    content_handler.endDocument()
    if must_return:
        value = output.getvalue()
        try:  # pragma no cover
            # On Python 2 the buffer may hold encoded bytes; decode them.
            value = value.decode(encoding)
        except AttributeError:  # pragma no cover
            pass
        return value
if __name__ == '__main__':  # pragma: no cover
    # CLI mode: stream-parse XML from stdin at the depth given on the
    # command line, marshalling each (path, item) pair to stdout.
    import sys
    import marshal
    (item_depth,) = sys.argv[1:]
    item_depth = int(item_depth)
    def handle_item(path, item):
        marshal.dump((path, item), sys.stdout)
        return True
    try:
        root = parse(sys.stdin,
                     item_depth=item_depth,
                     item_callback=handle_item,
                     dict_constructor=dict)
        if item_depth == 0:
            # Depth 0 means no streaming happened; emit the whole document.
            handle_item([], root)
    except KeyboardInterrupt:
        pass
| {
"content_hash": "fa6243fbf8f58f2ae87d67201580f8a2",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 101,
"avg_line_length": 34.55789473684211,
"alnum_prop": 0.558026195552848,
"repo_name": "norsecorp/ipviking-api-python",
"id": "9db7c1fc72e5d6ccbe1925f7cf902bfc2f9cce84",
"size": "9871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipviking_api_python/helpers/xmltodict.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "54952"
}
],
"symlink_target": ""
} |
"""This is an experimental deployment script for Oppia. It should only be used
for experimental testing, since it omits several safeguards: for example, it
does not run tests and it does not use a 'deploy_data' folder.
USE THIS SCRIPT AT YOUR OWN RISK!
Note:
1. Before running this script, you must install third-party dependencies by
running
bash scripts/start.sh
at least once.
2. This script should be run from the oppia root folder:
python scripts/experimental_deploy.py --app_name=[APP_NAME]
where [APP_NAME] is the name of your app. Note that the root folder MUST be
named 'oppia'.
"""
# Pylint has issues with the import order of argparse.
# pylint: disable=wrong-import-order
import argparse
import datetime
import os
import shutil
import subprocess
# pylint: enable=wrong-import-order
import common # pylint: disable=relative-import
# Command-line parsing and module-level constants. This runs at import
# time; importing this script without a valid --app_name raises.
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
    '--app_name', help='name of the app to deploy to', type=str)
PARSED_ARGS = _PARSER.parse_args()
if PARSED_ARGS.app_name:
    APP_NAME = PARSED_ARGS.app_name
    # The production/test apps must go through the full deploy.py flow.
    if APP_NAME in ['oppiaserver', 'oppiatestserver']:
        raise Exception(
            'This script should not be used for updating %s. Please use '
            'scripts/deploy.py instead.' % APP_NAME)
else:
    raise Exception('No app name specified.')
CURRENT_DATETIME = datetime.datetime.utcnow()
# Release dir name embeds a sanitized app name ('.' and ':' -> '-') plus a
# UTC timestamp, e.g. deploy-EXPERIMENT-my-app-20170101-120000.
RELEASE_DIR_NAME = 'deploy-EXPERIMENT-%s-%s' % (
    '-'.join('-'.join(APP_NAME.split('.')).split(':')),
    CURRENT_DATETIME.strftime('%Y%m%d-%H%M%S'))
RELEASE_DIR_PATH = os.path.join(os.getcwd(), '..', RELEASE_DIR_NAME)
APPCFG_PATH = os.path.join(
    '..', 'oppia_tools', 'google_appengine_1.9.50', 'google_appengine',
    'appcfg.py')
LOG_FILE_PATH = os.path.join('..', 'experimental_deploy.log')
THIRD_PARTY_DIR = os.path.join('.', 'third_party')
def preprocess_release():
    """Pre-processes release files.

    This function should be called from within RELEASE_DIR_NAME. Currently it
    does the following:
    (1) Changes the app name in app.yaml to APP_NAME.
    """
    # Change the app name in app.yaml. `with` blocks guarantee both file
    # handles are closed (the original leaked them, so the rewritten file
    # was not guaranteed to be flushed before the subsequent deploy step).
    with open('app.yaml', 'r') as yaml_file:
        content = yaml_file.read()
    os.remove('app.yaml')
    content = content.replace('oppiaserver', APP_NAME)
    with open('app.yaml', 'w+') as yaml_file:
        yaml_file.write(content)
# Main deployment sequence (runs at import; Python 2 print statements).
# Check that the current directory is correct.
common.require_cwd_to_be_oppia()
CURRENT_GIT_VERSION = subprocess.check_output(
    ['git', 'rev-parse', 'HEAD']).strip()
print ''
print 'Starting experimental deployment process.'
if not os.path.exists(THIRD_PARTY_DIR):
    raise Exception(
        'Could not find third_party directory at %s. Please run start.sh '
        'prior to running this script.' % THIRD_PARTY_DIR)
# Create a folder in which to save the release candidate.
print 'Ensuring that the release directory parent exists'
common.ensure_directory_exists(os.path.dirname(RELEASE_DIR_PATH))
# Copy files to the release directory. Omits the .git subfolder.
print 'Copying files to the release directory'
shutil.copytree(
    os.getcwd(), RELEASE_DIR_PATH, ignore=shutil.ignore_patterns('.git'))
# Change the current directory to the release candidate folder.
with common.CD(RELEASE_DIR_PATH):
    # Defensive check that the CD context manager actually moved us.
    if not os.getcwd().endswith(RELEASE_DIR_NAME):
        raise Exception(
            'Invalid directory accessed during deployment: %s' % os.getcwd())
    print 'Changing directory to %s' % os.getcwd()
    print 'Preprocessing release...'
    preprocess_release()
    # Do a build; ensure there are no errors.
    print 'Building and minifying scripts...'
    subprocess.check_output(['python', 'scripts/build.py'])
    # Deploy to GAE.
    subprocess.check_output([APPCFG_PATH, 'update', '.'])
    # Writing log entry.
    common.ensure_directory_exists(os.path.dirname(LOG_FILE_PATH))
    with open(LOG_FILE_PATH, 'a') as log_file:
        log_file.write(
            'Successfully completed experimental deployment to %s at %s '
            '(version %s)\n' % (
                APP_NAME, CURRENT_DATETIME.strftime('%Y-%m-%d %H:%M:%S'),
                CURRENT_GIT_VERSION))
print 'Returning to oppia/ root directory.'
print 'Done!'
| {
"content_hash": "77611eb5f5b3ad8dfda62080d90ba373",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 31.246268656716417,
"alnum_prop": 0.6782899450680678,
"repo_name": "terrameijar/oppia",
"id": "7fd729453cbb90c6ca4f26c761961fa130138de0",
"size": "4792",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "scripts/experimental_deploy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "95342"
},
{
"name": "HTML",
"bytes": "850374"
},
{
"name": "JavaScript",
"bytes": "2597367"
},
{
"name": "Python",
"bytes": "3177521"
},
{
"name": "Shell",
"bytes": "46904"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
from scipy.stats import norm
from numpy.random import normal
import pytest
import sys
sys.path.append("..")
from chainconsumer.chain import Chain
from chainconsumer.chainconsumer import ChainConsumer
class TestChain(object):
d = normal(size=(100, 3))
d2 = normal(size=(1000000, 3))
bad = d.copy()
bad[0, 0] = np.nan
p = ["a", "b", "c"]
n = "A"
w = np.ones(100)
w2 = np.ones(1000000)
def test_good_chain(self):
Chain(self.d, self.p, self.n)
def test_good_chain_weights1(self):
Chain(self.d, self.p, self.n, self.w)
def test_good_chain_weights2(self):
Chain(self.d, self.p, self.n, self.w[None])
def test_good_chain_weights3(self):
Chain(self.d, self.p, self.n, self.w[None].T)
def test_chain_with_bad_weights1(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, weights=np.ones((50, 1)))
def test_chain_with_bad_weights2(self):
with pytest.raises(AssertionError):
w = self.w.copy()
w[10] = np.inf
Chain(self.d, self.p, self.n, weights=w)
def test_chain_with_bad_weights3(self):
with pytest.raises(AssertionError):
w = self.w.copy()
w[10] = np.nan
Chain(self.d, self.p, self.n, weights=w)
def test_chain_with_bad_weights4(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, weights=np.ones((50, 2)))
def test_chain_with_bad_name1(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, 1)
def test_chain_with_bad_name2(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, None)
def test_chain_with_bad_params1(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p[:-1], self.n)
def test_chain_with_bad_params2(self):
with pytest.raises(AssertionError):
Chain(self.d, ["A", "B", 0], self.n)
def test_chain_with_bad_params3(self):
with pytest.raises(AssertionError):
Chain(self.d, None, self.n)
def test_chain_with_bad_chain_initial_success1(self):
Chain(self.bad, self.p, self.n)
def test_chain_with_bad_chain_initial_success2(self):
c = Chain(self.bad, self.p, self.n)
c.get_data(1)
def test_chain_with_bad_chain_fails_on_access1(self):
c = Chain(self.bad, self.p, self.n)
with pytest.raises(AssertionError):
c.get_data(0)
def test_chain_with_bad_chain_fails_on_access2(self):
c = Chain(self.bad, self.p, self.n)
with pytest.raises(AssertionError):
c.get_data(self.p[0])
def test_good_grid(self):
Chain(self.d, self.p, self.n, grid=False)
def test_bad_grid1(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, grid=0)
def test_bad_grid2(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, grid=None)
def test_bad_grid3(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, grid="False")
def test_good_walkers1(self):
Chain(self.d, self.p, self.n, walkers=10)
def test_good_walkers2(self):
Chain(self.d, self.p, self.n, walkers=10.0)
def test_bad_walkers1(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, walkers=2000)
def test_bad_walkers2(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, walkers=11)
def test_bad_walkers3(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, walkers="5")
def test_bad_walkers4(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, walkers=2.5)
def test_good_posterior1(self):
Chain(self.d, self.p, self.n, posterior=np.ones(100))
def test_good_posterior2(self):
Chain(self.d, self.p, self.n, posterior=np.ones((100, 1)))
def test_good_posterior3(self):
Chain(self.d, self.p, self.n, posterior=np.ones((1, 100)))
def test_bad_posterior1(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, posterior=np.ones((2, 50)))
def test_bad_posterior2(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, posterior=np.ones(50))
def test_bad_posterior3(self):
posterior = np.ones(100)
posterior[0] = np.nan
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, posterior=posterior)
def test_bad_posterior4(self):
posterior = np.ones(100)
posterior[0] = np.inf
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, posterior=posterior)
def test_bad_posterior5(self):
posterior = np.ones(100)
posterior[0] = -np.inf
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, posterior=posterior)
def test_good_num_free_params1(self):
Chain(self.d, self.p, self.n, num_free_params=2)
def test_good_num_free_params2(self):
Chain(self.d, self.p, self.n, num_free_params=2.0)
def test_bad_num_free_params1(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, num_free_params="2.5")
def test_bad_num_free_params2(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, num_free_params=np.inf)
def test_bad_num_free_params3(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, num_free_params=np.nan)
def test_bad_num_free_params4(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, num_free_params=-10)
def test_good_num_eff_data_points1(self):
Chain(self.d, self.p, self.n, num_eff_data_points=2)
def test_good_num_eff_data_points2(self):
Chain(self.d, self.p, self.n, num_eff_data_points=20.4)
def test_bad_num_eff_data_points1(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, num_eff_data_points="2.5")
def test_bad_num_eff_data_points2(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, num_eff_data_points=np.nan)
def test_bad_num_eff_data_points3(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, num_eff_data_points=np.inf)
def test_bad_num_eff_data_points4(self):
with pytest.raises(AssertionError):
Chain(self.d, self.p, self.n, num_eff_data_points=-100)
def test_color_data_none(self):
c = ChainConsumer()
c.add_chain(self.d, parameters=self.p, name=self.n, weights=self.w, posterior=np.ones(100))
c.configure(color_params=None)
chain = c.chains[0]
assert chain.get_color_data() is None
def test_color_data_p1(self):
c = ChainConsumer()
c.add_chain(self.d, parameters=self.p, name=self.n, weights=self.w, posterior=np.ones(100))
c.configure(color_params=self.p[0])
chain = c.chains[0]
assert np.all(chain.get_color_data() == self.d[:, 0])
def test_color_data_w(self):
c = ChainConsumer()
c.add_chain(self.d, parameters=self.p, name=self.n, weights=self.w, posterior=np.ones(100))
c.configure(color_params="weights")
chain = c.chains[0]
assert np.all(chain.get_color_data() == self.w)
def test_color_data_logw(self):
c = ChainConsumer()
c.add_chain(self.d, parameters=self.p, name=self.n, weights=self.w, posterior=np.ones(100))
c.configure(color_params="log_weights")
chain = c.chains[0]
assert np.all(chain.get_color_data() == np.log(self.w))
def test_color_data_posterior(self):
c = ChainConsumer()
c.add_chain(self.d, parameters=self.p, name=self.n, weights=self.w, posterior=np.ones(100))
c.configure(color_params="posterior")
chain = c.chains[0]
assert np.all(chain.get_color_data() == np.ones(100))
def test_override_color(self):
c = ChainConsumer()
c.add_chain(self.d, parameters=self.p, color="#4286f4")
c.configure()
assert c.chains[0].config["color"] == "#4286f4"
def test_override_linewidth(self):
c = ChainConsumer()
c.add_chain(self.d, parameters=self.p, linewidth=2.0)
c.configure(linewidths=[100])
assert c.chains[0].config["linewidth"] == 100
def test_override_linestyle(self):
    """A per-chain linestyle survives configure() with defaults."""
    consumer = ChainConsumer()
    consumer.add_chain(self.d, parameters=self.p, linestyle="--")
    consumer.configure()
    assert consumer.chains[0].config["linestyle"] == "--"
def test_override_shade_alpha(self):
    """A per-chain shade_alpha survives configure() with defaults."""
    consumer = ChainConsumer()
    consumer.add_chain(self.d, parameters=self.p, shade_alpha=0.8)
    consumer.configure()
    assert consumer.chains[0].config["shade_alpha"] == 0.8
def test_override_kde(self):
    """A per-chain kde factor survives configure() with defaults."""
    consumer = ChainConsumer()
    consumer.add_chain(self.d, parameters=self.p, kde=2.0)
    consumer.configure()
    assert consumer.chains[0].config["kde"] == 2.0
def test_override_kde_grid(self):
    """KDE is forcibly disabled for grid-based chains, even when requested."""
    consumer = ChainConsumer()
    xs = np.linspace(0, 10, 10)
    ys = np.linspace(0, 10, 10)
    weights = np.ones((10, 10))
    consumer.add_chain([xs, ys], weights=weights, grid=True, kde=2.0)
    consumer.configure()
    assert not consumer.chains[0].config["kde"]
def test_cache_invalidation(self):
    """Changing summary_area must invalidate previously cached summaries."""
    consumer = ChainConsumer()
    consumer.add_chain(normal(size=(1000000, 1)), parameters=["a"])
    consumer.configure(summary_area=0.68)
    narrow = consumer.analysis.get_summary()
    consumer.configure(summary_area=0.95)
    wide = consumer.analysis.get_summary()
    # 1-sigma vs 2-sigma bounds of a standard normal.
    assert np.isclose(narrow["a"][0], -1, atol=0.03)
    assert np.isclose(wide["a"][0], -2, atol=0.03)
    assert np.isclose(narrow["a"][2], 1, atol=0.03)
    assert np.isclose(wide["a"][2], 2, atol=0.03)
def test_pass_in_dataframe1(self):
    """A bare DataFrame is accepted, with columns used as parameter names."""
    frame = pd.DataFrame(self.d2, columns=self.p)
    consumer = ChainConsumer()
    consumer.add_chain(frame)
    summary = consumer.analysis.get_summary()
    assert np.isclose(summary["a"][0], -1, atol=0.03)
    assert np.isclose(summary["a"][1], 0, atol=0.05)
    assert np.isclose(summary["a"][2], 1, atol=0.03)
    assert np.isclose(summary["b"][0], -1, atol=0.03)
    assert np.isclose(summary["c"][0], -1, atol=0.03)
def test_pass_in_dataframe2(self):
    """A DataFrame 'weight' column is picked up without changing the summary."""
    frame = pd.DataFrame(self.d2, columns=self.p)
    frame["weight"] = self.w2
    consumer = ChainConsumer()
    consumer.add_chain(frame)
    summary = consumer.analysis.get_summary()
    assert np.isclose(summary["a"][0], -1, atol=0.03)
    assert np.isclose(summary["a"][1], 0, atol=0.05)
    assert np.isclose(summary["a"][2], 1, atol=0.03)
    assert np.isclose(summary["b"][0], -1, atol=0.03)
    assert np.isclose(summary["c"][0], -1, atol=0.03)
def test_pass_in_dataframe3(self):
    """Uniform samples weighted by a normal pdf recover normal statistics."""
    samples = np.random.uniform(-4, 6, size=(1000000, 1))
    frame = pd.DataFrame(samples, columns=["a"])
    frame["weight"] = norm.pdf(samples)
    consumer = ChainConsumer()
    consumer.add_chain(frame)
    summary = consumer.analysis.get_summary()
    assert np.isclose(summary["a"][0], -1, atol=0.03)
    assert np.isclose(summary["a"][1], 0, atol=0.05)
    assert np.isclose(summary["a"][2], 1, atol=0.03)
if __name__ == "__main__":
    # Allow running this test module directly from the repository root;
    # FIX: stray dataset-dump residue removed from the final line.
    import sys

    sys.path.append("..")
    c = TestChain()
    c.test_pass_in_dataframe2()
"content_hash": "8bd8c82de9a4d9465fcfd62949838051",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 99,
"avg_line_length": 35.29216867469879,
"alnum_prop": 0.6004950072544166,
"repo_name": "Samreay/ChainConsumer",
"id": "227de83da590cc6df67101a96aa5cc9641f73f8d",
"size": "11717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_chain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "247101"
},
{
"name": "Shell",
"bytes": "1806"
},
{
"name": "TeX",
"bytes": "1819"
}
],
"symlink_target": ""
} |
from pythonforandroid.recipe import PythonRecipe
class PycryptodomeRecipe(PythonRecipe):
    """python-for-android recipe that builds the pycryptodome package."""

    # Upstream release tag; substituted into `url` by the recipe machinery.
    version = '3.6.3'
    # Source tarball template ({version} is interpolated by the base class).
    url = 'https://github.com/Legrandin/pycryptodome/archive/v{version}.tar.gz'
    # Recipes that must be built before this one.
    depends = ['setuptools', 'cffi']


# Module-level instance discovered by the p4a recipe loader.
recipe = PycryptodomeRecipe()
| {
"content_hash": "f4e7cfb05feabf62aec06ef4d59daf77",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 79,
"avg_line_length": 26.2,
"alnum_prop": 0.732824427480916,
"repo_name": "rnixx/python-for-android",
"id": "9418600a297662b5d464fb48ec151c72eb447d9b",
"size": "262",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pythonforandroid/recipes/pycryptodome/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "70942"
},
{
"name": "C++",
"bytes": "491"
},
{
"name": "CMake",
"bytes": "250"
},
{
"name": "CSS",
"bytes": "3487"
},
{
"name": "Dockerfile",
"bytes": "4440"
},
{
"name": "HTML",
"bytes": "11631"
},
{
"name": "Java",
"bytes": "517112"
},
{
"name": "Makefile",
"bytes": "27307"
},
{
"name": "Python",
"bytes": "1359684"
},
{
"name": "Shell",
"bytes": "5340"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from xml.dom import minidom
import logging
from django.utils.timezone import now, utc
from celery.task import task
from celery.task.sets import subtask
from gargoyle import gargoyle
from eve_proxy.exceptions import *
from eve_proxy.models import CachedDocument
from eve_api.api_exceptions import *
from eve_api.models import EVEPlayerCorporation, EVEPlayerCharacter, EVEPlayerCharacterRole, EVEPlayerCharacterSkill, EVESkill, EVEAccount, EVEPlayerCharacterEmploymentHistory
from eve_api.app_defines import *
from eve_api.utils import basic_xml_parse, basic_xml_parse_doc
@task()
def import_eve_character(character_id, key_id=None, callback=None, **kwargs):
    """
    Import a character from the EVE API; providing an API key populates
    further details.  Returns a single EVEPlayerCharacter object, or
    None when the import fails or a callback task is chained instead.
    """
    log = import_eve_character.get_logger()
    try:
        pchar = import_eve_character_func(character_id, key_id, log)
    except APIAccessException, exc:
        # Transient API failure: requeue this task with the same arguments.
        # NOTE(review): celery's retry() normally raises, so the code below
        # is unreachable on this path; if it ever returned, `pchar` would
        # be unbound -- confirm.
        log.debug('Error importing character - flagging for retry')
        import_eve_character.retry(args=[character_id, key_id, callback], exc=exc, kwargs=kwargs)
    if not pchar:
        log.debug('Error importing character %s' % character_id)
    else:
        if callback:
            # Hand the imported character id to the follow-up task instead
            # of returning a value.
            subtask(callback).delay(character=pchar.id)
        else:
            return pchar
@task()
def import_eve_characters(character_list, key_id=None, callback=None, **kwargs):
    """
    Import several characters from the EVE API; providing an API key
    populates further details.  Returns a list of EVEPlayerCharacter
    objects, or passes them to `callback` when one is supplied.
    """
    log = import_eve_characters.get_logger()
    try:
        # Any single failure aborts and retries the whole batch.
        results = [import_eve_character_func(char, key_id, log) for char in character_list]
    except APIAccessException, exc:
        log.debug('Error importing characters - flagging for retry')
        import_eve_characters.retry(args=[character_list, key_id, callback], exc=exc, kwargs=kwargs)
    if callback:
        subtask(callback).delay(characters=results)
    else:
        return results
def import_eve_character_func(character_id, key_id=None, logger=logging.getLogger(__name__)):
    """Fetch a character from the EVE API and upsert it into the database.

    Without a key only the public CharacterInfo data is stored; with a
    usable ``key_id`` the private CharacterSheet (name, balance,
    attributes, skills, training queue, corporation roles, gender) is
    imported as well.

    Raises APIAccessException when an API document cannot be retrieved
    (callers use this to schedule a task retry).  Returns the
    EVEPlayerCharacter instance, or None when the API reports an error.
    """
    # IDs in [3000000, 4000000) are NPC characters: create a stub row only.
    if int(character_id) >= 3000000 and int(character_id) < 4000000:
        # NPC character
        pchar, created = EVEPlayerCharacter.objects.get_or_create(pk=character_id)
        return pchar
    try:
        char_doc = CachedDocument.objects.api_query('/eve/CharacterInfo.xml.aspx', params={'characterID': character_id}, no_cache=False)
    except DocumentRetrievalError, exc:
        logger.debug('Error retrieving CharacterInfo.xml.aspx for Character ID %s - %s' % (character_id, exc))
        raise APIAccessException('Error retrieving CharacterInfo.xml.aspx for Character ID %s - %s' % (character_id, exc))
    d = basic_xml_parse_doc(char_doc)['eveapi']
    if 'error' in d:
        logger.debug('EVE API Error enountered in API document')
        return
    values = d['result']
    pchar, created = EVEPlayerCharacter.objects.get_or_create(id=character_id)
    # Set the character's name, avoid oddities in the XML feed (an empty
    # element parses as {} rather than a string).
    if not values['characterName'] == {}:
        pchar.name = values['characterName']
    else:
        pchar.name = ""
    pchar.security_status = values['securityStatus']
    # Set corporation and join date
    corp, created = EVEPlayerCorporation.objects.get_or_create(pk=values['corporationID'])
    # Imported locally, presumably to avoid a circular import with the
    # corporation task module.
    from eve_api.tasks.corporation import import_corp_details
    # Refresh corp details when the row is new, unnamed, or stale (>12h).
    if created or not corp.name or corp.api_last_updated < (now() - timedelta(hours=12)):
        import_corp_details.delay(values['corporationID'])
    pchar.corporation = corp
    pchar.corporation_date = datetime.strptime(values['corporationDate'], "%Y-%m-%d %H:%M:%S").replace(tzinfo=utc)
    # Derive the Race value from the choices tuple (label -> stored value).
    for v in API_RACES_CHOICES:
        val, race = v
        if race == values['race']:
            pchar.race = val
            break
    # Import employment history if it is made available
    if 'employmentHistory' in values:
        reclist = pchar.employmenthistory.values_list('pk', flat=True)
        for emp in values['employmentHistory']:
            # Only create records we have not stored yet.
            if not emp['recordID'] in reclist:
                corp, created = EVEPlayerCorporation.objects.get_or_create(pk=emp['corporationID'])
                if created:
                    import_corp_details.delay(emp['corporationID'])
                startdate = datetime.strptime(emp['startDate'], "%Y-%m-%d %H:%M:%S").replace(tzinfo=utc)
                eobj, created = EVEPlayerCharacterEmploymentHistory.objects.get_or_create(pk=emp['recordID'], corporation=corp, character=pchar, start_date=startdate)
    # We've been passed a Key ID, try and work with it
    if key_id:
        try:
            acc = EVEAccount.objects.get(pk=key_id)
        except EVEAccount.DoesNotExist:
            acc = None
    else:
        acc = None
    # If we have a key with CharacterSheet access (mask 3), call CharSheet
    if acc and acc.has_access(3) and not acc.api_keytype == API_KEYTYPE_CORPORATION:
        # CAK-style keys use different auth parameter names.
        if gargoyle.is_active('eve-cak') and acc.is_cak:
            auth_params = {'keyid': acc.api_user_id, 'vcode': acc.api_key, 'characterid': character_id }
        else:
            auth_params = {'userID': acc.api_user_id, 'apiKey': acc.api_key, 'characterID': character_id }
        try:
            char_doc = CachedDocument.objects.api_query('/char/CharacterSheet.xml.aspx', params=auth_params, no_cache=False)
        except DocumentRetrievalError, exc:
            logger.debug('Error retrieving CharacterSheet.xml.aspx for User ID %s, Character ID %s - %s' % (acc.pk, character_id, exc))
            raise APIAccessException('Error retrieving CharacterSheet.xml.aspx for User ID %s, Character ID %s - %s' % (acc.pk, character_id, exc.value))
        doc = basic_xml_parse_doc(char_doc)['eveapi']
        if not 'error' in doc:
            values = doc['result']
            pchar.name = values['name']
            pchar.balance = values['balance']
            pchar.attrib_intelligence = values['attributes']['intelligence']
            pchar.attrib_charisma = values['attributes']['charisma']
            pchar.attrib_perception = values['attributes']['perception']
            pchar.attrib_willpower = values['attributes']['willpower']
            pchar.attrib_memory = values['attributes']['memory']
            # Process the character's skills
            pchar.total_sp = 0
            # NOTE(review): values.get('skills', None) yields None when the
            # key is absent, which would raise TypeError on iteration --
            # probably intended to be values.get('skills', []); confirm.
            for skill in values.get('skills', None):
                skillobj, created = EVESkill.objects.get_or_create(pk=skill['typeID'])
                charskillobj, created = EVEPlayerCharacterSkill.objects.get_or_create(skill=skillobj, character=pchar)
                # Only write when the stored level/SP actually changed.
                if created or not charskillobj.level == int(skill['level']) or not charskillobj.skillpoints == int(skill['skillpoints']):
                    charskillobj.level = int(skill['level'])
                    charskillobj.skillpoints = int(skill['skillpoints'])
                    charskillobj.save()
                pchar.total_sp = pchar.total_sp + int(skill['skillpoints'])
            # Access mask 18: skill-in-training information (best effort --
            # failures here only log and do not abort the import).
            if acc.has_access(18):
                try:
                    skillqueue = CachedDocument.objects.api_query('/char/SkillInTraining.xml.aspx', params=auth_params, no_cache=False)
                except DocumentRetrievalError, exc:
                    logger.debug('Error retrieving SkillInTraining.xml.aspx for User ID %s, Character ID %s - %s' % (key_id, character_id, exc))
                else:
                    queuedoc = basic_xml_parse_doc(skillqueue)
                    if not 'error' in queuedoc['eveapi'] and 'result' in queuedoc['eveapi']:
                        queuedoc = queuedoc['eveapi']['result']
                        # Clear stale in-training flags before setting the new one.
                        EVEPlayerCharacterSkill.objects.filter(character=pchar).update(in_training=0)
                        if int(queuedoc['skillInTraining']):
                            skillobj, created = EVESkill.objects.get_or_create(pk=queuedoc['trainingTypeID'])
                            charskillobj, created = EVEPlayerCharacterSkill.objects.get_or_create(skill=skillobj, character=pchar)
                            charskillobj.in_training = queuedoc['trainingToLevel']
                            charskillobj.save()
                        else:
                            EVEPlayerCharacterSkill.objects.filter(character=pchar).update(in_training=0)
            # Process the character's roles (full refresh: clear then re-add).
            pchar.roles.clear()
            roles = values.get('corporationRoles', None)
            if roles and len(roles):
                for r in roles:
                    role, created = EVEPlayerCharacterRole.objects.get_or_create(roleid=r['roleID'], name=r['roleName'])
                    pchar.roles.add(role)
            if values['gender'] == 'Male':
                pchar.gender = API_GENDER_MALE
            else:
                pchar.gender = API_GENDER_FEMALE
    pchar.api_last_updated = now()
    pchar.save()
    if acc:
        # Link the character to the owning account if not already attached.
        if not pchar.id in acc.characters.all().values_list('id', flat=True):
            acc.characters.add(pchar)
        # Directors with full/corporation keys trigger a corp member import.
        if pchar.director and acc.api_keytype in [API_KEYTYPE_FULL, API_KEYTYPE_CORPORATION]:
            from eve_api.tasks.corporation import import_corp_members
            import_corp_members.delay(key_id=acc.pk, character_id=pchar.id)
    return pchar
| {
"content_hash": "c4711ea671f3c1983e3b9dc182a5ff88",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 175,
"avg_line_length": 45.73529411764706,
"alnum_prop": 0.6394426580921758,
"repo_name": "nikdoof/test-auth",
"id": "4e87d1e6a59de9d76087f1fe6a47965f333f890c",
"size": "9330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/eve_api/tasks/character.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6049"
},
{
"name": "Python",
"bytes": "1025469"
}
],
"symlink_target": ""
} |
import models
from app import db
class Member(object):
    """In-memory representation of a chat member.

    Attribute access goes through properties backed by name-mangled
    private fields; ``name`` is title-cased on assignment (but not in
    ``__init__``, which stores it verbatim).
    """

    # Class-level defaults; instance assignments in __init__ shadow these.
    __email = ""
    __phone = ""
    __reputation = ""
    __password = ""
    __name = ""

    def __init__(self, name, password, email, phone):
        self.__email = email
        self.__phone = phone
        self.__password = password
        self.__name = name

    # set to db and get reputation
    @classmethod
    def get_member(cls, id):
        """Load a member by id; returns the Member or None on failure.

        NOTE(review): the lookup itself is still a stub -- `session` is
        opened but never queried and the field values stay empty;
        confirm the intended schema before relying on this.
        """
        session = db.session()
        # set variables from id
        name = ""
        password = ""
        email = ""
        phone = ""
        reputation = ""
        try:
            existing_member = cls(name=name, password=password, email=email, phone=phone)
        except Exception:  # was a bare except; keep the best-effort contract
            return None
        existing_member.__reputation = reputation
        # BUG FIX: the original fell through here and always returned None.
        return existing_member

    @property
    def name(self):
        return self.__name

    @name.setter
    def name(self, value):
        # Normalise to title case on assignment.
        self.__name = value.title()

    @property
    def password(self):
        return self.__password

    @property
    def email(self):
        return self.__email

    @email.setter
    def email(self, value):
        self.__email = value

    @property
    def phone(self):
        return self.__phone

    @phone.setter
    def phone(self, value):
        self.__phone = value

    @property
    def reputation(self):
        # Read-only: only get_member populates it.
        return self.__reputation

    # important inherited values
    def __str__(self):
        return "{name}".format(name=self.name)

    def __repr__(self):
        # BUG FIX: the closing '>' was missing from the format string.
        return "<User {name} e:{email} p:{phone}>".format(name=self.name, email=self.email, phone=self.phone)
"content_hash": "c7e0dac3ae60339f759735f90c8684a6",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 108,
"avg_line_length": 20.532467532467532,
"alnum_prop": 0.545224541429475,
"repo_name": "marcstreeter/ChatterBlog",
"id": "18f08eff87727fcf98a7baea84b934e71bcbf6ec",
"size": "1581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/shared/Member/controllers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18270"
}
],
"symlink_target": ""
} |
import os
import sys
import fnmatch
import subprocess
import datetime
import argparse
import logging
import re
def isLeapYear(year):
    '''Returns TRUE if the given year (int) is leap and FALSE otherwise'''
    # Gregorian rule: every 400th year is leap, other centuries are not,
    # otherwise any multiple of 4 is leap.
    if year % 400 == 0:
        return True
    if year % 100 == 0:
        return False
    return year % 4 == 0
def listFiles(path, pattern):
    """Recursively collect paths under *path* whose full path matches the regex *pattern*."""
    matches = []
    for dirpath, _subdirs, filenames in os.walk(path):
        for fname in filenames:
            candidate = os.path.join(dirpath, fname)
            # re.match anchors at the start of the full path.
            if re.match(pattern, candidate, flags=0):
                matches.append(candidate)
    return matches
def testGribModis2SciDB():
    '''Test if GRibeiro's modis2scidb is installed.

    Returns True when the binary can be invoked, False otherwise.
    '''
    try:
        subprocess.check_output(["modis2scidb", "--h"])
    except (OSError, subprocess.CalledProcessError):
        # FIX: was a bare except (also swallowed KeyboardInterrupt etc.);
        # OSError covers a missing binary, CalledProcessError a non-zero exit.
        return False
    return True
def date2grid(dateFileName, period, startyear):
    '''Return a time index (timid) for the given date and compositing period.

    dateFileName -- date token from the filename: "AYYYYDDD" (MODIS DOY)
                    for periodic products, or "YYYYMMDD" for the monthly
                    TRMM sentinel case.
    period       -- compositing period in days (e.g. 8), or the monthly
                    sentinel -319980101.
    startyear    -- first year of the time grid.

    Returns -1 (after logging an error) when the date does not fall on a
    period boundary or the period is unrecognised.
    '''
    res = -1
    if period > 0:
        dateYYYYDOY = dateFileName[1:]  # Remove the A preceding the date
        year = int(dateYYYYDOY[0:4])
        doy = int(dateYYYYDOY[4:7])
        ppy = 365 // period + 1  # Periods per year
        if (doy - 1) % period == 0:
            # BUG FIX: use floor division so the index stays an int on
            # Python 3 ('/' yields a float there; on Python 2 it was
            # already integer division).
            idd = (doy - 1) // period
            idy = (year - startyear) * ppy
            res = idy + idd
        else:
            logging.error("date2grid: Invalid date")
    elif period == -319980101:  # Monthly - given as YYYYMMDD i.e 19980101, 19980201, ...
        dateYYYYMMDD = dateFileName
        year = int(dateYYYYMMDD[0:4])
        mm = int(dateYYYYMMDD[4:6])
        idy = (year - startyear) * 12
        idd = mm - 1
        res = idy + idd
    return res
#********************************************************
# MAIN
#********************************************************
def main(argv):
    """Export one MODIS/TRMM HDF to a SciDB binary file.

    Parses CLI arguments, validates the product name and the presence of
    the external ``modis2scidb`` tool, computes the time index for the
    file's date, and shells out to ``modis2scidb``.  Outcomes are logged
    to ``log_hdf2sdbin.log``.
    """
    t0 = datetime.datetime.now()
    parser = argparse.ArgumentParser(description = "Exports MODIS-HDFs to binary files for uploading to SCIDB using GRibeiro's tool")
    parser.add_argument("hdfFile", help = "Path to the HDF")
    parser.add_argument("loadFolder", help = "Folder from where the binary files are uploaded to SCIDB")
    parser.add_argument("product", help = "Product. e.g MOD09Q1")
    parser.add_argument("--log", help = "Log level. Default = WARNING", default = 'WARNING')
    # Get parameters.  BUG FIX: parse the argv that was passed in instead
    # of silently re-reading sys.argv (same behaviour when called from
    # __main__ with sys.argv[1:]).
    args = parser.parse_args(argv)
    hdfFile = args.hdfFile
    loadFolder = os.path.join(args.loadFolder, '')  # guarantee trailing separator
    product = args.product
    log = args.log
    ####################################################
    # CONFIG
    ####################################################
    prodList = ['MOD09Q1', 'MOD13Q1', 'TRMM3B43']
    # Compositing period in days; -319980101 is the monthly sentinel
    # understood by date2grid().
    prodTemporalResolution = {
        'MOD09Q1': 8,
        'MOD13Q1': 16,
        'TRMM3B43': -319980101
    }
    prodStartYear = {
        'MOD09Q1': 2000,
        'MOD13Q1': 2000,
        'TRMM3B43': 1998
    }
    # Band index lists forwarded to modis2scidb's --b option.
    prodBands = {
        'MOD09Q1': '0,1,2',
        'MOD13Q1': '0,1,2,3,4,5,6,7,8,9,10,11',
        'TRMM3B43': '0,1,2'
    }
    numeric_loglevel = getattr(logging, log.upper(), None)
    if not isinstance(numeric_loglevel, int):
        raise ValueError('Invalid log level: %s' % log)
    logging.basicConfig(filename = 'log_hdf2sdbin.log', level = numeric_loglevel, format = '%(asctime)s %(levelname)s: %(message)s')
    logging.info("log_hdf2sdbin.py: " + str(args))
    ####################################################
    # VALIDATION
    ####################################################
    # BUG FIX: the original `if product in prodList == False:` is a chained
    # comparison -- `(product in prodList) and (prodList == False)` -- which
    # is always False, so the check never fired.
    if product not in prodList:
        logging.exception("Unknown product!")
        raise Exception("Unknown product!")
    if testGribModis2SciDB() == False:
        logging.exception("GRibeiro's mod2scidb not found")
        raise Exception("GRibeiro's mod2scidb not found")
    ####################################################
    # EXPORT
    ####################################################
    cmd = ""
    try:
        period = prodTemporalResolution[product]
        startyear = prodStartYear[product]
        bands = prodBands[product]
        filename = os.path.basename(hdfFile)
        # Second dot-separated token of the filename carries the date.
        time_id = date2grid(filename.split(".")[1], period, startyear)
        arg0 = "modis2scidb"
        arg1 = " --f " + hdfFile
        arg2 = " --o " + loadFolder + os.path.splitext(filename)[0] + ".sdbbin"
        arg3 = " --b " + bands
        arg4 = " --t " + str(time_id)
        cmd = arg0 + arg1 + arg2 + arg3 + arg4
        logging.debug("Command to call: " + cmd)
        subprocess.check_call(str(cmd), shell = True)
    except subprocess.CalledProcessError as e:
        logging.exception("CalledProcessError: " + cmd + "\n" + str(e.message))
    except ValueError as e:
        logging.exception("ValueError: " + cmd + "\n" + str(e.message))
    except OSError as e:
        logging.exception("OSError: " + cmd + "\n" + str(e.message))
    except:
        # NOTE(review): `e.message` above is Python-2 only; confirm the
        # target interpreter before tightening these handlers further.
        e = sys.exc_info()[0]
        logging.exception("Unknown exception: " + cmd + "\n" + str(e.message))
    t1 = datetime.datetime.now()
    tt = t1 - t0
    logging.info("Finished in " + str(tt))
if __name__ == "__main__":
    # Strip the script name; main() handles the remaining CLI arguments.
    main(sys.argv[1:])
| {
"content_hash": "4bf7d263c4730e83661e2726a18d2fd7",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 130,
"avg_line_length": 30.235668789808916,
"alnum_prop": 0.6043817147672214,
"repo_name": "albhasan/modis2scidb",
"id": "353fe4b1afe997d8fb83a3301aa1e41c7fa9a880",
"size": "4747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hdf2sdbbin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44711"
},
{
"name": "Shell",
"bytes": "840"
}
],
"symlink_target": ""
} |
from django.utils.functional import cached_property
from django.utils import six
from django.db.models import Transform, Lookup, CharField
class KeyTransform(Transform):
    """Transform extracting a JSON key (or array index) as text.

    Compiles to the Postgres ``->>`` operator: integer keys index into a
    JSON array, string keys look up an object field.
    """

    output_field = CharField()

    def __init__(self, key, base_field, *args, **kwargs):
        super(KeyTransform, self).__init__(*args, **kwargs)
        try:
            # Numeric-looking keys become array indices.
            self.key = int(key)
        except ValueError:
            self.key = key
        self.base_field = base_field

    def as_sql(self, qn, connection):
        lhs, params = qn.compile(self.lhs)
        if isinstance(self.key, int):
            # Array index: rendered unquoted.
            return "(%s->>%s)" % (lhs, self.key), params
        # Object field: rendered as a single-quoted literal.
        return "(%s->>'%s')" % (lhs, self.key), params

    @cached_property
    def output_type(self):
        return self.base_field
class KeyTransformFactory(object):
    """Callable that builds KeyTransform instances bound to one key/field pair."""

    def __init__(self, key, base_field):
        self.key = key
        self.base_field = base_field

    def __call__(self, *args, **kwargs):
        return KeyTransform(self.key, self.base_field, *args, **kwargs)
class ExactLookup(Lookup):
    """Equality lookup that unwraps adapted string parameters.

    NOTE(review): `.adapted` looks like a psycopg2 adapter attribute --
    confirm; the raw string is substituted so the comparison uses the
    original value rather than the adapted object.
    """

    lookup_name = 'exact'

    def as_sql(self, qn, connection):
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        if len(rhs_params) == 1 and hasattr(rhs_params[0], "adapted"):
            adapted = rhs_params[0].adapted
            if isinstance(adapted, six.string_types):
                rhs_params[0] = adapted
        params = lhs_params + rhs_params
        return '%s = %s' % (lhs, rhs), params
class ArrayLengthLookup(Lookup):
    """Matches json values whose array length equals the given value."""

    lookup_name = 'array_length'

    def as_sql(self, qn, connection):
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        # json (not jsonb) variant of the length function.
        return 'json_array_length(%s) = %s' % (lhs, rhs), params
class JsonBArrayLengthLookup(Lookup):
    """Matches jsonb values whose array length equals the given value."""

    lookup_name = 'array_length'

    def as_sql(self, qn, connection):
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        return 'jsonb_array_length(%s) = %s' % (lhs, rhs), params
class JsonBContainsLookup(Lookup):
    """
    jsonb-specific containment lookup that can be used as follows::

        YourModel.objects.filter(data__jcontains={"author": "John Smith"}

    This will be translated into the following SQL::

        select * from yourmodel where data @> '{"author": "John Smith"}'::jsonb

    You can also do interesting queries like::

        MyMovie.objects.filter(data__jcontains={"tags": ["sad", "romantic"]}

    Such queries can be accelerated by GiN indices on the jsonb field in
    question.

    :author: Charl P. Botha <cpbotha@vxlabs.com>
    """

    # ideally we would call this 'contains'. However, in Django 'contains'
    # lookups are explicitly handled by LIKE queries, and the
    # Field.get_db_prep_lookup will then prepare your data for a DB LIKE query
    # breaking our jsonb containment query. -- cpb
    lookup_name = 'jcontains'

    def as_sql(self, qn, connection):
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        # Cast the parameter explicitly so Postgres applies jsonb containment.
        return "{0} @> {1}::jsonb".format(lhs, rhs), params
class JsonBHasLookup(Lookup):
    """ JsonB specific lookup for the has (?) operator """

    lookup_name = 'jhas'

    def as_sql(self, qn, connection):
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        # True when the top-level jsonb object has the given key.
        return "{0} ? {1}".format(lhs, rhs), params
class JsonBHasAnyLookup(Lookup):
    """ JsonB specific lookup for the has any (?|) operator """

    lookup_name = 'jhas_any'

    def as_sql(self, qn, connection):
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        # True when any key from the given array is present.
        return "{0} ?| {1}".format(lhs, rhs), params
class JsonBHasAllLookup(Lookup):
    """ JsonB specific lookup for the has all (?&) operator """

    lookup_name = 'jhas_all'

    def as_sql(self, qn, connection):
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        # True only when every key from the given array is present.
        return "{0} ?& {1}".format(lhs, rhs), params
| {
"content_hash": "aba7f0e84ae7cce84d503b0ec0119f0d",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 79,
"avg_line_length": 31.77304964539007,
"alnum_prop": 0.6236607142857142,
"repo_name": "mamigot/django-pgjson",
"id": "5374a6c3a061a71287ea539d7d120c4cf9f8c782",
"size": "4507",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django_pgjson/lookups.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "29939"
},
{
"name": "Shell",
"bytes": "271"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
import views
# URL routes for the text app.  The captured (\d+) groups are positional
# arguments passed to the views (presumably pagination indices -- confirm
# against the view signatures); (\w+) captures an identifier.
urlpatterns = [
    url(r'^article/upload/', views.articleUpload, name='articleUpload'),
    url(r'^article/download/(\w+)/', views.articleDownload, name='articleDownload'),
    url(r'^article/all/(\d+)/(\d+)/', views.all, name='all'),
    url(r'^article/mine/(\d+)/(\d+)/', views.mine, name='mine'),
    url(r'^task/upload/', views.doTask, name='doTask'),
    url(r'^history/(\d+)/(\d+)/', views.getHistory, name='getHistory'),
    url(r'^task/(\d+)/(\d+)/', views.getTask, name='getTask'),
    url(r'^overview/', views.overview, name='overview'),
    url(r'^paragraph/(\w+)/', views.getParagraph, name='getParagraph'),
] | {
"content_hash": "51a51510d942481fea8b37dfab3fdb41",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 84,
"avg_line_length": 44.666666666666664,
"alnum_prop": 0.6253731343283582,
"repo_name": "zackszhu/hack_sjtu_2017",
"id": "ba8f17c03f1d72b72e499901637052d335f5e9bc",
"size": "670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/text/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "124123"
},
{
"name": "Python",
"bytes": "33884"
}
],
"symlink_target": ""
} |
import http.client
import json

# football-data.org resource id (team or competition, depending on the
# endpoint selected below).  NOTE: shadows the `id` builtin; kept for
# backward compatibility with the commented alternatives.
id = '495'

connection = http.client.HTTPConnection('api.football-data.org')
# NOTE(review): hard-coded API token committed to source -- rotate it and
# move it to an environment variable.
headers = { 'X-Auth-Token': '4b7b23a920a845188380e9408be81bee', 'X-Response-Control': 'minified' }

# connection.request('GET', '/v1/fixtures/?timeFrame=p99&league=PPL', None, headers ) # Past 99 days in the Portuguese League
# connection.request('GET', '/v1/teams/' + id + '/fixtures', None, headers) # Get informations about the games of a specific team (id)
# connection.request('GET', '/v1/competitions/' + id + '/leagueTable', None, headers) # Get all the team from a league (id)
connection.request('GET', '/v1/teams/' + id, None, headers) # Get informations about a specific team (id)

response = json.loads(connection.getresponse().read().decode())

# BUG FIX: the output file was never closed/flushed; a context manager
# guarantees the JSON reaches disk even if dump() raises.
with open(id + '.json', 'w', encoding="utf8") as out_file:
    json.dump(response, out_file)
"content_hash": "aee370a0f1afd4dabdbf908b27768143",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 134,
"avg_line_length": 55.8,
"alnum_prop": 0.7120669056152927,
"repo_name": "jtmnf/FootballPredictionsArtificialIntelligence",
"id": "bbd8d280fc12176f858a618254bbffc39c2ce267",
"size": "837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Files/GetFile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "837"
}
],
"symlink_target": ""
} |
import pythonequations, pythonequations.EquationBaseClasses, pythonequations.ExtraCodeForEquationBaseClasses
import numpy
numpy.seterr(all = 'raise') # numpy raises warnings, convert to exceptions to trap them
class LinearLogarithmic3D(pythonequations.EquationBaseClasses.Equation3D):
    """Surface fit z = a + b*ln(x) + c*ln(y)."""

    # Flags consumed by the framework to auto-generate derived equation forms.
    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = False
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True
    _name = "Linear Logarithmic"
    _HTML = "z = a + b*ln(x) + c*ln(y)"
    coefficientDesignatorTuple = ("a", "b", "c")
    # Linear in its coefficients, so the linear least-squares solver applies.
    LinearSSQSolverFlag = 1
    # ln() is undefined for zero/negative values, so such data is rejected.
    CannotAcceptDataWithZeroX = True
    CannotAcceptDataWithNegativeX = True
    CannotAcceptDataWithZeroY = True
    CannotAcceptDataWithNegativeY = True
    # C++ evaluation over cached data columns (_cwo appears to hold the
    # per-column cache offsets -- confirm against the framework).
    function_cpp_code = 'temp = coeff[0] + coeff[1] * _id[_cwo[1]+i] + coeff[2] * _id[_cwo[2]+i];'

    def CreateCacheGenerationList(self):
        # One cached column per model term: 1, ln(x), ln(y).
        self.CacheGenerationList = []
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_Ones(NameOrValueFlag=1), []])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_LogX(NameOrValueFlag=1), []])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_LogY(NameOrValueFlag=1), []])

    def SpecificCodeCPP(self):
        # Emit the per-term C++ accumulation used by code generation.
        s = "\ttemp += " + self.coefficientDesignatorTuple[0] + ";\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[1] + " * log(x_in);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[2] + " * log(y_in);\n"
        return s
class SimplifiedQuadraticLogarithmic3D(pythonequations.EquationBaseClasses.Equation3D):
    """Surface fit z = a + b*ln(x) + c*ln(y) + d*ln(x)^2 + f*ln(y)^2 (no cross term)."""

    # Flags consumed by the framework to auto-generate derived equation forms.
    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = False
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True
    _name = "Simplified Quadratic Logarithmic"
    _HTML = "z = a + b*ln(x) + c*ln(y) + d*ln(x)<SUP>2</SUP> + f*ln(y)<SUP>2</SUP>"
    coefficientDesignatorTuple = ("a", "b", "c", "d", 'f')
    # Linear in its coefficients, so the linear least-squares solver applies.
    LinearSSQSolverFlag = 1
    # ln() is undefined for zero/negative values, so such data is rejected.
    CannotAcceptDataWithZeroX = True
    CannotAcceptDataWithNegativeX = True
    CannotAcceptDataWithZeroY = True
    CannotAcceptDataWithNegativeY = True
    function_cpp_code = 'temp = coeff[0] + coeff[1] * _id[_cwo[1]+i] + coeff[2] * _id[_cwo[2]+i] + coeff[3] * _id[_cwo[3]+i] + coeff[4] * _id[_cwo[4]+i];'

    def CreateCacheGenerationList(self):
        # Cached columns: 1, ln(x), ln(y), ln(x)^2, ln(y)^2.
        self.CacheGenerationList = []
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_Ones(NameOrValueFlag=1), []])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_LogX(NameOrValueFlag=1), []])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_LogY(NameOrValueFlag=1), []])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_PowLogX(NameOrValueFlag=1, args=[2.0]), [2.0]])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_PowLogY(NameOrValueFlag=1, args=[2.0]), [2.0]])

    def SpecificCodeCPP(self):
        # Emit the per-term C++ accumulation used by code generation.
        s = "\ttemp += " + self.coefficientDesignatorTuple[0] + ";\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[1] + " * log(x_in);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[2] + " * log(y_in);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[3] + " * pow(log(x_in), 2.0);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[4] + " * pow(log(y_in), 2.0);\n"
        return s
class FullQuadraticLogarithmic3D(pythonequations.EquationBaseClasses.Equation3D):
    """Surface fit z = a + b*ln(x) + c*ln(y) + d*ln(x)^2 + f*ln(y)^2 + g*ln(x)*ln(y)."""

    # Flags consumed by the framework to auto-generate derived equation forms.
    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = False
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True
    _name = "Full Quadratic Logarithmic"
    _HTML = "z = a + b*ln(x) + c*ln(y) + d*ln(x)<SUP>2</SUP> + f*ln(y)<SUP>2</SUP> + g*ln(x)*ln(y)"
    coefficientDesignatorTuple = ("a", "b", "c", "d", "f", "g")
    # Linear in its coefficients, so the linear least-squares solver applies.
    LinearSSQSolverFlag = 1
    # ln() is undefined for zero/negative values, so such data is rejected.
    CannotAcceptDataWithZeroX = True
    CannotAcceptDataWithNegativeX = True
    CannotAcceptDataWithZeroY = True
    CannotAcceptDataWithNegativeY = True
    function_cpp_code = 'temp = coeff[0] + coeff[1] * _id[_cwo[1]+i] + coeff[2] * _id[_cwo[2]+i] + coeff[3] * _id[_cwo[3]+i] + coeff[4] * _id[_cwo[4]+i] + coeff[5] * _id[_cwo[5]+i];'

    def CreateCacheGenerationList(self):
        # Cached columns: 1, ln(x), ln(y), ln(x)^2, ln(y)^2, ln(x)*ln(y).
        self.CacheGenerationList = []
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_Ones(NameOrValueFlag=1), []])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_LogX(NameOrValueFlag=1), []])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_LogY(NameOrValueFlag=1), []])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_PowLogX(NameOrValueFlag=1, args=[2.0]), [2.0]])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_PowLogY(NameOrValueFlag=1, args=[2.0]), [2.0]])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_LogX_LogY(NameOrValueFlag=1), []])

    def SpecificCodeCPP(self):
        # Emit the per-term C++ accumulation used by code generation.
        s = "\ttemp += " + self.coefficientDesignatorTuple[0] + ";\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[1] + " * log(x_in);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[2] + " * log(y_in);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[3] + " * pow(log(x_in), 2.0);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[4] + " * pow(log(y_in), 2.0);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[5] + " * log(x_in) * log(y_in);\n"
        return s
class SimplifiedCubicLogarithmic3D(pythonequations.EquationBaseClasses.Equation3D):
    """Fits z = a + b*ln(x) + c*ln(y) + d*ln(x)^2 + f*ln(y)^2 + g*ln(x)^3 + h*ln(y)^3.

    Linear in its coefficients (LinearSSQSolverFlag); the ln() terms restrict
    the domain to strictly positive x and y.
    """
    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = False
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True
    _name = "Simplified Cubic Logarithmic"
    _HTML = "z = a + b*ln(x) + c*ln(y) + d*ln(x)<SUP>2</SUP> + f*ln(y)<SUP>2</SUP> + g*ln(x)<SUP>3</SUP> + h*ln(y)<SUP>3</SUP>"
    coefficientDesignatorTuple = ("a", "b", "c", "d", "f", "g", "h")
    LinearSSQSolverFlag = 1
    CannotAcceptDataWithZeroX = True
    CannotAcceptDataWithNegativeX = True
    CannotAcceptDataWithZeroY = True
    CannotAcceptDataWithNegativeY = True
    # C++ evaluation: dot product of coeffs with the cached columns below.
    function_cpp_code = 'temp = coeff[0] + coeff[1] * _id[_cwo[1]+i] + coeff[2] * _id[_cwo[2]+i] + coeff[3] * _id[_cwo[3]+i] + coeff[4] * _id[_cwo[4]+i] + coeff[5] * _id[_cwo[5]+i] + coeff[6] * _id[_cwo[6]+i];'

    def CreateCacheGenerationList(self):
        """Register one cached data column per term, in coefficient order."""
        cg = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [cg.CG_Ones(NameOrValueFlag=1), []],
            [cg.CG_LogX(NameOrValueFlag=1), []],
            [cg.CG_LogY(NameOrValueFlag=1), []],
            [cg.CG_PowLogX(NameOrValueFlag=1, args=[2.0]), [2.0]],
            [cg.CG_PowLogY(NameOrValueFlag=1, args=[2.0]), [2.0]],
            [cg.CG_PowLogX(NameOrValueFlag=1, args=[3.0]), [3.0]],
            [cg.CG_PowLogY(NameOrValueFlag=1, args=[3.0]), [3.0]],
        ]

    def SpecificCodeCPP(self):
        """Return C++ source accumulating one "temp += ..." line per term."""
        designators = self.coefficientDesignatorTuple
        terms = (
            "",
            " * log(x_in)",
            " * log(y_in)",
            " * pow(log(x_in), 2.0)",
            " * pow(log(y_in), 2.0)",
            " * pow(log(x_in), 3.0)",
            " * pow(log(y_in), 3.0)",
        )
        return "".join("\ttemp += " + d + t + ";\n"
                       for d, t in zip(designators, terms))
class FullCubicLogarithmic3D(pythonequations.EquationBaseClasses.Equation3D):
    """Fits the full cubic-logarithmic surface including all cross terms:

    z = a + b*ln(x) + c*ln(y) + d*ln(x)^2 + f*ln(y)^2 + g*ln(x)^3 + h*ln(y)^3
        + i*ln(x)*ln(y) + j*ln(x)^2*ln(y) + k*ln(x)*ln(y)^2

    Linear in its coefficients (LinearSSQSolverFlag); requires x > 0 and y > 0.
    """
    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = False
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True
    _name = "Full Cubic Logarithmic"
    _HTML = "z = a + b*ln(x) + c*ln(y) + d*ln(x)<SUP>2</SUP> + f*ln(y)<SUP>2</SUP> + g*ln(x)<SUP>3</SUP> + h*ln(y)<SUP>3</SUP> + i*ln(x)*ln(y) + j*ln(x)<SUP>2</SUP>*ln(y) + k*ln(x)*ln(y)<SUP>2</SUP>"
    coefficientDesignatorTuple = ("a", "b", "c", "d", "f", "g", "h", "i", "j", "k")
    LinearSSQSolverFlag = 1
    CannotAcceptDataWithZeroX = True
    CannotAcceptDataWithNegativeX = True
    CannotAcceptDataWithZeroY = True
    CannotAcceptDataWithNegativeY = True
    # C++ evaluation: dot product of coeffs with the cached columns below.
    function_cpp_code = 'temp = coeff[0] + coeff[1] * _id[_cwo[1]+i] + coeff[2] * _id[_cwo[2]+i] + coeff[3] * _id[_cwo[3]+i] + coeff[4] * _id[_cwo[4]+i] + coeff[5] * _id[_cwo[5]+i] + coeff[6] * _id[_cwo[6]+i] + coeff[7] * _id[_cwo[7]+i] + coeff[8] * _id[_cwo[8]+i] + coeff[9] * _id[_cwo[9]+i];'

    def CreateCacheGenerationList(self):
        """Register one cached data column per term, in coefficient order."""
        cg = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [cg.CG_Ones(NameOrValueFlag=1), []],
            [cg.CG_LogX(NameOrValueFlag=1), []],
            [cg.CG_LogY(NameOrValueFlag=1), []],
            [cg.CG_PowLogX(NameOrValueFlag=1, args=[2.0]), [2.0]],
            [cg.CG_PowLogY(NameOrValueFlag=1, args=[2.0]), [2.0]],
            [cg.CG_PowLogX(NameOrValueFlag=1, args=[3.0]), [3.0]],
            [cg.CG_PowLogY(NameOrValueFlag=1, args=[3.0]), [3.0]],
            [cg.CG_LogX_LogY(NameOrValueFlag=1), []],
            [cg.CG_PowLogX_PowLogY(NameOrValueFlag=1, args=[2.0, 1.0]), [2.0, 1.0]],
            [cg.CG_PowLogX_PowLogY(NameOrValueFlag=1, args=[1.0, 2.0]), [1.0, 2.0]],
        ]

    def SpecificCodeCPP(self):
        """Return C++ source accumulating one "temp += ..." line per term."""
        designators = self.coefficientDesignatorTuple
        terms = (
            "",
            " * log(x_in)",
            " * log(y_in)",
            " * pow(log(x_in), 2.0)",
            " * pow(log(y_in), 2.0)",
            " * pow(log(x_in), 3.0)",
            " * pow(log(y_in), 3.0)",
            " * log(x_in) * log(y_in)",
            " * pow(log(x_in), 2.0) * log(y_in)",
            " * log(x_in) * pow(log(y_in), 2.0)",
        )
        return "".join("\ttemp += " + d + t + ";\n"
                       for d, t in zip(designators, terms))
class LinearLogarithmicTransform3D(pythonequations.EquationBaseClasses.Equation3D):
    """Fits z = a + b*ln(d*x+f) + c*ln(g*y+h).

    Nonlinear in d, f, g, h (they sit inside ln()), so there is no
    LinearSSQSolverFlag here; raw x and y are cached and the scale/offset is
    applied inside log() during evaluation.
    """
    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = False
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True
    _name = "Linear Logarithmic Transform"
    _HTML = "z = a + b*ln(d*x+f) + c*ln(g*y+h)"
    # BUGFIX: the original tuple read ("a", "b", "c", 'd', 'f', 'g' 'h') -- the
    # missing comma implicitly concatenated 'g' and 'h' into a single "gh"
    # designator, yielding only 6 names for the 7 coefficients (coeff[0..6])
    # referenced by function_cpp_code and by the HTML formula above.
    coefficientDesignatorTuple = ("a", "b", "c", "d", "f", "g", "h")
    # C++ evaluation: log() is applied to the raw cached x/y columns here.
    function_cpp_code = 'temp = coeff[0] + coeff[1] * log(coeff[3]*_id[_cwo[0]+i]+coeff[4]) + coeff[2] * log(coeff[5]*_id[_cwo[1]+i]+coeff[6]);'

    def CreateCacheGenerationList(self):
        """Cache the raw x and y columns (transforms happen at evaluation time)."""
        self.CacheGenerationList = []
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_X(NameOrValueFlag=1), []])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_Y(NameOrValueFlag=1), []])

    def SpecificCodeCPP(self):
        """Return C++ source accumulating one "temp += ..." line per term."""
        s = "\ttemp += " + self.coefficientDesignatorTuple[0] + ";\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[1] + " * log(d * x_in + f);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[2] + " * log(g * y_in + h);\n"
        return s
class SimplifiedQuadraticLogarithmicTransform3D(pythonequations.EquationBaseClasses.Equation3D):
    """Fits z = a + b*ln(g*x+h) + c*ln(i*y+j) + d*ln(g*x+h)^2 + f*ln(i*y+j)^2.

    Nonlinear in g..j (inside ln()); raw x and y are cached and the
    scale/offset is applied inside log() during evaluation.
    """
    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = False
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True
    _name = "Simplified Quadratic Logarithmic Transform"
    _HTML = "z = a + b*ln(g*x+h) + c*ln(i*y+j) + d*ln(g*x+h)<SUP>2</SUP> + f*ln(i*y+j)<SUP>2</SUP>"
    coefficientDesignatorTuple = ("a", "b", "c", "d", "f", "g", "h", "i", "j")
    # C++ evaluation: compute the two transformed logs once, then combine.
    function_cpp_code = (
        'temp_x_sq = log(coeff[5] * _id[_cwo[0]+i] + coeff[6]);'
        'temp_y_sq = log(coeff[7] * _id[_cwo[1]+i] + coeff[8]);'
        'temp = coeff[0] + coeff[1] * temp_x_sq + coeff[2] * temp_y_sq'
        ' + coeff[3] * pow(temp_x_sq, 2.0) + coeff[4] * pow(temp_y_sq, 2.0);'
    )

    def CreateCacheGenerationList(self):
        """Cache the raw x and y columns (transforms happen at evaluation time)."""
        cg = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [cg.CG_X(NameOrValueFlag=1), []],
            [cg.CG_Y(NameOrValueFlag=1), []],
        ]

    def SpecificCodeCPP(self):
        """Return C++ source accumulating one "temp += ..." line per term."""
        designators = self.coefficientDesignatorTuple
        terms = (
            "",
            " * log(g * x_in + h)",
            " * log(i * y_in + j)",
            " * pow(log(g * x_in + h), 2.0)",
            " * pow(log(i * y_in + j), 2.0)",
        )
        return "".join("\ttemp += " + d + t + ";\n"
                       for d, t in zip(designators, terms))
class FullQuadraticLogarithmicTransform3D(pythonequations.EquationBaseClasses.Equation3D):
    """Fits z = a + b*ln(h*x+i) + c*ln(j*y+k) + d*ln(h*x+i)^2 + f*ln(j*y+k)^2
    + g*ln(h*x+i)*ln(j*y+k).

    Nonlinear in h..k (inside ln()); the cached columns must therefore be the
    raw x and y values -- function_cpp_code applies log(coeff*value + coeff)
    to them itself.
    """
    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = False
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True
    _name = "Full Quadratic Logarithmic Transform"
    _HTML = "z = a + b*ln(h*x+i) + c*ln(j*y+k) + d*ln(h*x+i)<SUP>2</SUP> + f*ln(j*y+k)<SUP>2</SUP> + g*ln(h*x+i)*ln(j*y+k)"
    coefficientDesignatorTuple = ("a", "b", "c", "d", "f", "g", "h", "i", "j", "k")
    # C++ evaluation: compute the two transformed logs once, then combine.
    function_cpp_code = (
        'temp_x_sq = log(coeff[6]*_id[_cwo[0]+i]+coeff[7]);'
        'temp_y_sq = log(coeff[8]*_id[_cwo[1]+i]+coeff[9]);'
        'temp = coeff[0] + coeff[1] * temp_x_sq + coeff[2] * temp_y_sq + '
        'coeff[3] * pow(temp_x_sq, 2.0) + coeff[4] * pow(temp_y_sq, 2.0) + '
        'coeff[5] * temp_x_sq * temp_y_sq;'
    )

    def CreateCacheGenerationList(self):
        """Cache the raw x and y columns (transforms happen at evaluation time).

        BUGFIX: this previously cached CG_LogX/CG_LogY, but function_cpp_code
        computes log(coeff*cached + coeff), which would have evaluated
        log(h*log(x) + i).  The sibling transform equations
        (LinearLogarithmicTransform3D, SimplifiedQuadraticLogarithmicTransform3D)
        cache the raw values for the identical formula shape, so CG_X/CG_Y is
        used here as well.
        """
        self.CacheGenerationList = []
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_X(NameOrValueFlag=1), []])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_Y(NameOrValueFlag=1), []])

    def SpecificCodeCPP(self):
        """Return C++ source accumulating one "temp += ..." line per term."""
        s = "\ttemp += " + self.coefficientDesignatorTuple[0] + ";\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[1] + " * log(h * x_in + i);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[2] + " * log(j * y_in + k);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[3] + " * pow(log(h * x_in + i), 2.0);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[4] + " * pow(log(j * y_in + k), 2.0);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[5] + " * log(h * x_in + i) * log(j * y_in + k);\n"
        return s
class SimplifiedCubicLogarithmicTransform3D(pythonequations.EquationBaseClasses.Equation3D):
    """Fits z = a + b*ln(i*x+j) + c*ln(k*y+m) + d*ln(i*x+j)^2 + f*ln(k*y+m)^2
    + g*ln(i*x+j)^3 + h*ln(k*y+m)^3.

    Nonlinear in i, j, k, m (inside ln()); the cached columns must therefore
    be the raw x and y values -- function_cpp_code applies
    log(coeff*value + coeff) to them itself.
    """
    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = False
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True
    _name = "Simplified Cubic Logarithmic Transform"
    _HTML = "z = a + b*ln(i*x+j) + c*ln(k*y+m) + d*ln(i*x+j)<SUP>2</SUP> + f*ln(k*y+m)<SUP>2</SUP> + g*ln(i*x+j)<SUP>3</SUP> + h*ln(k*y+m)<SUP>3</SUP>"
    coefficientDesignatorTuple = ("a", "b", "c", "d", "f", "g", "h", "i", "j", "k", "m")
    # C++ evaluation: compute the two transformed logs once, then combine.
    function_cpp_code = (
        'temp_x_sq = log(coeff[7]*_id[_cwo[0]+i]+coeff[8]);'
        'temp_y_sq = log(coeff[9]*_id[_cwo[1]+i]+coeff[10]);'
        'temp = coeff[0] + coeff[1] * temp_x_sq + coeff[2] * temp_y_sq + '
        'coeff[3] * pow(temp_x_sq, 2.0) + coeff[4] * pow(temp_y_sq, 2.0) + '
        'coeff[5] * pow(temp_x_sq, 3.0) + coeff[6] * pow(temp_y_sq, 3.0);'
    )

    def CreateCacheGenerationList(self):
        """Cache the raw x and y columns (transforms happen at evaluation time).

        BUGFIX: this previously cached CG_LogX/CG_LogY, but function_cpp_code
        computes log(coeff*cached + coeff), which would have evaluated
        log(i*log(x) + j).  The sibling transform equations
        (LinearLogarithmicTransform3D, SimplifiedQuadraticLogarithmicTransform3D)
        cache the raw values for the identical formula shape, so CG_X/CG_Y is
        used here as well.
        """
        self.CacheGenerationList = []
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_X(NameOrValueFlag=1), []])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_Y(NameOrValueFlag=1), []])

    def SpecificCodeCPP(self):
        """Return C++ source accumulating one "temp += ..." line per term."""
        s = "\ttemp += " + self.coefficientDesignatorTuple[0] + ";\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[1] + " * log(i * x_in + j);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[2] + " * log(k * y_in + m);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[3] + " * pow(log(i * x_in + j), 2.0);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[4] + " * pow(log(k * y_in + m), 2.0);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[5] + " * pow(log(i * x_in + j), 3.0);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[6] + " * pow(log(k * y_in + m), 3.0);\n"
        return s
class FullCubicLogarithmicTransform3D(pythonequations.EquationBaseClasses.Equation3D):
    """Fits the full cubic-logarithmic transform surface with cross terms:

    z = a + b*ln(m*x+n) + c*ln(o*y+p) + d*ln(m*x+n)^2 + f*ln(o*y+p)^2
        + g*ln(m*x+n)^3 + h*ln(o*y+p)^3 + i*ln(m*x+n)*ln(o*y+p)
        + j*ln(m*x+n)^2*ln(o*y+p) + k*ln(m*x+n)*ln(o*y+p)^2

    Nonlinear in m, n, o, p (inside ln()); the cached columns must therefore
    be the raw x and y values -- function_cpp_code applies
    log(coeff*value + coeff) to them itself.
    """
    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = False
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True
    _name = "Full Cubic Logarithmic Transform"
    _HTML = "z = a + b*ln(m*x+n) + c*ln(o*y+p) + d*ln(m*x+n)<SUP>2</SUP> + f*ln(o*y+p)<SUP>2</SUP> + g*ln(m*x+n)<SUP>3</SUP> + h*ln(o*y+p)<SUP>3</SUP> + i*ln(m*x+n)*ln(o*y+p) + j*ln(m*x+n)<SUP>2</SUP>*ln(o*y+p) + k*ln(m*x+n)*ln(o*y+p)<SUP>2</SUP>"
    coefficientDesignatorTuple = ("a", "b", "c", "d", "f", "g", "h", "i", "j", "k", "m", "n", "o", "p")
    # C++ evaluation: compute the two transformed logs once, then combine.
    function_cpp_code = (
        'temp_x_sq = log(coeff[10]*_id[_cwo[0]+i]+coeff[11]);'
        'temp_y_sq = log(coeff[12]*_id[_cwo[1]+i]+coeff[13]);'
        'temp = coeff[0] + coeff[1] * temp_x_sq + coeff[2] * temp_y_sq + '
        'coeff[3] * pow(temp_x_sq, 2.0) + coeff[4] * pow(temp_y_sq, 2.0) + '
        'coeff[5] * pow(temp_x_sq, 3.0) + coeff[6] * pow(temp_y_sq, 3.0) + '
        'coeff[7] * temp_x_sq * temp_y_sq + '
        'coeff[8] * pow(temp_x_sq, 2.0) * temp_y_sq + coeff[9] * temp_x_sq * pow(temp_y_sq, 2.0);'
    )

    def CreateCacheGenerationList(self):
        """Cache the raw x and y columns (transforms happen at evaluation time).

        BUGFIX: this previously cached CG_LogX/CG_LogY, but function_cpp_code
        computes log(coeff*cached + coeff), which would have evaluated
        log(m*log(x) + n).  The sibling transform equations
        (LinearLogarithmicTransform3D, SimplifiedQuadraticLogarithmicTransform3D)
        cache the raw values for the identical formula shape, so CG_X/CG_Y is
        used here as well.
        """
        self.CacheGenerationList = []
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_X(NameOrValueFlag=1), []])
        self.CacheGenerationList.append([pythonequations.ExtraCodeForEquationBaseClasses.CG_Y(NameOrValueFlag=1), []])

    def SpecificCodeCPP(self):
        """Return C++ source accumulating one "temp += ..." line per term."""
        s = "\ttemp += " + self.coefficientDesignatorTuple[0] + ";\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[1] + " * log(m * x_in + n);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[2] + " * log(o * y_in + p);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[3] + " * pow(log(m * x_in + n), 2.0);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[4] + " * pow(log(o * y_in + p), 2.0);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[5] + " * pow(log(m * x_in + n), 3.0);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[6] + " * pow(log(o * y_in + p), 3.0);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[7] + " * log(m * x_in + n) * log(o * y_in + p);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[8] + " * pow(log(m * x_in + n), 2.0) * log(o * y_in + p);\n"
        s += "\ttemp += " + self.coefficientDesignatorTuple[9] + " * log(m * x_in + n) * pow(log(o * y_in + p), 2.0);\n"
        return s
| {
"content_hash": "ef8d398e98a78e729b2256dc6e19d3cc",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 294,
"avg_line_length": 63.75297619047619,
"alnum_prop": 0.6492694085243452,
"repo_name": "JMoravec/unkRadnet",
"id": "e57ccef59a5f20497de309817ddc8d58f81bf80d",
"size": "21771",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zunzunCode/pythonequations/Equations3D/Logarithmic.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6107"
},
{
"name": "Logos",
"bytes": "132148"
},
{
"name": "M",
"bytes": "832584"
},
{
"name": "Matlab",
"bytes": "401"
},
{
"name": "Python",
"bytes": "2747757"
},
{
"name": "Shell",
"bytes": "418"
}
],
"symlink_target": ""
} |
from fabric.api import run, env, cd, prefix, shell_env, local
from config import load_config
# Load deployment configuration (host string etc.) once at import time.
config = load_config()
# NOTE(review): module-level alias; the tasks below read config.HOST_STRING
# directly, so this name appears unused within this file -- confirm callers
# before removing.
host_string = config.HOST_STRING
def deploy():
    """Deploy the application to the production host.

    Pulls the latest code, rebuilds front-end assets, updates Python
    dependencies inside the virtualenv, migrates the database, and restarts
    the supervised process.  '#{project}' is a placeholder substituted by the
    Flask-Boost project generator.
    """
    env.host_string = config.HOST_STRING
    with cd('/var/www/#{project}'), shell_env(MODE='PRODUCTION'):
        # Refresh the working copy and rebuild front-end assets.
        for command in ('git reset --hard HEAD', 'git pull', 'npm install', 'gulp'):
            run(command)
        # Python-side updates run inside the project's virtualenv.
        with prefix('source venv/bin/activate'):
            for command in ('pip install -r requirements.txt',
                            'python manage.py db upgrade',
                            'python manage.py build'):
                run(command)
        run('supervisorctl restart #{project}')
def restart():
    """Restart the supervised application process on the production host.

    '#{project}' is a placeholder substituted by the Flask-Boost project
    generator.
    """
    env.host_string = config.HOST_STRING
    run('supervisorctl restart #{project}')
| {
"content_hash": "3f49869a7384d96512eb49f70177880d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 61,
"avg_line_length": 30.76,
"alnum_prop": 0.5890767230169051,
"repo_name": "hustlzp/Flask-Boost",
"id": "89607f85deadb7a052cde6fcf4cf448dd851ccec",
"size": "785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_boost/project/fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2051"
},
{
"name": "HTML",
"bytes": "9721"
},
{
"name": "JavaScript",
"bytes": "8122"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Nginx",
"bytes": "876"
},
{
"name": "Python",
"bytes": "37623"
},
{
"name": "Shell",
"bytes": "23"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for the sigrix_slix quest-item object.

    Appears to be generator-produced (note the modification markers below);
    ``kernel`` is accepted but unused here -- presumably part of the common
    template-factory signature, TODO confirm.
    """
    result = Tangible()
    result.template = "object/tangible/mission/quest_item/shared_sigrix_slix_q2_needed.iff"
    result.attribute_template_id = -1
    result.stfName("loot_tals_n","sigrix_slix_q2_needed")
    # Hand edits belong only between the generator markers below.
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
"content_hash": "5106dade6711bfe39e5787ebc8c6aa8f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 88,
"avg_line_length": 25.53846153846154,
"alnum_prop": 0.7018072289156626,
"repo_name": "obi-two/Rebelion",
"id": "4641a18a7acb978d2638778c1d649df744c07dc4",
"size": "477",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/mission/quest_item/shared_sigrix_slix_q2_needed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from customforms.models import Form, Question, Choice
class ChoiceInline(admin.TabularInline):
    """Inline tabular editor for Choice rows on the Question admin page."""
    model = Choice
    # No blank extra rows by default.
    extra = 0
    fields = ('title', 'position', )
class QuestionAdmin(admin.ModelAdmin):
    """Admin configuration for Question, with its Choices edited inline."""
    # Columns shown in the changelist.
    list_display = ('form', 'title', 'position')
    list_filter = ('form', )
    inlines = [ChoiceInline]
    save_on_top = True
    # Field order on the add/change form.
    fields = (
        'title', 'form', 'help_text', 'question_type', 'required', 'position')
class FormAdmin(admin.ModelAdmin):
    """Admin configuration for Form; the changelist shows only the title."""
    list_display = ('title', )
# Register both models with the default admin site; Choice has no standalone
# admin -- it is edited inline via ChoiceInline on the Question page.
admin.site.register(Form, FormAdmin)
admin.site.register(Question, QuestionAdmin)
| {
"content_hash": "35b18b7153372a705fddd844cc4ab33a",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 78,
"avg_line_length": 23.76923076923077,
"alnum_prop": 0.6715210355987055,
"repo_name": "cschwede/django-customforms",
"id": "93841bb20b229760d85e1706e2101c9cdc7477b6",
"size": "660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "customforms/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9621"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.