| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
""" Registration tests """
import os
from shutil import copy
import pytest
from tempfile import TemporaryDirectory
from nipype.pipeline import engine as pe
from ..interfaces.reportlets.registration import (
FLIRTRPT,
SpatialNormalizationRPT,
ANTSRegistrationRPT,
BBRegisterRPT,
MRICoregRPT,
ApplyXFMRPT,
SimpleBeforeAfterRPT,
)
from .conftest import _run_interface_mock, datadir, has_fsl, has_freesurfer
def _smoke_test_report(report_interface, artifact_name):
with TemporaryDirectory() as tmpdir:
res = pe.Node(report_interface, name="smoke_test", base_dir=tmpdir).run()
out_report = res.outputs.out_report
save_artifacts = os.getenv("SAVE_CIRCLE_ARTIFACTS", False)
if save_artifacts:
copy(out_report, os.path.join(save_artifacts, artifact_name))
assert os.path.isfile(out_report), "Report does not exist"
@pytest.mark.skipif(not has_fsl, reason="No FSL")
def test_FLIRTRPT(reference, moving):
""" the FLIRT report capable test """
flirt_rpt = FLIRTRPT(generate_report=True, in_file=moving, reference=reference)
_smoke_test_report(flirt_rpt, "testFLIRT.svg")
@pytest.mark.skipif(not has_freesurfer, reason="No FreeSurfer")
def test_MRICoregRPT(monkeypatch, reference, moving, nthreads):
""" the MRICoreg report capable test """
def _agg(objekt, runtime):
outputs = objekt.output_spec()
outputs.out_lta_file = os.path.join(datadir, "testMRICoregRPT-out_lta_file.lta")
outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report)
return outputs
# Patch the _run_interface method
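# and aggregate_outputs, so the test never actually runs FreeSurfer; the LTA
# output points at a pre-generated fixture in datadir instead.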
monkeypatch.setattr(MRICoregRPT, "_run_interface", _run_interface_mock)
monkeypatch.setattr(MRICoregRPT, "aggregate_outputs", _agg)
mri_coreg_rpt = MRICoregRPT(
generate_report=True,
source_file=moving,
reference_file=reference,
num_threads=nthreads,
)
_smoke_test_report(mri_coreg_rpt, "testMRICoreg.svg")
@pytest.mark.skipif(not has_fsl, reason="No FSL")
def test_ApplyXFMRPT(reference, moving):
""" the ApplyXFM report capable test """
flirt_rpt = FLIRTRPT(generate_report=False, in_file=moving, reference=reference)
applyxfm_rpt = ApplyXFMRPT(
generate_report=True,
in_file=moving,
in_matrix_file=flirt_rpt.run().outputs.out_matrix_file,
reference=reference,
apply_xfm=True,
)
_smoke_test_report(applyxfm_rpt, "testApplyXFM.svg")
@pytest.mark.skipif(not has_fsl, reason="No FSL")
def test_SimpleBeforeAfterRPT(reference, moving):
""" the SimpleBeforeAfterRPT report capable test """
flirt_rpt = FLIRTRPT(generate_report=False, in_file=moving, reference=reference)
ba_rpt = SimpleBeforeAfterRPT(
generate_report=True, before=reference, after=flirt_rpt.run().outputs.out_file
)
_smoke_test_report(ba_rpt, "test_SimpleBeforeAfterRPT.svg")
@pytest.mark.skipif(not has_fsl, reason="No FSL")
def test_FLIRTRPT_w_BBR(reference, reference_mask, moving):
""" test FLIRTRPT with input `wm_seg` set.
For the sake of testing ONLY, `wm_seg` is set to the filename of a brain mask """
flirt_rpt = FLIRTRPT(
generate_report=True, in_file=moving, reference=reference, wm_seg=reference_mask
)
_smoke_test_report(flirt_rpt, "testFLIRTRPTBBR.svg")
@pytest.mark.skipif(not has_freesurfer, reason="No FreeSurfer")
def test_BBRegisterRPT(monkeypatch, moving):
""" the BBRegister report capable test """
def _agg(objekt, runtime):
outputs = objekt.output_spec()
outputs.out_lta_file = os.path.join(
datadir, "testBBRegisterRPT-out_lta_file.lta"
)
outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report)
return outputs
# Patch the _run_interface method
monkeypatch.setattr(BBRegisterRPT, "_run_interface", _run_interface_mock)
monkeypatch.setattr(BBRegisterRPT, "aggregate_outputs", _agg)
subject_id = "fsaverage"
bbregister_rpt = BBRegisterRPT(
generate_report=True,
contrast_type="t1",
init="fsl",
source_file=moving,
subject_id=subject_id,
registered_file=True,
)
_smoke_test_report(bbregister_rpt, "testBBRegister.svg")
def test_SpatialNormalizationRPT(monkeypatch, moving):
""" the SpatialNormalizationRPT report capable test """
def _agg(objekt, runtime):
outputs = objekt.output_spec()
outputs.warped_image = os.path.join(
datadir, "testSpatialNormalizationRPTMovingWarpedImage.nii.gz"
)
outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report)
return outputs
# Patch the _run_interface method
monkeypatch.setattr(
SpatialNormalizationRPT, "_run_interface", _run_interface_mock
)
monkeypatch.setattr(SpatialNormalizationRPT, "aggregate_outputs", _agg)
ants_rpt = SpatialNormalizationRPT(
generate_report=True, moving_image=moving, flavor="testing"
)
_smoke_test_report(ants_rpt, "testSpatialNormalizationRPT.svg")
def test_SpatialNormalizationRPT_masked(monkeypatch, moving, reference_mask):
""" the SpatialNormalizationRPT report capable test with masking """
def _agg(objekt, runtime):
outputs = objekt.output_spec()
outputs.warped_image = os.path.join(
datadir, "testSpatialNormalizationRPTMovingWarpedImage.nii.gz"
)
outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report)
return outputs
# Patch the _run_interface method
monkeypatch.setattr(
SpatialNormalizationRPT, "_run_interface", _run_interface_mock
)
monkeypatch.setattr(SpatialNormalizationRPT, "aggregate_outputs", _agg)
ants_rpt = SpatialNormalizationRPT(
generate_report=True,
moving_image=moving,
reference_mask=reference_mask,
flavor="testing",
)
_smoke_test_report(ants_rpt, "testSpatialNormalizationRPT_masked.svg")
def test_ANTSRegistrationRPT(monkeypatch, reference, moving):
""" the SpatialNormalizationRPT report capable test """
import pkg_resources as pkgr
def _agg(objekt, runtime):
outputs = objekt.output_spec()
outputs.warped_image = os.path.join(
datadir, "testANTSRegistrationRPT-warped_image.nii.gz"
)
outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report)
return outputs
# Patch the _run_interface method
monkeypatch.setattr(ANTSRegistrationRPT, "_run_interface", _run_interface_mock)
monkeypatch.setattr(ANTSRegistrationRPT, "aggregate_outputs", _agg)
ants_rpt = ANTSRegistrationRPT(
generate_report=True,
moving_image=moving,
fixed_image=reference,
from_file=pkgr.resource_filename(
"niworkflows.data", "t1w-mni_registration_testing_000.json"
),
)
_smoke_test_report(ants_rpt, "testANTSRegistrationRPT.svg")
| poldracklab/niworkflows | niworkflows/tests/test_registration.py | Python | bsd-3-clause | 7,017 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import six
import argparse
from . import Command
from .run import Run
from .compare import Compare
from ..repo import get_repo, NoSuchNameError
from ..console import color_print, log
from .. import results
from .. import util
from . import common_args
class Continuous(Command):
@classmethod
def setup_arguments(cls, subparsers):
parser = subparsers.add_parser(
"continuous", help="Compare two commits directly",
description="""Run a side-by-side comparison of two commits for
continuous integration.""")
parser.add_argument(
'base', nargs='?', default=None,
help="""The commit/branch to compare against. By default, the
parent of the tested commit.""")
parser.add_argument(
'branch', default=None,
help="""The commit/branch to test. By default, the first configured branch.""")
common_args.add_record_samples(parser, record_default=True)
parser.add_argument(
"--quick", "-q", action="store_true",
help="""Do a "quick" run, where each benchmark function is
run only once. This is useful to find basic errors in the
benchmark functions faster. The results are unlikely to
be useful, and thus are not saved.""")
parser.add_argument(
"--interleave-rounds", action="store_true", default=None,
help="""Interleave benchmarks with multiple rounds across
commits. This can avoid measurement biases from commit ordering,
can take longer.""")
parser.add_argument(
"--no-interleave-rounds", action="store_false", dest="interleave_rounds")
# Deprecated '--(no-)interleave-processes' spellings, kept for backward compatibility
parser.add_argument(
"--interleave-processes", action="store_true", default=False, dest="interleave_rounds",
help=argparse.SUPPRESS)
parser.add_argument(
"--no-interleave-processes", action="store_false", dest="interleave_rounds",
help=argparse.SUPPRESS)
parser.add_argument(
"--strict", action="store_true",
help="When set true the run command will exit with a non-zero "
"return code if any benchmark is in a failed state")
common_args.add_compare(parser, sort_default='ratio', only_changed_default=True)
common_args.add_show_stderr(parser)
common_args.add_bench(parser)
common_args.add_machine(parser)
common_args.add_environment(parser)
common_args.add_launch_method(parser)
parser.set_defaults(func=cls.run_from_args)
return parser
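# Illustrative invocation (flag names assumed from common_args, not shown here):
#
#   asv continuous --factor 1.1 main feature-branch
#
# i.e. benchmark `feature-branch` (the positional `branch`) and compare it
# against `main` (the positional `base`).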
@classmethod
def run_from_conf_args(cls, conf, args, **kwargs):
return cls.run(
conf=conf, branch=args.branch, base=args.base,
factor=args.factor, split=args.split,
only_changed=args.only_changed, sort=args.sort,
use_stats=args.use_stats,
show_stderr=args.show_stderr, bench=args.bench, attribute=args.attribute,
machine=args.machine,
env_spec=args.env_spec, record_samples=args.record_samples,
append_samples=args.append_samples,
quick=args.quick, interleave_rounds=args.interleave_rounds,
launch_method=args.launch_method, strict=args.strict, **kwargs
)
@classmethod
def run(cls, conf, branch=None, base=None,
factor=None, split=False, only_changed=True, sort='ratio', use_stats=True,
show_stderr=False, bench=None,
attribute=None, machine=None, env_spec=None, record_samples=False, append_samples=False,
quick=False, interleave_rounds=None, launch_method=None, _machine_file=None,
strict=False):
repo = get_repo(conf)
repo.pull()
if branch is None:
branch = conf.branches[0]
try:
head = repo.get_hash_from_name(branch)
if base is None:
parent = repo.get_hash_from_parent(head)
else:
parent = repo.get_hash_from_name(base)
except NoSuchNameError as exc:
raise util.UserError("Unknown commit {0}".format(exc))
commit_hashes = [head, parent]
run_objs = {}
result = Run.run(
conf, range_spec=commit_hashes, bench=bench, attribute=attribute,
show_stderr=show_stderr, machine=machine, env_spec=env_spec,
record_samples=record_samples, append_samples=append_samples, quick=quick,
interleave_rounds=interleave_rounds,
launch_method=launch_method, strict=strict,
_returns=run_objs, _machine_file=_machine_file)
if result:
return result
log.flush()
def results_iter(commit_hash):
for env in run_objs['environments']:
machine_name = run_objs['machine_params']['machine']
filename = results.get_filename(
machine_name, commit_hash, env.name)
filename = os.path.join(conf.results_dir, filename)
try:
result = results.Results.load(filename, machine_name)
except util.UserError as err:
log.warning(six.text_type(err))
continue
for name, benchmark in six.iteritems(run_objs['benchmarks']):
params = benchmark['params']
version = benchmark['version']
value = result.get_result_value(name, params)
stats = result.get_result_stats(name, params)
samples = result.get_result_samples(name, params)
yield name, params, value, stats, samples, version, machine_name, env.name
commit_names = {parent: repo.get_name_from_hash(parent),
head: repo.get_name_from_hash(head)}
status = Compare.print_table(conf, parent, head,
resultset_1=results_iter(parent),
resultset_2=results_iter(head),
factor=factor, split=split,
use_stats=use_stats,
only_changed=only_changed, sort=sort,
commit_names=commit_names)
worsened, improved = status
color_print("")
if worsened:
color_print("SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.", 'red')
color_print("PERFORMANCE DECREASED.", 'red')
elif improved:
color_print("SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.", 'green')
color_print("PERFORMANCE INCREASED.", 'green')
else:
color_print("BENCHMARKS NOT SIGNIFICANTLY CHANGED.", 'green')
return worsened
| spacetelescope/asv | asv/commands/continuous.py | Python | bsd-3-clause | 7,132 |
# -*- coding: utf-8 -*-
"""
{{ project_name }}.libs.common.models
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains all models that can be used across apps
:copyright: (c) 2015
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .utils.text import slugify
class SlugModel(models.Model):
"""
A base class for any model that wants to implement an auto generated slug
field.
"""
# how many times we'll retry creating a slug before giving up
MAX_RETRIES = 100
slug = models.SlugField(_('slug'), max_length=255, unique=True)
class Meta:
abstract = True
@classmethod
def is_valid_slug(cls, slug):
"""Convenience method to check if the given slug already exists."""
return not cls.objects.filter(slug=slug).exists()
@classmethod
def get_by_slug(cls, slug):
"""
Return the :class:`{{ project_name }}.libs.common.models.SlugModel` for the given
slug. If the slug doesn't exist, return None.
:param slug: the slug value to search for
"""
try:
return cls.objects.get(slug=slug)
except cls.DoesNotExist:
return None
def base_slug_value(self):
"""
Subclasses of :class:`{{ project_name }}.libs.common.models.SlugModel` must
implement :meth:`{{ project_name }}.libs.common.models.SlugModel.base_slug_value`,
which returns a unicode value used as the basis of the slug.
"""
raise NotImplementedError
def generate_slug(self, value=None):
"""
Create a slug based on the value of
:meth:`{{ project_name }}.libs.common.models.SlugModel.base_slug_value`, ensuring
that the slug is unique by comparing it to existing slugs.
"""
if value is None:
value = self.base_slug_value()
field = self._meta.get_field('slug')
return slugify(value, max_length=field.max_length,
usable=self.is_valid_slug, max_retries=self.MAX_RETRIES)
def save(self, *args, **kwargs):
"""
Right before a model is saved, check to see if the slug field has yet
to be defined. If so, generate and set the
:attr:`{{ project_name }}.libs.common.models.SlugModel.slug`.
"""
if not self.slug:
# a slug has not yet been defined, generate one
self.slug = self.generate_slug()
return super(SlugModel, self).save(*args, **kwargs)
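# Illustrative only (not part of the original module): a hypothetical subclass
# showing the one method a SlugModel subclass must provide. It is kept abstract
# here so that importing it has no side effects.
class ExampleArticle(SlugModel):
    title = models.CharField(_('title'), max_length=255)

    class Meta:
        abstract = True

    def base_slug_value(self):
        # slugify() (used by generate_slug above) turns e.g. "My First Post"
        # into "my-first-post" and retries on collisions.
        return self.title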
| ericbuckley/django-project-template | project_name/libs/common/models.py | Python | bsd-3-clause | 2,547 |
from impedance_map.sphere import correlation_coefficient, form_factor, \
pair_distribution_function_PY, structure_factor_function_PY, \
cross_section_dimension, fit_correlation_coefficient_nls, \
fit_form_factor_nls
import numpy as np
import math
import unittest
class TestCode(unittest.TestCase):
def test1_sphere_cross_section_dimension(self):
a = 1
q = 0
A = cross_section_dimension(a, q)
A_real = 1
self.assertTrue(A == A_real)
def test2_sphere_cross_section_dimension(self):
a = 1
q = 1
A = cross_section_dimension(a, q)
A_real = 0
self.assertTrue(A == A_real)
def test3_sphere_cross_section_dimension(self):
a = 1
q = 0.5
A = cross_section_dimension(a, q)
A_real = np.sqrt(1 - 0.5 ** 2)
self.assertTrue(A == A_real)
def test1_sphere_form_factor(self):
ndim = 1
a = 1
k = np.linspace(0, 6, 7)
H = form_factor(k=k, a=a, ndim=ndim)
Hr = np.array([1., 0.70807342, 0.20670545, 0.00221276, 0.03579688,
0.03678143, 0.0021687])
self.assertTrue(np.allclose(H, Hr))
def test2_sphere_form_factor(self):
ndim = 2
a = 1
k = np.linspace(0, 6, 7)
H = form_factor(k=k, a=a, ndim=ndim)
Hr = np.array([1., 0.77457807, 0.3326115, 0.05109377, 0.00109043,
0.01716929, 0.008506])
self.assertTrue(np.allclose(H, Hr))
def test3_sphere_form_factor(self):
ndim = 3
a = 1
k = np.linspace(0, 6, 7)
H = form_factor(k=k, a=a, ndim=ndim)
Hr = np.array([1., 0.81632316, 0.42653525, 0.11949293, 0.00758346,
0.00325512, 0.00703836])
self.assertTrue(np.allclose(H, Hr))
def test1_sphere_corr_coeff(self):
ndim = 1
a = 1.
r = np.linspace(0., 2., 11)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.linspace(1., 0., 11)
self.assertTrue(np.allclose(b, b_real))
def test2_sphere_corr_coeff(self):
ndim = 1
a = 1.
r = np.linspace(0., 3., 16)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.zeros(len(r))
b_real[0:11] = np.linspace(1., 0., 11)
self.assertTrue(np.allclose(b, b_real))
def test3_sphere_corr_coeff(self):
ndim = 1
a = 3.
r = np.linspace(0., 6., 11)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.linspace(1., 0., 11)
self.assertTrue(np.allclose(b, b_real))
def test4_sphere_corr_coeff(self):
ndim = 1
a = 0.0
r = np.linspace(0., 6., 11)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.zeros(11)
self.assertTrue(np.allclose(b, b_real))
def test5_sphere_corr_coeff(self):
ndim = 2
a = 1.
r = np.linspace(0., 2., 6)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.array([1., 0.74706008, 0.50463158, 0.28475698, 0.10408804,
0.])
self.assertTrue(np.allclose(b, b_real))
def test6_sphere_corr_coeff(self):
ndim = 2
a = 1.
r = np.linspace(0., 3.2, 9)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.array([1., 0.74706008, 0.50463158, 0.28475698, 0.10408804,
0., 0., 0., 0.])
self.assertTrue(np.allclose(b, b_real))
def test7_sphere_corr_coeff(self):
ndim = 2
a = 3.
r = np.linspace(0., 6., 6)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.array([1., 0.74706008, 0.50463158, 0.28475698, 0.10408804,
0.])
self.assertTrue(np.allclose(b, b_real))
def test8_sphere_corr_coeff(self):
ndim = 2
a = 0.0
r = np.linspace(0., 6., 6)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.zeros(6)
self.assertTrue(np.allclose(b, b_real))
def test9_sphere_corr_coeff(self):
ndim = 3
a = 1.
r = np.linspace(0., 2., 6)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.array([1., 0.704, 0.432, 0.208, 0.056, 0.])
self.assertTrue(np.allclose(b, b_real))
def test10_sphere_corr_coeff(self):
ndim = 3
a = 1.
r = np.linspace(0., 3.2, 9)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.array([1., 0.704, 0.432, 0.208, 0.056, 0., 0., 0., 0.])
self.assertTrue(np.allclose(b, b_real))
def test11_sphere_corr_coeff(self):
ndim = 3
a = 3.
r = np.linspace(0., 6., 6)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.array([1., 0.704, 0.432, 0.208, 0.056, 0.])
self.assertTrue(np.allclose(b, b_real))
def test13_sphere_corr_coeff(self):
ndim = 3
a = 0.0
r = np.linspace(0., 6., 6)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.zeros(6)
self.assertTrue(np.allclose(b, b_real))
def test1_pair_distribution_function_PY(self):
f = 0.4
a = 0.01
rb = np.linspace(1, 5, 5)
r = rb * 2 * a
g = pair_distribution_function_PY(r, a, f)
g_real = np.asarray([ 1.665534 , 1.14167826, 1.04312259, 1.01389934, 1.00453527])
self.assertTrue(np.allclose(g, g_real))
def test1_structure_factor_function_PY(self):
k = np.linspace(0, 10, 5)
f = 0.15
a = 0.01
S = structure_factor_function_PY(k/a, a=0.01, f=0.15)
S_real = np.asarray([ 0.30887944, 1.03988757, 0.95564256, 0.98177134, 1.00532684])
self.assertTrue(np.allclose(S, S_real))
def test1_fit_correlation_coefficient_nls(self):
a = 0.75
r = np.linspace(0, 3., 10)
y = correlation_coefficient(a=a, r=r)
a_guess = fit_correlation_coefficient_nls(r, y)
self.assertTrue(np.allclose(a, a_guess))
def test1_fit_form_factor_nls(self):
a = 0.75
k = np.linspace(0.01, 3, 10)
y = form_factor(a=a, k=k)
a_guess = fit_form_factor_nls(k, y)
self.assertTrue(np.allclose(a, a_guess))
if __name__ == '__main__':
print('Running unit tests for impedance_map.sphere')
unittest.main()
| aluchies/impedance_map | impedance_map/tests/test_sphere.py | Python | bsd-3-clause | 6,422 |
#! /usr/bin/python
#
# Copyright (C) 2012,2014 Stefano Sanfilippo.
# See LICENSE.txt in the main source package for more information
#
from __future__ import with_statement
import sys
# Ugly ugly trick to give us compatibility both with Py2 and Py3k
try:
import cStringIO as StringIO
except ImportError:
try:
import StringIO
except ImportError:
import io as StringIO
FLAG = ['DropTail', 'RED', 'CBQ', 'FQ', 'SFQ', 'DRR']
def fix(filename, overwrite=False):
"""Will append a `Off` flag into each `(Duplex|Simplex)` Link declaration.
Needed because the old file format was updated to include
Queue Visualization. Converted file will be saved to ${somename}.new.nss
Args:
filename: the name of the file to be converted.
overwrite: will overwrite input file if `True`.
Returns:
None
"""
with StringIO.StringIO() as buffer:
with open(filename, 'rt') as sourcefile:
ready = steady = False
for line in sourcefile:
buffer.write(line)
if line[:-1] in FLAG:
ready = True
if ready and not steady:
steady = True
elif ready and steady:
buffer.write('Off\n')
ready = steady = False
if not overwrite:
filename = filename.replace('.nss', '.new.nss')
with open(filename, 'wt') as sourcefile:
sourcefile.write(buffer.getvalue())
def main():
filenames = sys.argv[1:]
if filenames:
for filename in filenames:
print ('Converting %s' % filename)
fix(filename)
else:
print('Usage: %s file1.nss [file2.nss [...]]' % sys.argv[0])
sys.exit(0)
if __name__ == '__main__':
main()
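# Illustrative sketch of the conversion performed by fix() above. The fragment
# is hypothetical (the real .nss syntax is not shown in this script); it only
# makes the rule concrete: after a line that is exactly one of the FLAG queue
# names and the single line that follows it, an "Off" line is inserted.
#
#   before:            after:
#     DropTail           DropTail
#     <link params>      <link params>
#                        Off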
| esseks/nscript | scripts/fixnss.py | Python | bsd-3-clause | 1,822 |
# -*- coding: utf-8 -*-
from ._compat import unittest
from ._adapt import DEFAULT_URI, drop, IS_MSSQL, IS_IMAP, IS_GAE, IS_TERADATA, IS_ORACLE
from pydal import DAL, Field
from pydal._compat import PY2
@unittest.skipIf(IS_IMAP, "Reference not Null unsupported on IMAP")
@unittest.skipIf(IS_ORACLE, "Reference Not Null unsupported on Oracle")
class TestReferenceNOTNULL(unittest.TestCase):
# 1:N not null
def testRun(self):
for ref, bigint in [("reference", False), ("big-reference", True)]:
db = DAL(DEFAULT_URI, check_reserved=["all"], bigint_id=bigint)
if bigint and "big-id" not in db._adapter.types:
continue
db.define_table("tt", Field("vv"))
db.define_table(
"ttt", Field("vv"), Field("tt_id", "%s tt" % ref, notnull=True)
)
self.assertRaises(Exception, db.ttt.insert, vv="pydal")
# The following is mandatory for backends such as PG to close the aborted transaction
db.commit()
drop(db.ttt)
drop(db.tt)
db.close()
@unittest.skipIf(IS_IMAP, "Reference Unique unsupported on IMAP")
@unittest.skipIf(IS_GAE, "Reference Unique unsupported on GAE")
@unittest.skipIf(IS_ORACLE, "Reference Unique unsupported on Oracle")
class TestReferenceUNIQUE(unittest.TestCase):
# 1:1 relation
def testRun(self):
for ref, bigint in [("reference", False), ("big-reference", True)]:
db = DAL(DEFAULT_URI, check_reserved=["all"], bigint_id=bigint)
if bigint and "big-id" not in db._adapter.types:
continue
db.define_table("tt", Field("vv"))
db.define_table(
"ttt",
Field("vv"),
Field("tt_id", "%s tt" % ref, unique=True),
Field("tt_uq", "integer", unique=True),
)
id_1 = db.tt.insert(vv="pydal")
id_2 = db.tt.insert(vv="pydal")
# Null tt_id
db.ttt.insert(vv="pydal", tt_uq=1)
# first insert is OK
db.ttt.insert(tt_id=id_1, tt_uq=2)
self.assertRaises(Exception, db.ttt.insert, tt_id=id_1, tt_uq=3)
self.assertRaises(Exception, db.ttt.insert, tt_id=id_2, tt_uq=2)
# The following is mandatory for backends such as PG to close the aborted transaction
db.commit()
drop(db.ttt)
drop(db.tt)
db.close()
@unittest.skipIf(IS_IMAP, "Reference Unique not Null unsupported on IMAP")
@unittest.skipIf(IS_GAE, "Reference Unique not Null unsupported on GAE")
@unittest.skipIf(IS_ORACLE, "Reference Unique not Null unsupported on Oracle")
class TestReferenceUNIQUENotNull(unittest.TestCase):
# 1:1 relation not null
def testRun(self):
for ref, bigint in [("reference", False), ("big-reference", True)]:
db = DAL(DEFAULT_URI, check_reserved=["all"], bigint_id=bigint)
if bigint and "big-id" not in db._adapter.types:
continue
db.define_table("tt", Field("vv"))
db.define_table(
"ttt",
Field("vv"),
Field("tt_id", "%s tt" % ref, unique=True, notnull=True),
)
self.assertRaises(Exception, db.ttt.insert, vv="pydal")
db.commit()
id_i = db.tt.insert(vv="pydal")
# first insert is OK
db.ttt.insert(tt_id=id_i)
self.assertRaises(Exception, db.ttt.insert, tt_id=id_i)
# The following is mandatory for backends such as PG to close the aborted transaction
db.commit()
drop(db.ttt)
drop(db.tt)
db.close()
@unittest.skipIf(IS_IMAP, "Skip unicode on IMAP")
@unittest.skipIf(IS_MSSQL and not PY2, "Skip unicode on py3 and MSSQL")
class TestUnicode(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=["all"])
db.define_table("tt", Field("vv"))
vv = "ἀγοραζε"
id_i = db.tt.insert(vv=vv)
row = db(db.tt.id == id_i).select().first()
self.assertEqual(row.vv, vv)
db.commit()
drop(db.tt)
db.close()
class TestParseDateTime(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=["all"])
#: skip for adapters that use drivers for datetime parsing
if db._adapter.parser.registered.get("datetime") is None:
return
parse = lambda v: db._adapter.parser.parse(v, "datetime", "datetime")
dt = parse("2015-09-04t12:33:36.223245")
self.assertEqual(dt.microsecond, 223245)
self.assertEqual(dt.hour, 12)
dt = parse("2015-09-04t12:33:36.223245Z")
self.assertEqual(dt.microsecond, 223245)
self.assertEqual(dt.hour, 12)
dt = parse("2015-09-04t12:33:36.223245-2:0")
self.assertEqual(dt.microsecond, 223245)
self.assertEqual(dt.hour, 10)
dt = parse("2015-09-04t12:33:36+1:0")
self.assertEqual(dt.microsecond, 0)
self.assertEqual(dt.hour, 13)
dt = parse("2015-09-04t12:33:36.123")
self.assertEqual(dt.microsecond, 123000)
dt = parse("2015-09-04t12:33:36.00123")
self.assertEqual(dt.microsecond, 1230)
dt = parse("2015-09-04t12:33:36.1234567890")
self.assertEqual(dt.microsecond, 123456)
db.close()
@unittest.skipIf(IS_IMAP, "chained join unsupported on IMAP")
@unittest.skipIf(IS_TERADATA, "chained join unsupported on TERADATA")
class TestChainedJoinUNIQUE(unittest.TestCase):
# 1:1 relation
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=["all"])
db.define_table("aa", Field("name"))
db.define_table("bb", Field("aa", "reference aa"), Field("name"))
for k in ("x", "y", "z"):
i = db.aa.insert(name=k)
for j in ("u", "v", "w"):
db.bb.insert(aa=i, name=k + j)
db.commit()
rows = db(db.aa).select()
rows.join(db.bb.aa, fields=[db.bb.name], orderby=[db.bb.name])
self.assertEqual(rows[0].bb[0].name, "xu")
self.assertEqual(rows[0].bb[1].name, "xv")
self.assertEqual(rows[0].bb[2].name, "xw")
self.assertEqual(rows[1].bb[0].name, "yu")
self.assertEqual(rows[1].bb[1].name, "yv")
self.assertEqual(rows[1].bb[2].name, "yw")
self.assertEqual(rows[2].bb[0].name, "zu")
self.assertEqual(rows[2].bb[1].name, "zv")
self.assertEqual(rows[2].bb[2].name, "zw")
rows = db(db.bb).select()
rows.join(db.aa.id, fields=[db.aa.name])
self.assertEqual(rows[0].aa.name, "x")
self.assertEqual(rows[1].aa.name, "x")
self.assertEqual(rows[2].aa.name, "x")
self.assertEqual(rows[3].aa.name, "y")
self.assertEqual(rows[4].aa.name, "y")
self.assertEqual(rows[5].aa.name, "y")
self.assertEqual(rows[6].aa.name, "z")
self.assertEqual(rows[7].aa.name, "z")
self.assertEqual(rows[8].aa.name, "z")
rows_json = rows.as_json()
drop(db.bb)
drop(db.aa)
db.close()
class TestNullAdapter(unittest.TestCase):
# Test that NullAdapter can define tables
def testRun(self):
db = DAL(None)
db.define_table("no_table", Field("aa"))
self.assertIsInstance(db.no_table.aa, Field)
self.assertIsInstance(db.no_table["aa"], Field)
db.close()
| willimoa/pydal | tests/base.py | Python | bsd-3-clause | 7,487 |
from __future__ import absolute_import, unicode_literals
from .premailer import Premailer, transform
__version__ = '2.9.4'
| industrydive/premailer | premailer/__init__.py | Python | bsd-3-clause | 124 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.broccoli.preprocess import MotionCorrection
def test_MotionCorrection_inputs():
input_map = dict(args=dict(argstr='%s',
),
device=dict(argstr='-device %d',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
copyfile=False,
mandatory=True,
position=0,
),
iterations=dict(argstr='-iterations %d',
),
output=dict(argstr='-output %s',
),
output_type=dict(),
platform=dict(argstr='-platform %d',
),
quiet=dict(argstr='-quiet',
),
referencevolume=dict(argstr='-referencevolume %s',
),
terminal_output=dict(nohash=True,
),
verbose=dict(argstr='-verbose',
),
)
inputs = MotionCorrection.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_MotionCorrection_outputs():
output_map = dict(motioncorrected_file=dict(),
parameters_file=dict(),
)
outputs = MotionCorrection.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| wanderine/nipype | nipype/interfaces/broccoli/tests/test_auto_MotionCorrection.py | Python | bsd-3-clause | 1,430 |
# -*- coding: utf-8 -*-
from .subject import Subject
class Music(Subject):
target = 'music'
def __repr__(self):
return '<DoubanAPI Music>'
| xlk521/cloudguantou | douban_client/api/music.py | Python | bsd-3-clause | 160 |
#!/usr/bin/env python
#
# Copyright 2021 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Delete files from the temporary directory on a Swarming bot."""
import os
import sys
if sys.platform == 'win32':
os.system(r'forfiles /P c:\users\chrome~1\appdata\local\temp '
r'/M * /C "cmd /c if @isdir==FALSE del @file"')
os.system(r'forfiles /P c:\users\chrome~1\appdata\local\temp '
r'/M * /C "cmd /c if @isdir==TRUE rmdir /S /Q @file"')
else:
os.system(r'rm -rf /tmp/*')
| google/skia-buildbot | scripts/run_on_swarming_bots/delete_tmp_dirs.py | Python | bsd-3-clause | 562 |
"""
soupselect.py - https://code.google.com/p/soupselect/
CSS selector support for BeautifulSoup.
soup = BeautifulSoup('<html>...')
select(soup, 'div')
- returns a list of div elements
select(soup, 'div#main ul a')
- returns a list of links inside a ul inside div#main
"""
import re
from bs4 import BeautifulSoup
tag_re = re.compile('^[a-z0-9]+$')
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
def attribute_checker(operator, attribute, value=''):
"""
Takes an operator, attribute and optional value; returns a function that
will return True for elements that match that combination.
"""
return {
'=': lambda el: el.get(attribute) == value,
# attribute includes value as one of a set of space separated tokens
'~': lambda el: value in el.get(attribute, '').split(),
# attribute starts with value
'^': lambda el: el.get(attribute, '').startswith(value),
# attribute ends with value
'$': lambda el: el.get(attribute, '').endswith(value),
# attribute contains value
'*': lambda el: value in el.get(attribute, ''),
# attribute is either exactly value or starts with value-
'|': lambda el: el.get(attribute, '') == value \
or el.get(attribute, '').startswith('%s-' % value),
}.get(operator, lambda el: attribute in el)
def select(soup, selector):
"""
soup should be a BeautifulSoup instance; selector is a CSS selector
specifying the elements you want to retrieve.
"""
tokens = selector.split()
current_context = [soup]
for token in tokens:
m = attribselect_re.match(token)
if m:
# Attribute selector
tag, attribute, operator, value = m.groups()
if not tag:
tag = True
checker = attribute_checker(operator, attribute, value)
found = []
for context in current_context:
found.extend([el for el in context.findAll(tag) if checker(el)])
current_context = found
continue
if '#' in token:
# ID selector
tag, id = token.split('#', 1)
if not tag:
tag = True
el = current_context[0].find(tag, {'id': id})
if not el:
return [] # No match
current_context = [el]
continue
if '.' in token:
# Class selector
tag, klass = token.split('.', 1)
if not tag:
tag = True
found = []
for context in current_context:
found.extend(
context.findAll(tag,
{'class': lambda attr: attr and klass in attr.split()}
)
)
current_context = found
continue
if token == '*':
# Star selector
found = []
for context in current_context:
found.extend(context.findAll(True))
current_context = found
continue
# Here we should just have a regular tag
if not tag_re.match(token):
return []
found = []
for context in current_context:
found.extend(context.findAll(token))
current_context = found
return current_context
def monkeypatch(BeautifulSoupClass=None):
"""
If you don't explicitly state the class to patch, defaults to the most
common import location for BeautifulSoup.
"""
if not BeautifulSoupClass:
from BeautifulSoup import BeautifulSoup as BeautifulSoupClass
BeautifulSoupClass.findSelect = select
def unmonkeypatch(BeautifulSoupClass=None):
if not BeautifulSoupClass:
from BeautifulSoup import BeautifulSoup as BeautifulSoupClass
delattr(BeautifulSoupClass, 'findSelect')
def cssFind(html, selector):
"""
Parse ``html`` with :class:`BeautifulSoup.BeautifulSoup` and use
:func:`.select` on the result.
Added by Espen A. Kristiansen to make it even easier to use for testing.
"""
soup = BeautifulSoup(html)
return select(soup, selector)
def cssGet(html, selector):
"""
Same as :func:`.cssFind`, but returns the first match.
Added by Espen A. Kristiansen to make it even easier to use for testing.
"""
try:
return cssFind(html, selector)[0]
except IndexError as e:
raise IndexError('Could not find {}.'.format(selector))
def cssExists(html, selector):
"""
Same as :func:`.cssFind`, but returns ``True`` if the selector matches at least one item.
Added by Espen A. Kristiansen to make it even easier to use for testing.
"""
matches = cssFind(html, selector)
return bool(len(matches))
def prettyhtml(html):
return BeautifulSoup(html).prettify()
def normalize_whitespace(html):
return re.sub(r'(\s|\xa0)+', ' ', html).strip()
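# Quick usage sketch (illustrative only, not part of the original module).
if __name__ == '__main__':
    example_html = '<div id="main"><ul><li><a href="/x">x</a></li></ul></div>'
    # cssFind() parses the HTML and select() walks the selector one token at a
    # time (tag, #id, .class, [attr=value], *), narrowing the context each step.
    print([a['href'] for a in cssFind(example_html, 'div#main ul a')])  # ['/x']
    print(cssExists(example_html, 'span.missing'))  # False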
| devilry/devilry-django | devilry/project/develop/testhelpers/soupselect.py | Python | bsd-3-clause | 5,265 |
def extractEmergencyExitsReleaseBlog(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractEmergencyExitsReleaseBlog.py | Python | bsd-3-clause | 226 |
import pytest
import textwrap
import ctypes
import os
import windows
import windows.debug
import windows.generated_def as gdef
import windows.native_exec.simple_x86 as x86
import windows.native_exec.simple_x64 as x64
from .conftest import generate_pop_and_exit_fixtures, pop_proc_32, pop_proc_64
from .pfwtest import *
proc32_debug = generate_pop_and_exit_fixtures([pop_proc_32], ids=["proc32dbg"], dwCreationFlags=gdef.DEBUG_PROCESS)
proc64_debug = generate_pop_and_exit_fixtures([pop_proc_64], ids=["proc64dbg"], dwCreationFlags=gdef.DEBUG_PROCESS)
if is_process_64_bits:
proc32_64_debug = generate_pop_and_exit_fixtures([pop_proc_32, pop_proc_64], ids=["proc32dbg", "proc64dbg"],
dwCreationFlags=gdef.DEBUG_PROCESS)
else:
# proc32_64_debug = proc32_debug
no_dbg_64_from_32 = lambda *x, **kwargs: pytest.skip("Cannot debug a proc64 from a 32b process")
proc32_64_debug = generate_pop_and_exit_fixtures([pop_proc_32, no_dbg_64_from_32], ids=["proc32dbg", "proc64dbg"], dwCreationFlags=gdef.DEBUG_PROCESS)
yolo = generate_pop_and_exit_fixtures([pop_proc_32, pop_proc_64], ids=["proc32dbg", "proc64dbg"], dwCreationFlags=gdef.CREATE_SUSPENDED)
DEFAULT_DEBUGGER_TIMEOUT = 10
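# Minimal sketch of the Debugger/Breakpoint pattern exercised by the tests below
# (illustrative only; `proc` stands for a debuggee obtained from the fixtures above):
#
#     class MyBP(windows.debug.Breakpoint):
#         def trigger(self, dbg, exc):
#             # called when execution reaches "ntdll!LdrLoadDll"
#             dbg.current_process.exit()
#
#     d = windows.debug.Debugger(proc)
#     d.add_bp(MyBP("ntdll!LdrLoadDll"))
#     d.loop()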
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_init_breakpoint_callback(proc32_64_debug):
"""Checking that the initial breakpoint call `on_exception`"""
class MyDbg(windows.debug.Debugger):
def on_exception(self, exception):
assert exception.ExceptionRecord.ExceptionCode == gdef.EXCEPTION_BREAKPOINT
self.current_process.exit()
d = MyDbg(proc32_64_debug)
d.loop()
def get_debug_process_ndll(proc):
proc_pc = proc.threads[0].context.pc
ntdll_addr = proc.query_memory(proc_pc).AllocationBase
return windows.pe_parse.GetPEFile(ntdll_addr, target=proc)
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_simple_standard_breakpoint(proc32_64_debug):
"""Check that a standard Breakpoint method `trigger` is called with the correct informations"""
class TSTBP(windows.debug.Breakpoint):
def trigger(self, dbg, exc):
assert dbg.current_process.pid == proc32_64_debug.pid
assert dbg.current_process.read_memory(self.addr, 1) == b"\xcc"
assert dbg.current_thread.context.pc == self.addr
d.current_process.exit()
LdrLoadDll = get_debug_process_ndll(proc32_64_debug).exports["LdrLoadDll"]
d = windows.debug.Debugger(proc32_64_debug)
d.add_bp(TSTBP(LdrLoadDll))
d.loop()
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_simple_hwx_breakpoint(proc32_64_debug):
"""Test that simple HXBP are trigger"""
class TSTBP(windows.debug.HXBreakpoint):
def trigger(self, dbg, exc):
assert dbg.current_process.pid == proc32_64_debug.pid
assert dbg.current_thread.context.pc == self.addr
assert dbg.current_thread.context.Dr7 != 0
d.current_process.exit()
LdrLoadDll = get_debug_process_ndll(proc32_64_debug).exports["LdrLoadDll"]
d = windows.debug.Debugger(proc32_64_debug)
d.add_bp(TSTBP(LdrLoadDll))
d.loop()
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_multiple_hwx_breakpoint(proc32_64_debug):
"""Checking that multiple succesives HXBP are properly triggered"""
class TSTBP(windows.debug.HXBreakpoint):
COUNTER = 0
def __init__(self, addr, expec_before):
self.addr = addr
self.expec_before = expec_before
def trigger(self, dbg, exc):
assert dbg.current_process.pid == proc32_64_debug.pid
assert dbg.current_thread.context.pc == self.addr
assert dbg.current_thread.context.Dr7 != 0
assert TSTBP.COUNTER == self.expec_before
assert dbg.current_process.read_memory(self.addr, 1) != b"\xcc"
TSTBP.COUNTER += 1
if TSTBP.COUNTER == 4:
d.current_process.exit()
d = windows.debug.Debugger(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
proc32_64_debug.write_memory(addr, "\x90" * 8)
d.add_bp(TSTBP(addr, 0))
d.add_bp(TSTBP(addr + 1, 1))
d.add_bp(TSTBP(addr + 2, 2))
d.add_bp(TSTBP(addr + 3, 3))
proc32_64_debug.create_thread(addr, 0)
d.loop()
# Used to verify we actually called the Breakpoints
assert TSTBP.COUNTER == 4
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_four_hwx_breakpoint_fail(proc32_64_debug):
"""Check that setting 4HXBP in the same thread fails"""
# print("test_four_hwx_breakpoint_fail {0}".format(proc32_64_debug))
class TSTBP(windows.debug.HXBreakpoint):
def __init__(self, addr, expec_before):
self.addr = addr
self.expec_before = expec_before
def trigger(self, dbg, exc):
raise NotImplementedError("Should fail before")
d = windows.debug.Debugger(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
proc32_64_debug.write_memory(addr, "\x90" * 8 + "\xc3")
d.add_bp(TSTBP(addr, 0))
d.add_bp(TSTBP(addr + 1, 1))
d.add_bp(TSTBP(addr + 2, 2))
d.add_bp(TSTBP(addr + 3, 3))
d.add_bp(TSTBP(addr + 4, 4))
proc32_64_debug.create_thread(addr, 0)
with pytest.raises(ValueError) as e:
d.loop()
assert "DRx" in e.value.args[0]
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_hwx_breakpoint_are_on_all_thread(proc32_64_debug):
"""Checking that HXBP without target are set on all threads"""
class MyDbg(windows.debug.Debugger):
def on_create_thread(self, exception):
# Check that later-created threads have their HWX breakpoint :)
assert self.current_thread.context.Dr7 != 0
class TSTBP(windows.debug.HXBreakpoint):
COUNTER = 0
def __init__(self, addr, expec_before):
self.addr = addr
self.expec_before = expec_before
def trigger(self, dbg, exc):
assert len(dbg.current_process.threads) != 1
#for t in dbg.current_process.threads:
# TEST_CASE.assertNotEqual(t.context.Dr7, 0)
if TSTBP.COUNTER == 0: #First time we got it ! create new thread
TSTBP.COUNTER = 1
dbg.current_process.create_thread(addr, 0)
else:
TSTBP.COUNTER += 1
d.current_process.exit()
d = MyDbg(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
proc32_64_debug.write_memory(addr, "\x90" * 2 + "\xc3")
d.add_bp(TSTBP(addr, 0))
proc32_64_debug.create_thread(addr, 0)
d.loop()
# Used to verify we actually called the Breakpoints
assert TSTBP.COUNTER == 2
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
@pytest.mark.parametrize("bptype", [windows.debug.Breakpoint, windows.debug.HXBreakpoint])
def test_simple_breakpoint_name_addr(proc32_64_debug, bptype):
"""Check breakpoint address resolution for format dll!api"""
class TSTBP(bptype):
COUNTER = 0
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
LdrLoadDlladdr = dbg.current_process.peb.modules[1].pe.exports["LdrLoadDll"]
assert dbg.current_process.pid == proc32_64_debug.pid
assert dbg.current_thread.context.pc == addr
assert LdrLoadDlladdr == addr
TSTBP.COUNTER += 1
d.current_process.exit()
d = windows.debug.Debugger(proc32_64_debug)
d.add_bp(TSTBP("ntdll!LdrLoadDll"))
d.loop()
assert TSTBP.COUNTER == 1
from . import dbg_injection
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_hardware_breakpoint_name_addr(proc32_64_debug):
"""Check that name addr in HXBP are trigger in all threads"""
class TSTBP(windows.debug.HXBreakpoint):
COUNTER = 0
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
assert dbg.current_process.pid == proc32_64_debug.pid
assert dbg.current_thread.context.pc == dbg._resolve(self.addr, dbg.current_process)
TSTBP.COUNTER += 1
if TSTBP.COUNTER == 1:
# Perform a loaddll in a new thread :)
# See if it triggers a bp
t = dbg_injection.perform_manual_getproc_loadlib_for_dbg(dbg.current_process, "wintrust.dll")
self.new_thread = t
if hasattr(self, "new_thread") and dbg.current_thread.tid == self.new_thread.tid:
for t in dbg.current_process.threads:
assert t.context.Dr7 != 0
d.current_process.exit()
d = windows.debug.Debugger(proc32_64_debug)
d.add_bp(TSTBP("ntdll!LdrLoadDll"))
# Code that will load wintrust !
d.loop()
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_single_step(proc32_64_debug):
"""Check that BP/dbg can trigger single step and that instruction follows"""
NB_SINGLE_STEP = 3
class MyDbg(windows.debug.Debugger):
DATA = []
def on_single_step(self, exception):
# Record each single-step address and keep stepping until we have enough
addr = exception.ExceptionRecord.ExceptionAddress
assert self.current_thread.context.pc == addr
if len(MyDbg.DATA) < NB_SINGLE_STEP:
MyDbg.DATA.append(addr)
return self.single_step()
self.current_process.exit()
return
class TSTBP(windows.debug.Breakpoint):
"""Check that BP/dbg can trigger single step and that instruction follows"""
def trigger(self, dbg, exc):
return dbg.single_step()
d = MyDbg(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
proc32_64_debug.write_memory(addr, "\x90" * 3 + "\xc3")
d.add_bp(TSTBP(addr))
proc32_64_debug.create_thread(addr, 0)
d.loop()
# Used to verify we actually called the Breakpoints
assert len(MyDbg.DATA) == NB_SINGLE_STEP
for i in range(NB_SINGLE_STEP):
assert MyDbg.DATA[i] == addr + 1 + i
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
@pytest.mark.parametrize("bptype", [windows.debug.Breakpoint, windows.debug.HXBreakpoint])
def test_single_step_from_bp(proc32_64_debug, bptype):
"""Check that HXBPBP/dbg can trigger single step"""
NB_SINGLE_STEP = 3
class MyDbg(windows.debug.Debugger):
DATA = []
def on_single_step(self, exception):
# Record each single-step address and keep stepping until we have enough
addr = exception.ExceptionRecord.ExceptionAddress
assert self.current_thread.context.pc == addr
if len(MyDbg.DATA) < NB_SINGLE_STEP:
MyDbg.DATA.append(addr)
return self.single_step()
self.current_process.exit()
return
# class TSTBP(windows.debug.HXBreakpoint):
class TSTBP(bptype):
"""Check that BP/dbg can trigger single step and that instruction follows"""
def trigger(self, dbg, exc):
return dbg.single_step()
d = MyDbg(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
proc32_64_debug.write_memory(addr, "\x90" * 3 + "\xc3")
d.add_bp(TSTBP(addr))
proc32_64_debug.create_thread(addr, 0)
d.loop()
# Used to verify we actually called the Breakpoints
assert len(MyDbg.DATA) == NB_SINGLE_STEP
for i in range(NB_SINGLE_STEP):
assert MyDbg.DATA[i] == addr + 1 + i
# MEMBP
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_memory_breakpoint_write(proc32_64_debug):
"""Check MemoryBP WRITE"""
class TSTBP(windows.debug.MemoryBreakpoint):
#DEFAULT_PROTECT = PAGE_READONLY
#DEFAULT_PROTECT = PAGE_READONLY
DEFAULT_EVENTS = "W"
COUNTER = 0
"""Check that BP/dbg can trigger single step and that instruction follows"""
def trigger(self, dbg, exc):
fault_addr = exc.ExceptionRecord.ExceptionInformation[1]
eax = dbg.current_thread.context.func_result # Rax | Eax
if eax == 42:
dbg.current_process.exit()
return
assert fault_addr == data + eax
TSTBP.COUNTER += 1
return
if proc32_64_debug.bitness == 32:
asm, reg = (x86, "EAX")
else:
asm, reg = (x64, "RAX")
d = windows.debug.Debugger(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
data = proc32_64_debug.virtual_alloc(0x1000)
injected = asm.MultipleInstr()
injected += asm.Mov(reg, 0)
injected += asm.Mov(asm.deref(data), reg)
injected += asm.Add(reg, 4)
injected += asm.Mov(asm.deref(data + 4), reg)
injected += asm.Add(reg, 4)
# This one should NOT trigger the MemBP of size 8
injected += asm.Mov(asm.deref(data + 8), reg)
injected += asm.Mov(reg, 42)
injected += asm.Mov(asm.deref(data), reg)
injected += asm.Ret()
proc32_64_debug.write_memory(addr, injected.get_code())
d.add_bp(TSTBP(data, size=0x8))
proc32_64_debug.create_thread(addr, 0)
d.loop()
# Used to verify we actually called the Breakpoints at the expected addresses
assert TSTBP.COUNTER == 2
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_memory_breakpoint_exec(proc32_64_debug):
"""Check MemoryBP EXEC"""
NB_NOP_IN_PAGE = 3
class TSTBP(windows.debug.MemoryBreakpoint):
"""Check that BP/dbg can trigger single step and that instruction follows"""
#DEFAULT_PROTECT = PAGE_NOACCESS
DEFAULT_EVENTS = "X"
DATA = []
def trigger(self, dbg, exc):
fault_addr = exc.ExceptionRecord.ExceptionInformation[1]
TSTBP.DATA.append(fault_addr)
if len(TSTBP.DATA) == NB_NOP_IN_PAGE + 1:
dbg.current_process.exit()
d = windows.debug.Debugger(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
proc32_64_debug.write_memory(addr, "\x90" * NB_NOP_IN_PAGE + "\xc3")
d.add_bp(TSTBP(addr, size=0x1000))
proc32_64_debug.create_thread(addr, 0)
d.loop()
# Used to verify we actually called the Breakpoints
assert len(TSTBP.DATA) == NB_NOP_IN_PAGE + 1
for i in range(NB_NOP_IN_PAGE + 1):
assert TSTBP.DATA[i] == addr + i
# breakpoint remove
import threading
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
@python_injection
@pytest.mark.parametrize("bptype", [windows.debug.FunctionParamDumpHXBP, windows.debug.FunctionParamDumpBP])
def test_standard_breakpoint_self_remove(proc32_64_debug, bptype):
data = set()
def do_check():
proc32_64_debug.execute_python_unsafe("open(u'FILENAME1')").wait()
proc32_64_debug.execute_python_unsafe("open(u'FILENAME2')").wait()
proc32_64_debug.execute_python_unsafe("open(u'FILENAME3')").wait()
proc32_64_debug.exit()
class TSTBP(bptype):
TARGET = windows.winproxy.CreateFileW
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
ctx = dbg.current_thread.context
filename = self.extract_arguments(dbg.current_process, dbg.current_thread)["lpFileName"]
data.add(filename)
if filename == u"FILENAME2":
dbg.del_bp(self)
d = windows.debug.Debugger(proc32_64_debug)
d.add_bp(TSTBP("kernelbase!CreateFileW"))
threading.Thread(target=do_check).start()
d.loop()
assert data >= set([u"FILENAME1", u"FILENAME2"])
assert u"FILENAME3" not in data
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
@python_injection
@pytest.mark.parametrize("bptype", [windows.debug.FunctionParamDumpHXBP, windows.debug.FunctionParamDumpBP])
def test_standard_breakpoint_remove(proc32_64_debug, bptype):
data = set()
def do_check():
proc32_64_debug.execute_python_unsafe("open(u'FILENAME1')").wait()
proc32_64_debug.execute_python_unsafe("open(u'FILENAME2')").wait()
d.del_bp(the_bp)
proc32_64_debug.execute_python_unsafe("open(u'FILENAME3')").wait()
proc32_64_debug.exit()
class TSTBP(bptype):
TARGET = windows.winproxy.CreateFileW
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
ctx = dbg.current_thread.context
filename = self.extract_arguments(dbg.current_process, dbg.current_thread)["lpFileName"]
data.add(filename)
d = windows.debug.Debugger(proc32_64_debug)
the_bp = TSTBP("kernelbase!CreateFileW")
# import pdb;pdb.set_trace()
d.add_bp(the_bp)
threading.Thread(target=do_check).start()
d.loop()
assert data >= set([u"FILENAME1", u"FILENAME2"])
assert u"FILENAME3" not in data
def get_generate_read_at_for_proc(target):
if target.bitness == 32:
def generate_read_at(addr):
res = x86.MultipleInstr()
res += x86.Mov("EAX", x86.deref(addr))
res += x86.Ret()
return res.get_code()
else:
def generate_read_at(addr):
res = x64.MultipleInstr()
res += x64.Mov("RAX", x64.deref(addr))
res += x64.Ret()
return res.get_code()
return generate_read_at
def get_generate_write_at_for_proc(target):
if target.bitness == 32:
def generate_write_at(addr):
res = x86.MultipleInstr()
res += x86.Mov(x86.deref(addr), "EAX")
res += x86.Ret()
return res.get_code()
else:
def generate_write_at(addr):
res = x64.MultipleInstr()
res += x64.Mov(x64.deref(addr), "RAX")
res += x64.Ret()
return res.get_code()
return generate_write_at
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_mem_breakpoint_remove(proc32_64_debug):
data = []
generate_read_at = get_generate_read_at_for_proc(proc32_64_debug)
def do_check():
proc32_64_debug.execute(generate_read_at(data_addr)).wait()
proc32_64_debug.execute(generate_read_at(data_addr + 4)).wait()
d.del_bp(the_bp)
proc32_64_debug.execute(generate_read_at(data_addr + 8)).wait()
proc32_64_debug.exit()
class TSTBP(windows.debug.MemoryBreakpoint):
#DEFAULT_PROTECT = PAGE_NOACCESS
DEFAULT_EVENTS = "RWX"
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
fault_addr = exc.ExceptionRecord.ExceptionInformation[1]
data.append(fault_addr)
d = windows.debug.Debugger(proc32_64_debug)
data_addr = proc32_64_debug.virtual_alloc(0x1000)
the_bp = TSTBP(data_addr, size=0x1000)
d.add_bp(the_bp)
threading.Thread(target=do_check).start()
d.loop()
assert data == [data_addr, data_addr + 4]
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_mem_breakpoint_self_remove(proc32_64_debug):
data = []
generate_read_at = get_generate_read_at_for_proc(proc32_64_debug)
def do_check():
proc32_64_debug.execute(generate_read_at(data_addr)).wait()
proc32_64_debug.execute(generate_read_at(data_addr + 4)).wait()
proc32_64_debug.execute(generate_read_at(data_addr + 8)).wait()
proc32_64_debug.exit()
class TSTBP(windows.debug.MemoryBreakpoint):
#DEFAULT_PROTECT = PAGE_NOACCESS
DEFAULT_EVENTS = "RWX"
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
fault_addr = exc.ExceptionRecord.ExceptionInformation[1]
data.append(fault_addr)
if fault_addr == data_addr + 4:
dbg.del_bp(self)
d = windows.debug.Debugger(proc32_64_debug)
data_addr = proc32_64_debug.virtual_alloc(0x1000)
the_bp = TSTBP(data_addr, size=0x1000)
d.add_bp(the_bp)
threading.Thread(target=do_check).start()
d.loop()
assert data == [data_addr, data_addr + 4]
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_read_write_bp_same_page(proc32_64_debug):
data = []
generate_read_at = get_generate_read_at_for_proc(proc32_64_debug)
generate_write_at = get_generate_write_at_for_proc(proc32_64_debug)
def do_check():
proc32_64_debug.execute(generate_read_at(data_addr)).wait()
proc32_64_debug.execute(generate_write_at(data_addr + 4)).wait()
proc32_64_debug.execute(generate_read_at(data_addr + 0x500)).wait()
proc32_64_debug.execute(generate_write_at(data_addr + 0x504)).wait()
proc32_64_debug.exit()
class MemBP(windows.debug.MemoryBreakpoint):
#DEFAULT_PROTECT = PAGE_NOACCESS
DEFAULT_EVENTS = "RWX"
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
fault_addr = exc.ExceptionRecord.ExceptionInformation[1]
#print("Got <{0:#x}> <{1}>".format(fault_addr, exc.ExceptionRecord.ExceptionInformation[0]))
data.append((self, fault_addr))
d = windows.debug.Debugger(proc32_64_debug)
data_addr = proc32_64_debug.virtual_alloc(0x1000)
the_write_bp = MemBP(data_addr + 0x500, size=0x500, events="W")
the_read_bp = MemBP(data_addr, size=0x500, events="RW")
d.add_bp(the_write_bp)
d.add_bp(the_read_bp)
threading.Thread(target=do_check).start()
d.loop()
# The read at (data_addr + 0x500) should not trigger the_write_bp (it only watches writes)
expected_result = [(the_read_bp, data_addr), (the_read_bp, data_addr + 4),
(the_write_bp, data_addr + 0x504)]
assert data == expected_result
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_exe_in_module_list(proc32_64_debug):
class MyDbg(windows.debug.Debugger):
def on_exception(self, exception):
exename = os.path.basename(proc32_64_debug.peb.imagepath.str)
assert exename.endswith(".exe")
exename = exename[:-len(".exe")] # Remove the .exe from the module name
this_process_modules = self._module_by_process[self.current_process.pid]
assert exename and exename in this_process_modules.keys()
self.current_process.exit()
d = MyDbg(proc32_64_debug)
d.loop()
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_bp_exe_by_name(proc32_64_debug):
class TSTBP(windows.debug.Breakpoint):
COUNTER = 0
def trigger(self, dbg, exc):
TSTBP.COUNTER += 1
assert TSTBP.COUNTER == 1
# Kill the target in 0.5s
# It's not too long
# It's long enough for trigger to be re-called if the implementation is broken
threading.Timer(0.5, proc32_64_debug.exit).start()
exepe = proc32_64_debug.peb.exe
entrypoint = exepe.get_OptionalHeader().AddressOfEntryPoint
exename = os.path.basename(proc32_64_debug.peb.imagepath.str)
assert exename.endswith(".exe")
exename = exename[:-len(".exe")] # Remove the .exe from the module name
d = windows.debug.Debugger(proc32_64_debug)
# The goal is to test bp of format 'exename!offset' so we craft a string based on the entrypoint
d.add_bp(TSTBP("{name}!{offset}".format(name=exename, offset=entrypoint)))
d.loop()
assert TSTBP.COUNTER == 1
@pytest.mark.timeout(DEFAULT_DEBUGGER_TIMEOUT)
def test_keyboardinterrupt_when_bp_event(proc32_64_debug, monkeypatch):
class ShouldNotTrigger(windows.debug.Breakpoint):
COUNTER = 0
def trigger(self, dbg, exc):
raise ValueError("This BP should not trigger in this test !")
real_WaitForDebugEvent = windows.winproxy.WaitForDebugEvent
def WaitForDebugEvent_KeyboardInterrupt(debug_event):
real_WaitForDebugEvent(debug_event)
if not debug_event.dwDebugEventCode == gdef.EXCEPTION_DEBUG_EVENT:
return
if not debug_event.u.Exception.ExceptionRecord.ExceptionCode in [gdef.EXCEPTION_BREAKPOINT, gdef.STATUS_WX86_BREAKPOINT]:
return # Not a BP
if debug_event.u.Exception.ExceptionRecord.ExceptionAddress == addr:
# Our own breakpoint
# Trigger the fake Ctrl+c
raise KeyboardInterrupt("TEST BP")
xx = monkeypatch.setattr(windows.winproxy, "WaitForDebugEvent", WaitForDebugEvent_KeyboardInterrupt)
# This should emulate a ctrl+c while waiting for the event
# Our goal is to set the target back to a good state :)
TEST_CODE = b"\xeb\xfe\xff\xff\xff\xff\xff" # Loop + invalid instr
addr = proc32_64_debug.virtual_alloc(0x1000)
proc32_64_debug.write_memory(addr, TEST_CODE)
d = windows.debug.Debugger(proc32_64_debug)
bad_thread = proc32_64_debug.create_thread(addr, 0)
d.add_bp(ShouldNotTrigger(addr))
d.kill_on_exit(False)
try:
d.loop()
except KeyboardInterrupt as e:
for t in proc32_64_debug.threads:
t.suspend()
d.detach()
# So we have detached when a BP was triggered
# We should have the original memory under the BP
# We should have EIP/RIP decremented by one (should be at <addr> not <addr+1>
assert proc32_64_debug.read_memory(addr, len(TEST_CODE)) == TEST_CODE
assert bad_thread.context.pc == addr
else:
raise ValueError("Should have raised")
| hakril/PythonForWindows | tests/test_debugger.py | Python | bsd-3-clause | 25,193 |
from collections import defaultdict
from django.conf import settings
from mongodbforms.documentoptions import DocumentMetaWrapper, LazyDocumentMetaWrapper
from mongodbforms.fieldgenerator import MongoDefaultFormFieldGenerator
try:
from django.utils.module_loading import import_by_path
except ImportError:
# this is only in Django's devel version for now
# and the following code comes from there. Yet it's too nice to
# pass on this. So we do define it here for now.
import sys
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.utils import six
def import_by_path(dotted_path, error_prefix=''):
"""
Import a dotted module path and return the attribute/class designated
by the last name in the path. Raise ImproperlyConfigured if something
goes wrong.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
raise ImproperlyConfigured("%s%s doesn't look like a module path" %
(error_prefix, dotted_path))
try:
module = import_module(module_path)
except ImportError as e:
msg = '%sError importing module %s: "%s"' % (
error_prefix, module_path, e)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg),
sys.exc_info()[2])
try:
attr = getattr(module, class_name)
except AttributeError:
raise ImproperlyConfigured(
'%sModule "%s" does not define a "%s" attribute/class' %
(error_prefix, module_path, class_name))
return attr
def load_field_generator():
if hasattr(settings, 'MONGODBFORMS_FIELDGENERATOR'):
return import_by_path(settings.MONGODBFORMS_FIELDGENERATOR)
return MongoDefaultFormFieldGenerator
def init_document_options(document):
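    # Wrap the document's _meta in a DocumentMetaWrapper unless it is already wrapped (or lazily wrapped).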
if not isinstance(document._meta, (DocumentMetaWrapper, LazyDocumentMetaWrapper)):
document._meta = DocumentMetaWrapper(document)
return document
def get_document_options(document):
return DocumentMetaWrapper(document)
def format_mongo_validation_errors(validation_exception):
"""Returns a string listing all errors within a document"""
def generate_key(value, prefix=''):
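        # Recursively flatten lists and dicts of validation errors into one space-separated key, prefixing dict entries with their key.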
if isinstance(value, list):
value = ' '.join([generate_key(k) for k in value])
if isinstance(value, dict):
value = ' '.join([
generate_key(v, k) for k, v in value.iteritems()
])
results = "%s.%s" % (prefix, value) if prefix else value
return results
error_dict = defaultdict(list)
for k, v in validation_exception.to_dict().iteritems():
error_dict[generate_key(v)].append(k)
return ["%s: %s" % (k, v) for k, v in error_dict.iteritems()]
# Taken from six (https://pypi.python.org/pypi/six)
# by "Benjamin Peterson <benjamin@python.org>"
#
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
return meta("NewBase", bases, {})
|
fmoro/django-mongodbforms
|
mongodbforms/util.py
|
Python
|
bsd-3-clause
| 4,265
|
from __future__ import absolute_import
import logging
import re
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
from django.conf.urls import patterns, include, url
from sentry.plugins import plugins
logger = logging.getLogger("sentry.plugins")
def ensure_url(u):
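    # Tuples/lists are wrapped with url(); anything else must already be a resolved URL pattern object.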
if isinstance(u, (tuple, list)):
return url(*u)
elif not isinstance(u, (RegexURLResolver, RegexURLPattern)):
raise TypeError(
"url must be RegexURLResolver or RegexURLPattern, not %r: %r" % (type(u).__name__, u)
)
return u
def load_plugin_urls(plugins):
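    # Mount each plugin's group URL patterns under '^<slug>/'; plugins whose get_group_urls() fails are logged and skipped.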
urlpatterns = patterns("")
for plugin in plugins:
try:
urls = plugin.get_group_urls()
if not urls:
continue
urls = [ensure_url(u) for u in urls]
except Exception:
logger.exception("routes.failed", extra={"plugin": type(plugin).__name__})
else:
urlpatterns.append(url(r"^%s/" % re.escape(plugin.slug), include(urls)))
return urlpatterns
urlpatterns = load_plugin_urls(plugins.all())
|
mvaled/sentry
|
src/sentry/plugins/base/group_api_urls.py
|
Python
|
bsd-3-clause
| 1,093
|
from twisted.application.service import Application
from twisted.application.internet import TimerService, TCPServer
from twisted.web import server
from twisted.python import log
from scrapy.utils.misc import load_object
from .interfaces import IEggStorage, IPoller, ISpiderScheduler, IEnvironment
from .launcher import Launcher
from .eggstorage import FilesystemEggStorage
from .scheduler import SpiderScheduler
from .poller import QueuePoller
from .environ import Environment
from .website import Root
from .config import Config
def application(config):
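    # Assemble the Scrapyd Twisted application: register the poller, egg storage, scheduler and
    # environment components, then attach the launcher, a 5-second poll timer and the HTTP web console.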
app = Application("Scrapyd")
http_port = config.getint('http_port', 6800)
bind_address = config.get('bind_address', '0.0.0.0')
poller = QueuePoller(config)
eggstorage = FilesystemEggStorage(config)
scheduler = SpiderScheduler(config)
environment = Environment(config)
app.setComponent(IPoller, poller)
app.setComponent(IEggStorage, eggstorage)
app.setComponent(ISpiderScheduler, scheduler)
app.setComponent(IEnvironment, environment)
laupath = config.get('launcher', 'scrapyd.launcher.Launcher')
laucls = load_object(laupath)
launcher = laucls(config, app)
timer = TimerService(5, poller.poll)
webservice = TCPServer(http_port, server.Site(Root(config, app)), interface=bind_address)
log.msg("Scrapyd web console available at http://%s:%s/" % (bind_address, http_port))
launcher.setServiceParent(app)
timer.setServiceParent(app)
webservice.setServiceParent(app)
return app
|
mouadino/scrapy
|
scrapyd/app.py
|
Python
|
bsd-3-clause
| 1,522
|
import decimal
import os
from contextlib import contextmanager
from django.test import TestCase
from django.core.exceptions import ImproperlyConfigured
from mock import patch
from configurations.values import (Value, BooleanValue, IntegerValue,
FloatValue, DecimalValue, ListValue,
TupleValue, SetValue, DictValue,
URLValue, EmailValue, IPValue,
RegexValue, PathValue, SecretValue,
DatabaseURLValue, EmailURLValue,
CacheURLValue, BackendsValue,
CastingMixin, SearchURLValue)
@contextmanager
def env(**kwargs):
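    # Temporarily replace os.environ with only the given variables for the duration of the block.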
with patch.dict(os.environ, clear=True, **kwargs):
yield
class FailingCasterValue(CastingMixin, Value):
caster = 'non.existing.caster'
class ValueTests(TestCase):
def test_value(self):
value = Value('default', environ=False)
self.assertEqual(value.setup('TEST'), 'default')
with env(DJANGO_TEST='override'):
self.assertEqual(value.setup('TEST'), 'default')
@patch.dict(os.environ, clear=True, DJANGO_TEST='override')
def test_env_var(self):
value = Value('default')
self.assertEqual(value.setup('TEST'), 'override')
self.assertNotEqual(value.setup('TEST'), value.default)
self.assertEqual(value.to_python(os.environ['DJANGO_TEST']),
value.setup('TEST'))
def test_value_reuse(self):
value1 = Value('default')
value2 = Value(value1)
self.assertEqual(value1.setup('TEST1'), 'default')
self.assertEqual(value2.setup('TEST2'), 'default')
with env(DJANGO_TEST1='override1', DJANGO_TEST2='override2'):
self.assertEqual(value1.setup('TEST1'), 'override1')
self.assertEqual(value2.setup('TEST2'), 'override2')
def test_env_var_prefix(self):
with patch.dict(os.environ, clear=True, ACME_TEST='override'):
value = Value('default', environ_prefix='ACME')
self.assertEqual(value.setup('TEST'), 'override')
with patch.dict(os.environ, clear=True, TEST='override'):
value = Value('default', environ_prefix='')
self.assertEqual(value.setup('TEST'), 'override')
def test_boolean_values_true(self):
value = BooleanValue(False)
for truthy in value.true_values:
with env(DJANGO_TEST=truthy):
self.assertTrue(value.setup('TEST'))
def test_boolean_values_faulty(self):
self.assertRaises(ValueError, BooleanValue, 'false')
def test_boolean_values_false(self):
value = BooleanValue(True)
for falsy in value.false_values:
with env(DJANGO_TEST=falsy):
self.assertFalse(value.setup('TEST'))
def test_boolean_values_nonboolean(self):
value = BooleanValue(True)
with env(DJANGO_TEST='nonboolean'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_integer_values(self):
value = IntegerValue(1)
with env(DJANGO_TEST='2'):
self.assertEqual(value.setup('TEST'), 2)
with env(DJANGO_TEST='noninteger'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_float_values(self):
value = FloatValue(1.0)
with env(DJANGO_TEST='2.0'):
self.assertEqual(value.setup('TEST'), 2.0)
with env(DJANGO_TEST='noninteger'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_decimal_values(self):
value = DecimalValue(decimal.Decimal(1))
with env(DJANGO_TEST='2'):
self.assertEqual(value.setup('TEST'), decimal.Decimal(2))
with env(DJANGO_TEST='nondecimal'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_failing_caster(self):
self.assertRaises(ImproperlyConfigured, FailingCasterValue)
def test_list_values_default(self):
value = ListValue()
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), ['2', '2'])
with env(DJANGO_TEST='2, 2 ,'):
self.assertEqual(value.setup('TEST'), ['2', '2'])
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), [])
def test_list_values_separator(self):
value = ListValue(separator=':')
with env(DJANGO_TEST='/usr/bin:/usr/sbin:/usr/local/bin'):
self.assertEqual(value.setup('TEST'),
['/usr/bin', '/usr/sbin', '/usr/local/bin'])
    def test_list_values_converter(self):
value = ListValue(converter=int)
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), [2, 2])
value = ListValue(converter=float)
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), [2.0, 2.0])
def test_list_values_custom_converter(self):
value = ListValue(converter=lambda x: x * 2)
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), ['22', '22'])
def test_list_values_converter_exception(self):
value = ListValue(converter=int)
with env(DJANGO_TEST='2,b'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_tuple_values_default(self):
value = TupleValue()
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), ('2', '2'))
with env(DJANGO_TEST='2, 2 ,'):
self.assertEqual(value.setup('TEST'), ('2', '2'))
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), ())
def test_set_values_default(self):
value = SetValue()
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), set(['2', '2']))
with env(DJANGO_TEST='2, 2 ,'):
self.assertEqual(value.setup('TEST'), set(['2', '2']))
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), set())
def test_dict_values_default(self):
value = DictValue()
with env(DJANGO_TEST='{2: 2}'):
self.assertEqual(value.setup('TEST'), {2: 2})
expected = {2: 2, '3': '3', '4': [1, 2, 3]}
with env(DJANGO_TEST="{2: 2, '3': '3', '4': [1, 2, 3]}"):
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST="""{
2: 2,
'3': '3',
'4': [1, 2, 3],
}"""):
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), {})
with env(DJANGO_TEST='spam'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_email_values(self):
value = EmailValue('spam@eg.gs')
with env(DJANGO_TEST='spam@sp.am'):
self.assertEqual(value.setup('TEST'), 'spam@sp.am')
with env(DJANGO_TEST='spam'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_url_values(self):
value = URLValue('http://eggs.spam')
with env(DJANGO_TEST='http://spam.eggs'):
self.assertEqual(value.setup('TEST'), 'http://spam.eggs')
with env(DJANGO_TEST='httb://spam.eggs'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_ip_values(self):
value = IPValue('0.0.0.0')
with env(DJANGO_TEST='127.0.0.1'):
self.assertEqual(value.setup('TEST'), '127.0.0.1')
with env(DJANGO_TEST='::1'):
self.assertEqual(value.setup('TEST'), '::1')
with env(DJANGO_TEST='spam.eggs'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_regex_values(self):
value = RegexValue('000--000', regex=r'\d+--\d+')
with env(DJANGO_TEST='123--456'):
self.assertEqual(value.setup('TEST'), '123--456')
with env(DJANGO_TEST='123456'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_path_values_with_check(self):
value = PathValue()
with env(DJANGO_TEST='/'):
self.assertEqual(value.setup('TEST'), '/')
with env(DJANGO_TEST='~/'):
self.assertEqual(value.setup('TEST'), os.path.expanduser('~'))
with env(DJANGO_TEST='/does/not/exist'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_path_values_no_check(self):
value = PathValue(check_exists=False)
with env(DJANGO_TEST='/'):
self.assertEqual(value.setup('TEST'), '/')
with env(DJANGO_TEST='~/spam/eggs'):
self.assertEqual(value.setup('TEST'),
os.path.join(os.path.expanduser('~'),
'spam', 'eggs'))
with env(DJANGO_TEST='/does/not/exist'):
self.assertEqual(value.setup('TEST'), '/does/not/exist')
def test_secret_value(self):
self.assertRaises(ValueError, SecretValue, 'default')
value = SecretValue()
self.assertRaises(ValueError, value.setup, 'TEST')
with env(DJANGO_SECRET_KEY='123'):
self.assertEqual(value.setup('SECRET_KEY'), '123')
value = SecretValue(environ_name='FACEBOOK_API_SECRET',
environ_prefix=None)
self.assertRaises(ValueError, value.setup, 'TEST')
with env(FACEBOOK_API_SECRET='123'):
self.assertEqual(value.setup('TEST'), '123')
def test_database_url_value(self):
value = DatabaseURLValue()
self.assertEqual(value.default, {})
with env(DATABASE_URL='sqlite://'):
self.assertEqual(value.setup('DATABASE_URL'), {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'HOST': None,
'NAME': ':memory:',
'PASSWORD': None,
'PORT': None,
'USER': None,
}})
def test_email_url_value(self):
value = EmailURLValue()
self.assertEqual(value.default, {})
with env(EMAIL_URL='smtps://user@domain.com:password@smtp.example.com:587'):
self.assertEqual(value.setup('EMAIL_URL'), {
'EMAIL_BACKEND': 'django.core.mail.backends.smtp.EmailBackend',
'EMAIL_FILE_PATH': '',
'EMAIL_HOST': 'smtp.example.com',
'EMAIL_HOST_PASSWORD': 'password',
'EMAIL_HOST_USER': 'user@domain.com',
'EMAIL_PORT': 587,
'EMAIL_USE_TLS': True})
with env(EMAIL_URL='console://'):
self.assertEqual(value.setup('EMAIL_URL'), {
'EMAIL_BACKEND': 'django.core.mail.backends.console.EmailBackend',
'EMAIL_FILE_PATH': '',
'EMAIL_HOST': None,
'EMAIL_HOST_PASSWORD': None,
'EMAIL_HOST_USER': None,
'EMAIL_PORT': None,
'EMAIL_USE_TLS': False})
with env(EMAIL_URL='smtps://user@domain.com:password@smtp.example.com:wrong'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_cache_url_value(self):
cache_setting = {
'default': {
'BACKEND': 'redis_cache.cache.RedisCache',
'KEY_PREFIX': '',
'LOCATION': 'user@host:port:1'
}
}
cache_url = 'redis://user@host:port/1'
value = CacheURLValue(cache_url)
self.assertEqual(value.default, cache_setting)
value = CacheURLValue()
self.assertEqual(value.default, {})
with env(CACHE_URL='redis://user@host:port/1'):
self.assertEqual(value.setup('CACHE_URL'), cache_setting)
with env(CACHE_URL='wrong://user@host:port/1'):
self.assertRaises(KeyError, value.setup, 'TEST')
def test_search_url_value(self):
value = SearchURLValue()
self.assertEqual(value.default, {})
with env(SEARCH_URL='elasticsearch://127.0.0.1:9200/index'):
self.assertEqual(value.setup('SEARCH_URL'), {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://127.0.0.1:9200',
'INDEX_NAME': 'index',
}})
def test_backend_list_value(self):
backends = ['django.middleware.common.CommonMiddleware']
value = BackendsValue(backends)
self.assertEqual(value.setup('TEST'), backends)
backends = ['non.existing.Backend']
self.assertRaises(ValueError, BackendsValue, backends)
|
luzfcb/django-configurations
|
tests/test_values.py
|
Python
|
bsd-3-clause
| 12,772
|
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Instruction,
OutputAction,
Match)
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
from pybvc.common.constants import (ETH_TYPE_IPv4,
IP_PROTO_TLSP,
IP_DSCP_CS3)
def of_demo_8():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
        print("Config file '%s' read error" % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 8 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
# --- Flow Match: Ethernet Source Address
# Ethernet Destination Address
# IPv4 Source Address
# IPv4 Destination Address
# IP Protocol Number
# IP DSCP
# Input Port
# NOTE: Ethernet type MUST be 2048 (0x800) -> IPv4 protocol
eth_type = ETH_TYPE_IPv4
eth_src = "00:1c:01:00:23:aa"
eth_dst = "00:02:02:60:ff:fe"
ipv4_src = "10.0.245.1/24"
ipv4_dst = "192.168.1.123/16"
ip_proto = IP_PROTO_TLSP
ip_dscp = IP_DSCP_CS3 # 'Class Selector' = 'Flash'
input_port = 13
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'"
% (ctrlIpAddr, nodeName))
    print("\n")
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Ethernet Type (%s)\n"
" Ethernet Source Address (%s)\n"
" Ethernet Destination Address (%s)\n"
" IPv4 Source Address (%s)\n"
" IPv4 Destination Address (%s)\n"
" IP Protocol Number (%s)\n"
" IP DSCP (%s)\n"
" Input Port (%s)"
% (hex(eth_type), eth_src,
eth_dst, ipv4_src, ipv4_dst,
ip_proto, ip_dscp,
input_port))
print (" Action: Output (CONTROLLER)")
time.sleep(rundelay)
flow_entry = FlowEntry()
table_id = 0
flow_entry.set_flow_table_id(table_id)
flow_id = 15
flow_entry.set_flow_id(flow_id)
flow_entry.set_flow_priority(flow_priority=1006)
flow_entry.set_flow_cookie(cookie=100)
flow_entry.set_flow_cookie_mask(cookie_mask=255)
# --- Instruction: 'Apply-actions'
# Action: 'Output' to CONTROLLER
instruction = Instruction(instruction_order=0)
action = OutputAction(order=0, port="CONTROLLER", max_len=60)
instruction.add_apply_action(action)
flow_entry.add_instruction(instruction)
# --- Match Fields: Ethernet Type
# Ethernet Source Address
# Ethernet Destination Address
# IPv4 Source Address
# IPv4 Destination Address
# IP Protocol Number
# IP DSCP
# Input Port
match = Match()
match.set_eth_type(eth_type)
match.set_eth_src(eth_src)
match.set_eth_dst(eth_dst)
match.set_ipv4_src(ipv4_src)
match.set_ipv4_dst(ipv4_dst)
match.set_ip_proto(ip_proto)
match.set_ip_dscp(ip_dscp)
match.set_in_port(input_port)
flow_entry.add_match(match)
print ("\n")
print ("<<< Flow to send:")
    print(flow_entry.get_payload())
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Get configured flow from the Controller")
time.sleep(rundelay)
result = ofswitch.get_configured_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully read from the Controller")
print ("Flow info:")
flow = result.get_data()
        print(json.dumps(flow, indent=4))
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Delete flow with id of '%s' from the Controller's cache "
"and from the table '%s' on the '%s' node"
% (flow_id, table_id, nodeName))
time.sleep(rundelay)
result = ofswitch.delete_flow(flow_entry.get_flow_table_id(),
flow_entry.get_flow_id())
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully removed from the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_8()
|
gaberger/pybvc
|
samples/sampleopenflow/demos/demo8.py
|
Python
|
bsd-3-clause
| 7,431
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Simphony-remote documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 7 14:01:45 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.append(os.path.abspath('../../'))
from tornadowebapi import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Simphony-remote'
copyright = '2016, SimPhoNy Project'
author = 'SimPhoNy Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '{}.{}'.format(*__version__.split(".")[0:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Simphony-remote v0.1.0dev1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = '_static/simphony_logo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Simphony-remotedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Simphony-remote.tex', 'Simphony-remote Documentation',
'SimPhoNy Project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'simphony-remote', 'Simphony-remote Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Simphony-remote', 'Simphony-remote Documentation',
author, 'Simphony-remote', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
simphony/tornado-webapi
|
doc/source/conf.py
|
Python
|
bsd-3-clause
| 12,106
|
try:
from ttag.args import Arg, BasicArg, BooleanArg, ConstantArg, DateArg, \
DateTimeArg, IntegerArg, IsInstanceArg, KeywordsArg, \
ModelInstanceArg, StringArg, TimeArg, MultiArg
from ttag.core import Tag
from ttag.exceptions import TagArgumentMissing, TagValidationError
from ttag import helpers
except ImportError:
# This allows setup.py to skip import errors which may occur if ttag is
# being installed at the same time as Django.
pass
VERSION = (3, 0)
def get_version(number_only=False):
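    # Build a version string from VERSION: integer parts are joined with '.'; parts from the first
    # non-integer onwards are joined with '-' (or dropped entirely when number_only=True).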
version = [str(VERSION[0])]
number = True
for bit in VERSION[1:]:
if not isinstance(bit, int):
if number_only:
break
number = False
version.append(number and '.' or '-')
version.append(str(bit))
return ''.join(version)
|
lincolnloop/django-ttag
|
ttag/__init__.py
|
Python
|
bsd-3-clause
| 840
|
from django.core.cache import cache
def pytest_runtest_setup(item):
# Clear the cache before every test
cache.clear()
|
mozilla/standup
|
standup/status/tests/conftest.py
|
Python
|
bsd-3-clause
| 128
|
from django import template
from django.template.base import Node, Template, TemplateSyntaxError
from django.conf import settings
register = template.Library()
class PlugItIncludeNode(Node):
def __init__(self, action):
self.action = action
def render(self, context):
action = self.action.resolve(context)
# Load plugIt object
if settings.PIAPI_STANDALONE:
# Import objects form the view
from plugit_proxy.views import plugIt, baseURI
else:
# Import object using the function in the view
from plugit_proxy.views import getPlugItObject
# Check the secret
from plugit_proxy.utils import create_secret
if context['ebuio_hpro_key'] != create_secret(str(context['ebuio_hpro_pk']), context['ebuio_hpro_name'], str(context['ebuio_u'].pk)):
return ''
(plugIt, _, _) = getPlugItObject(context['ebuio_hpro_pk'])
templateContent = plugIt.getTemplate(action)
template = Template(templateContent)
return template.render(context)
@register.tag
def plugitInclude(parser, token):
"""
Load and render a template, using the same context of a specific action.
Example: {% plugitInclude "/menuBar" %}
"""
bits = token.split_contents()
if len(bits) != 2:
        raise TemplateSyntaxError("'plugitInclude' tag takes one argument: the template's action to use")
action = parser.compile_filter(bits[1])
return PlugItIncludeNode(action)
@register.assignment_tag
def plugitGetUser(pk):
if settings.PIAPI_STANDALONE:
from plugit_proxy.views import generate_user
user = generate_user(pk=str(pk))
else:
from users.models import TechUser
try:
user = TechUser.objects.get(pk=pk)
except Exception:
return None
# Return only wanted properties about the user
class User():
pass
user_cleaned = User()
for prop in settings.PIAPI_USERDATA:
if hasattr(user, prop):
setattr(user_cleaned, prop, getattr(user, prop))
user_cleaned.id = str(user_cleaned.pk) if hasattr(user_cleaned, 'pk') else None
return user_cleaned
@register.filter
def url_target_blank(text):
return text.replace('<a ', '<a target="_blank" ')
|
ebu/PlugIt
|
plugit_proxy/templatetags/plugit_tags.py
|
Python
|
bsd-3-clause
| 2,347
|
"""The WaveBlocks Project
Compute some observables like norm, kinetic and potential energy
of Hagedorn wavepackets. This class implements the mixed case
where the bra does not equal the ket.
@author: R. Bourquin
@copyright: Copyright (C) 2014, 2016 R. Bourquin
@license: Modified BSD License
"""
from functools import partial
from numpy import squeeze, sum
from WaveBlocksND.Observables import Observables
__all__ = ["ObservablesMixedHAWP"]
class ObservablesMixedHAWP(Observables):
r"""This class implements the mixed case observable computation
:math:`\langle \Psi | \cdot | \Psi^{\prime} \rangle` for Hagedorn
wavepackets :math:`\Psi` where the bra :math:`\Psi` does not equal
the ket :math:`\Psi^{\prime}`.
"""
def __init__(self, *, innerproduct=None, gradient=None):
r"""Initialize a new :py:class:`ObservablesMixedHAWP` instance for observable computation of Hagedorn wavepackets.
"""
        self._innerproduct = innerproduct
        self._gradient = gradient
def set_innerproduct(self, innerproduct):
r"""Set the innerproduct.
:param innerproduct: An inner product for computing the integrals. The inner product is used
for the computation of all brakets
:math:`\langle \Psi | \cdot | \Psi^{\prime} \rangle`.
:type innerproduct: A :py:class:`InnerProduct` subclass instance.
.. note:: Make sure to use an inhomogeneous inner product here.
"""
self._innerproduct = innerproduct
def set_gradient(self, gradient):
r"""Set the gradient.
:param gradient: A gradient operator. The gradient is only used for the computation of the kinetic
energy :math:`\langle \Psi | T | \Psi^{\prime} \rangle`.
:type gradient: A :py:class:`Gradient` subclass instance.
"""
self._gradient = gradient
def overlap(self, pacbra, packet, *, component=None, summed=False):
r"""Calculate the overlap :math:`\langle \Psi | \Psi^{\prime} \rangle` of the wavepackets
:math:`\Psi` and :math:`\Psi^{\prime}`.
:param pacbra: The wavepacket :math:`\Psi` which takes part in the overlap integral.
:type pacbra: A :py:class:`HagedornWavepacketBase` subclass instance.
:param packet: The wavepacket :math:`\Psi^{\prime}` which takes part in the overlap integral.
:type packet: A :py:class:`HagedornWavepacketBase` subclass instance.
:param component: The index :math:`i` of the components :math:`\Phi_i` of :math:`\Psi`
and :math:`\Phi_i^{\prime}` of :math:`\Psi^{\prime}` whose overlap is
computed. The default value is ``None`` which means to compute the
overlaps with all :math:`N` components involved.
:type component: Integer or ``None``.
:param summed: Whether to sum up the overlaps :math:`\langle \Phi_i | \Phi_i^{\prime} \rangle`
of the individual components :math:`\Phi_i` and :math:`\Phi_i^{\prime}`.
:type summed: Boolean, default is ``False``.
:return: The overlap of :math:`\Psi` with :math:`\Psi^{\prime}` or the overlap of :math:`\Phi_i`
with :math:`\Phi_i^{\prime}` or a list with the :math:`N` overlaps of all components.
(Depending on the optional arguments.)
"""
return self._innerproduct.quadrature(pacbra, packet, diag_component=component, diagonal=True, summed=summed)
def norm(self, wavepacket, *, component=None, summed=False):
r"""Calculate the :math:`L^2` norm :math:`\langle \Psi | \Psi \rangle` of the wavepacket :math:`\Psi`.
:param wavepacket: The wavepacket :math:`\Psi` of which we compute the norm.
:type wavepacket: A :py:class:`HagedornWavepacketBase` subclass instance.
:param component: The index :math:`i` of the component :math:`\Phi_i` whose norm is computed.
The default value is ``None`` which means to compute the norms of all :math:`N` components.
:type component: int or ``None``.
:param summed: Whether to sum up the norms :math:`\langle \Phi_i | \Phi_i \rangle` of the
individual components :math:`\Phi_i`.
:type summed: Boolean, default is ``False``.
:return: The norm of :math:`\Psi` or the norm of :math:`\Phi_i` or a list with the :math:`N`
norms of all components. (Depending on the optional arguments.)
.. note:: This method just redirects to a call to :py:meth:`HagedornWavepacketBase.norm`.
"""
return wavepacket.norm(component=component, summed=summed)
def kinetic_overlap_energy(self, pacbra, packet, *, component=None, summed=False):
r"""Compute the kinetic energy overlap :math:`\langle \Psi | T | \Psi^{\prime} \rangle`
of the different components :math:`\Phi_i` and :math:`\Phi_i^{\prime}` of the
wavepackets :math:`\Psi` and :math:`\Psi^{\prime}`.
:param pacbra: The wavepacket :math:`\Psi` which takes part in the kinetic energy integral.
:type pacbra: A :py:class:`HagedornWavepacketBase` subclass instance.
:param packet: The wavepacket :math:`\Psi^{\prime}` which takes part in the kinetic energy integral.
:type packet: A :py:class:`HagedornWavepacketBase` subclass instance.
:param component: The index :math:`i` of the components :math:`\Phi_i` of :math:`\Psi`
and :math:`\Phi_i^{\prime}` of :math:`\Psi^{\prime}` which take part in the
kinetic energy integral. If set to ``None`` the computation is performed for
all :math:`N` components of :math:`\Psi` and :math:`\Psi^{\prime}`.
:type component: Integer or ``None``.
:param summed: Whether to sum up the kinetic energies :math:`E_i` of the individual
components :math:`\Phi_i` and :math:`\Phi_i^{\prime}`.
:type summed: Boolean, default is ``False``.
:return: A list of the kinetic energy overlap integrals of the individual components or
the overall kinetic energy overlap of the wavepackets. (Depending on the optional arguments.)
"""
Nbra = pacbra.get_number_components()
Nket = packet.get_number_components()
if not Nbra == Nket:
# TODO: Drop this requirement, should be easy when zip(...) exhausts
raise ValueError("Number of components in bra (%d) and ket (%d) differs!" % (Nbra, Nket))
if component is None:
components = range(Nbra)
else:
components = [component]
ekin = []
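        # Kinetic energy of component n: one half of the inner product of the gradients,
        # <grad Phi_n | grad Phi'_n>, summed over all gradient components.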
for n in components:
gradpacbra = self._gradient.apply_gradient(pacbra, component=n)
gradpacket = self._gradient.apply_gradient(packet, component=n)
Q = [self._innerproduct.quadrature(gpb, gpk, diag_component=n) for gpb, gpk in zip(gradpacbra, gradpacket)]
ekin.append(0.5 * sum(Q))
if summed is True:
ekin = sum(ekin)
elif component is not None:
# Do not return a list for specific single components
ekin = ekin[0]
return ekin
def kinetic_energy(self, wavepacket, *, component=None, summed=False):
r"""Compute the kinetic energy :math:`E_{\text{kin}} := \langle \Psi | T | \Psi \rangle`
of the different components :math:`\Phi_i` of the wavepacket :math:`\Psi`.
:param wavepacket: The wavepacket :math:`\Psi` of which we compute the kinetic energy.
:type wavepacket: A :py:class:`HagedornWavepacketBase` subclass instance.
:param component: The index :math:`i` of the component :math:`\Phi_i` whose
kinetic energy we compute. If set to ``None`` the
computation is performed for all :math:`N` components.
:type component: Integer or ``None``.
:param summed: Whether to sum up the kinetic energies :math:`E_i` of the individual
components :math:`\Phi_i`.
:type summed: Boolean, default is ``False``.
:return: A list of the kinetic energies of the individual components or the
overall kinetic energy of the wavepacket. (Depending on the optional arguments.)
.. note:: This method just expands to a call of the :py:meth:`ObservablesMixedHAWP.kinetic_overlap_energy`
method. Better use :py:meth:`ObservablesHAWP.kinetic_energy`.
"""
return self.kinetic_overlap_energy(wavepacket, wavepacket, component=component, summed=summed)
def potential_overlap_energy(self, pacbra, packet, potential, *, component=None, summed=False):
r"""Compute the potential energy overlap :math:`\langle \Psi | V(x) | \Psi^{\prime} \rangle`
of the different components :math:`\Phi_i` and :math:`\Phi_i^{\prime}` of the
wavepackets :math:`\Psi` and :math:`\Psi^{\prime}`.
:param pacbra: The wavepacket :math:`\Psi` which takes part in the potential energy integral.
:type pacbra: A :py:class:`HagedornWavepacketBase` subclass instance.
:param packet: The wavepacket :math:`\Psi^{\prime}` which takes part in the potential energy integral.
:type packet: A :py:class:`HagedornWavepacketBase` subclass instance.
:param potential: The potential :math:`V(x)`. (Actually, not the potential object itself
but one of its ``V.evaluate_*`` methods.)
:param component: The index :math:`i` of the components :math:`\Phi_i` of :math:`\Psi`
and :math:`\Phi_i^{\prime}` of :math:`\Psi^{\prime}` which take part in the
potential energy integral. If set to ``None`` the computation is performed for
all :math:`N` components of :math:`\Psi` and :math:`\Psi^{\prime}`.
:type component: Integer or ``None``.
:param summed: Whether to sum up the potential energies :math:`E_i` of the individual
components :math:`\Phi_i` and :math:`\Phi_i^{\prime}`.
:type summed: Boolean, default is ``False``.
:return: A list of the potential energy overlap integrals of the individual components or
the overall potential energy overlap of the wavepackets. (Depending on the optional arguments.)
"""
Nbra = pacbra.get_number_components()
Nket = packet.get_number_components()
if not Nbra == Nket:
# TODO: Drop this requirement, should be easy when zip(...) exhausts
raise ValueError("Number of components in bra (%d) and ket (%d) differs!" % (Nbra, Nket))
# TODO: Better take 'V' instead of 'V.evaluate_at' as argument?
# f = partial(potential.evaluate_at, as_matrix=True)
f = partial(potential, as_matrix=True)
# Compute the brakets for each component
if component is not None:
Q = self._innerproduct.quadrature(pacbra, packet, operator=f, diag_component=component, eval_at_once=True)
Q = [squeeze(Q)]
else:
Q = self._innerproduct.quadrature(pacbra, packet, operator=f, eval_at_once=True)
Q = list(map(squeeze, Q))
# And don't forget the summation in the matrix multiplication of 'operator' and 'ket'
# TODO: Should this go inside the innerproduct?
epot = [sum(Q[i * Nket:(i + 1) * Nket]) for i in range(Nbra)]
if summed is True:
epot = sum(epot)
elif component is not None:
# Do not return a list for specific single components
epot = epot[0]
return epot
def potential_energy(self, wavepacket, potential, *, component=None, summed=False):
r"""Compute the potential energy :math:`E_{\text{pot}} := \langle \Psi | V(x) | \Psi \rangle`
of the different components :math:`\Phi_i` of the wavepacket :math:`\Psi`.
:param wavepacket: The wavepacket :math:`\Psi` of which we compute the potential energy.
:type wavepacket: A :py:class:`HagedornWavepacketBase` subclass instance.
:param potential: The potential :math:`V(x)`. (Actually, not the potential object itself
but one of its ``V.evaluate_*`` methods.)
:param component: The index :math:`i` of the component :math:`\Phi_i` whose
potential energy we compute. If set to ``None`` the
computation is performed for all :math:`N` components.
:type component: Integer or ``None``.
:param summed: Whether to sum up the potential energies :math:`E_i` of the individual
components :math:`\Phi_i`.
:type summed: Boolean, default is ``False``.
:return: A list of the potential energies of the individual components or the
overall potential energy of the wavepacket. (Depending on the optional arguments.)
.. note:: This method just expands to a call of the :py:meth:`ObservablesMixedHAWP.potential_overlap_energy`
method. Better use :py:meth:`ObservablesHAWP.potential_energy`.
"""
return self.potential_overlap_energy(wavepacket, wavepacket, potential, component=component, summed=summed)
|
WaveBlocks/WaveBlocksND
|
WaveBlocksND/ObservablesMixedHAWP.py
|
Python
|
bsd-3-clause
| 13,370
|
import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics import *
from nodebox.gui.controls import *
# Comparison between Rows and Row containers.
# Both are subclasses of Layout.
# Panel 1
# Controls in a Rows layout are drawn below each other.
# Rows.width defines the width of all controls (individual width is ignored).
# Note how the second Field has a height and wrap=True,
# which makes it a multi-line field with text wrapping.
panel1 = Panel("Panel 1", x=30, y=350)
panel1.append(
Rows([
Field(value="", hint="subject"),
Field(value="", hint="message", height=70, id="field_msg1", wrap=True),
Button("Send"),
], width=200)
)
panel1.pack()
# Panel 2
# Controls in a Row layout are drawn next to each other.
# Row.width defines the width of all controls (individual width is ignored).
# This means that each column has the same width.
# Note the align=TOP, which vertically aligns each column at the top (default is CENTER).
panel2 = Panel("Panel 2", x=30, y=200)
panel2.append(
Row([
Field(value="", hint="message", height=70, id="field_msg2", wrap=True),
Button("Send", width=400),
], width=200, align=TOP)
)
panel2.pack()
# Panel 3
# If you need columns of a different width, put a Layout in a column,
# in other words a Row or Rows nested inside a Row or Rows.
# Then put your controls in the nested layout,
# the layout's width will override the column width setting.
panel3 = Panel("Panel 3", x=30, y=30)
panel3.append(
Row([ # Field will be 200 wide, the Row column width setting.
Field(value="", hint="message", height=70, id="field_msg2", wrap=True),
("Actions:", Rows([
Button("Send"), # However, buttons will be 100 wide,
Button("Save") # because their Rows parent says so.
], width=100))
], width=200, align=TOP)
)
panel3.pack()
# Panel 4
# Without layouts, you are free to draw controls wherever you want in a panel.
# Panel.pack() will make sure that the panel fits snugly around the controls.
# In this case, we place a button on the panel, with a field above it (hence y=40).
# The field has its own dimensions (width=300 and height=50).
panel4 = Panel("Panel 4", x=400, y=30)
panel4.extend([
Field(value="", hint="message", y=40, width=300, height=50, id="field_msg3", wrap=True, reserved=[]),
Button("Send")
])
panel4.pack()
# Note the reserved=[] with the field.
# By default, fields have ENTER and TAB keys reserved:
# enter fires Field.on_action(), tab moves away from the field.
# By clearing the reserved list we can type enter and tab inside the field.
# Panel 5
# If you don't pack the panel, you have to set its width and height manually,
# as well as the position of all controls:
panel5 = Panel("Panel 5", x=500, y=200, width=200, height=150)
panel5.extend([
Field(value="", hint="message", x=10, y=60, width=180, height=50, id="field_msg3", wrap=True),
Button("Send", x=10, y=20, width=180)
])
def draw(canvas):
canvas.clear()
canvas.append(panel1)
canvas.append(panel2)
canvas.append(panel3)
canvas.append(panel4)
canvas.append(panel5)
canvas.size = 800, 600
canvas.run(draw)
|
pepsipepsi/nodebox_opengl_python3
|
examples/10-gui/05-layout.py
|
Python
|
bsd-3-clause
| 3,183
|
from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from rest_framework.response import Response
from sentry.api.base import Endpoint
from sentry.api.permissions import assert_perm
from sentry.api.serializers import serialize
from sentry.models import Group, GroupStatus, Project, Team
class TeamGroupsNewEndpoint(Endpoint):
def get(self, request, team_id):
"""
Return a list of the newest groups for a given team.
The resulting query will find groups which have been seen since the
cutoff date, and then sort those by score, returning the highest scoring
groups first.
"""
team = Team.objects.get_from_cache(
id=team_id,
)
assert_perm(team, request.user, request.auth)
minutes = int(request.REQUEST.get('minutes', 15))
limit = min(100, int(request.REQUEST.get('limit', 10)))
project_list = Project.objects.get_for_user(user=request.user, team=team)
project_dict = dict((p.id, p) for p in project_list)
cutoff = timedelta(minutes=minutes)
cutoff_dt = timezone.now() - cutoff
group_list = list(Group.objects.filter(
project__in=project_dict.keys(),
status=GroupStatus.UNRESOLVED,
active_at__gte=cutoff_dt,
).extra(
select={'sort_value': 'score'},
).order_by('-score', '-first_seen')[:limit])
for group in group_list:
group._project_cache = project_dict.get(group.project_id)
return Response(serialize(group_list, request.user))
|
camilonova/sentry
|
src/sentry/api/endpoints/team_groups_new.py
|
Python
|
bsd-3-clause
| 1,628
|
"""
Checking for connected components in a graph.
"""
__author__ = "Sergio J. Rey <srey@asu.edu>"
__all__ = ["check_contiguity"]
from operator import lt
def is_component(w, ids):
"""Check if the set of ids form a single connected component
Parameters
----------
    w : spatial weights object
ids : list
identifiers of units that are tested to be a single connected
component
Returns
-------
True : if the list of ids represents a single connected component
False : if the list of ids forms more than a single connected component
"""
components = 0
marks = dict([(node, 0) for node in ids])
q = []
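    # Flood-fill from each unvisited node; finding a second unvisited seed means the ids span more than one component.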
for node in ids:
if marks[node] == 0:
components += 1
q.append(node)
if components > 1:
return False
while q:
node = q.pop()
marks[node] = components
others = [neighbor for neighbor in w.neighbors[node]
if neighbor in ids]
for other in others:
if marks[other] == 0 and other not in q:
q.append(other)
return True
def check_contiguity(w, neighbors, leaver):
"""Check if contiguity is maintained if leaver is removed from neighbors
Parameters
----------
w : spatial weights object
simple contiguity based weights
neighbors : list
nodes that are to be checked if they form a single \
connected component
leaver : id
a member of neighbors to check for removal
Returns
-------
    True : if removing leaver from neighbors does not break contiguity
           of the remaining set in neighbors
False : if removing leaver from neighbors breaks contiguity
Example
-------
Setup imports and a 25x25 spatial weights matrix on a 5x5 square region.
>>> import pysal.lib as lps
>>> w = lps.weights.lat2W(5, 5)
Test removing various areas from a subset of the region's areas. In the
first case the subset is defined as observations 0, 1, 2, 3 and 4. The
test shows that observations 0, 1, 2 and 3 remain connected even if
observation 4 is removed.
>>> check_contiguity(w,[0,1,2,3,4],4)
True
>>> check_contiguity(w,[0,1,2,3,4],3)
False
>>> check_contiguity(w,[0,1,2,3,4],0)
True
>>> check_contiguity(w,[0,1,2,3,4],1)
False
>>>
"""
ids = neighbors[:]
ids.remove(leaver)
return is_component(w, ids)
class Graph(object):
def __init__(self, undirected=True):
self.nodes = set()
self.edges = {}
self.cluster_lookup = {}
self.no_link = {}
self.undirected = undirected
def add_edge(self, n1, n2, w):
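        # Record the weighted edge; for undirected graphs the reverse direction is stored as well.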
self.nodes.add(n1)
self.nodes.add(n2)
self.edges.setdefault(n1, {}).update({n2: w})
if self.undirected:
self.edges.setdefault(n2, {}).update({n1: w})
def connected_components(self, threshold=0.9, op=lt):
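        # Partition the nodes into subgraphs whose members are mutually reachable through edges
        # with weight w satisfying op(w, threshold).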
if not self.undirected:
            warn = "Warning, connected components not "
warn += "defined for a directed graph"
print(warn)
return None
else:
nodes = set(self.nodes)
components, visited = [], set()
while len(nodes) > 0:
connected, visited = self.dfs(
nodes.pop(), visited, threshold, op)
connected = set(connected)
for node in connected:
if node in nodes:
nodes.remove(node)
subgraph = Graph()
subgraph.nodes = connected
subgraph.no_link = self.no_link
for s in subgraph.nodes:
for k, v in list(self.edges.get(s, {}).items()):
if k in subgraph.nodes:
subgraph.edges.setdefault(s, {}).update({k: v})
if s in self.cluster_lookup:
subgraph.cluster_lookup[s] = self.cluster_lookup[s]
components.append(subgraph)
return components
def dfs(self, v, visited, threshold, op=lt, first=None):
aux = [v]
visited.add(v)
if first is None:
first = v
for i in (n for n, w in list(self.edges.get(v, {}).items())
if op(w, threshold) and n not in visited):
x, y = self.dfs(i, visited, threshold, op, first)
aux.extend(x)
visited = visited.union(y)
return aux, visited
|
lixun910/pysal
|
pysal/explore/giddy/components.py
|
Python
|
bsd-3-clause
| 4,697
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devault.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
opportunitynetwork/devault
|
devault/manage.py
|
Python
|
bsd-3-clause
| 250
|
"""A collection of classes and methods to deal with collections of
rates that together make up a network."""
# Common Imports
import warnings
import functools
import math
import os
from operator import mul
from collections import OrderedDict
from ipywidgets import interact
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MaxNLocator
import networkx as nx
# Import Rate
from pynucastro.rates import Rate, Nucleus, Library
mpl.rcParams['figure.dpi'] = 100
class Composition:
"""a composition holds the mass fractions of the nuclei in a network
-- useful for evaluating the rates
"""
def __init__(self, nuclei, small=1.e-16):
"""nuclei is an iterable of the nuclei (Nucleus objects) in the network"""
if not isinstance(nuclei[0], Nucleus):
raise ValueError("must supply an iterable of Nucleus objects")
else:
self.X = {k: small for k in nuclei}
def set_solar_like(self, Z=0.02):
""" approximate a solar abundance, setting p to 0.7, He4 to 0.3 - Z and
the remainder evenly distributed with Z """
num = len(self.X)
rem = Z/(num-2)
for k in self.X:
if k == Nucleus("p"):
self.X[k] = 0.7
elif k.raw == "he4":
self.X[k] = 0.3 - Z
else:
self.X[k] = rem
self.normalize()
def set_all(self, xval):
""" set all species to a particular value """
for k in self.X:
self.X[k] = xval
def set_nuc(self, name, xval):
""" set nuclei name to the mass fraction xval """
for k in self.X:
if k.raw == name:
self.X[k] = xval
break
def normalize(self):
""" normalize the mass fractions to sum to 1 """
X_sum = sum(self.X[k] for k in self.X)
for k in self.X:
self.X[k] /= X_sum
def get_molar(self):
""" return a dictionary of molar fractions"""
molar_frac = {k: v/k.A for k, v in self.X.items()}
return molar_frac
def eval_ye(self):
""" return the electron fraction """
zvec = []
avec = []
xvec = []
for n in self.X:
zvec.append(n.Z)
avec.append(n.A)
xvec.append(self.X[n])
zvec = np.array(zvec)
avec = np.array(avec)
xvec = np.array(xvec)
electron_frac = np.sum(zvec*xvec/avec)/np.sum(xvec)
return electron_frac
def __str__(self):
ostr = ""
for k in self.X:
ostr += f" X({k}) : {self.X[k]}\n"
return ostr
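# --- Editor's addition: a hedged usage sketch for Composition, not part of the
# original module. It assumes Nucleus accepts names such as "p", "he4", "c12";
# the demo is wrapped in a function so the module itself stays side-effect free.
def _demo_composition_usage():
    nuclei = [Nucleus("p"), Nucleus("he4"), Nucleus("c12"), Nucleus("o16")]
    comp = Composition(nuclei)
    # solar-like guess: X(p) = 0.7, X(he4) = 0.3 - Z, metals share Z evenly
    comp.set_solar_like(Z=0.02)
    # molar fractions Y = X/A and the electron fraction Y_e = sum(Z X / A) / sum(X)
    ys = comp.get_molar()
    ye = comp.eval_ye()
    return ys, ye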
class RateCollection:
""" a collection of rates that together define a network """
pynucastro_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def __init__(self, rate_files=None, libraries=None, rates=None, precedence=()):
"""
rate_files are the files that together define the network. This
can be any iterable or single string.
This can include Reaclib library files storing multiple rates.
If libraries is supplied, initialize a RateCollection using the rates
in the Library object(s) in list 'libraries'.
If rates is supplied, initialize a RateCollection using the
Rate objects in the list 'rates'.
Precedence should be a sequence of rate labels (e.g. wc17) to be used to
resolve name conflicts. If a nonempty sequence is provided, the rate
collection will automatically be scanned for multiple rates with the
same name. If all of their labels were given a ranking, the rate with
the label that comes first in the sequence will be retained and the
rest discarded.
Any combination of these options may be supplied.
"""
self.files = []
self.rates = []
self.library = None
if rate_files:
if isinstance(rate_files, str):
rate_files = [rate_files]
self._read_rate_files(rate_files)
if rates:
if isinstance(rates, Rate):
rates = [rates]
try:
for r in rates:
assert isinstance(r, Rate)
except:
print('Expected Rate object or list of Rate objects passed as the rates argument.')
raise
else:
rlib = Library(rates=rates)
if not self.library:
self.library = rlib
else:
self.library = self.library + rlib
if libraries:
if isinstance(libraries, Library):
libraries = [libraries]
try:
for lib in libraries:
assert isinstance(lib, Library)
except:
print('Expected Library object or list of Library objects passed as the libraries argument.')
raise
else:
if not self.library:
self.library = libraries.pop(0)
for lib in libraries:
self.library = self.library + lib
if self.library:
self.rates = self.rates + self.library.get_rates()
if precedence:
self._make_distinguishable(precedence)
# get the unique nuclei
u = []
for r in self.rates:
t = set(r.reactants + r.products)
u = set(list(u) + list(t))
self.unique_nuclei = sorted(u)
# now make a list of each rate that touches each nucleus
# we'll store this in a dictionary keyed on the nucleus
self.nuclei_consumed = OrderedDict()
self.nuclei_produced = OrderedDict()
for n in self.unique_nuclei:
self.nuclei_consumed[n] = [r for r in self.rates if n in r.reactants]
self.nuclei_produced[n] = [r for r in self.rates if n in r.products]
# Re-order self.rates so Reaclib rates come first,
# followed by Tabular rates. This is needed if
# reaclib coefficients are targets of a pointer array
# in the Fortran network.
# It is desired to avoid wasting array size
# storing meaningless Tabular coefficient pointers.
self.rates = sorted(self.rates,
key=lambda r: r.chapter == 't')
self.tabular_rates = []
self.reaclib_rates = []
for n, r in enumerate(self.rates):
if r.chapter == 't':
self.tabular_rates.append(n)
elif isinstance(r.chapter, int):
self.reaclib_rates.append(n)
else:
print('ERROR: Chapter type unknown for rate chapter {}'.format(
str(r.chapter)))
exit()
def _read_rate_files(self, rate_files):
# get the rates
self.files = rate_files
for rf in self.files:
try:
rflib = Library(rf)
except:
print(f"Error reading library from file: {rf}")
raise
else:
if not self.library:
self.library = rflib
else:
self.library = self.library + rflib
def get_nuclei(self):
""" get all the nuclei that are part of the network """
return self.unique_nuclei
def evaluate_rates(self, rho, T, composition):
"""evaluate the rates for a specific density, temperature, and
composition"""
rvals = OrderedDict()
ys = composition.get_molar()
y_e = composition.eval_ye()
for r in self.rates:
val = r.prefactor * rho**r.dens_exp * r.eval(T, rho * y_e)
if (r.weak_type == 'electron_capture' and not r.tabular):
val = val * y_e
yfac = functools.reduce(mul, [ys[q] for q in r.reactants])
rvals[r] = yfac * val
return rvals
def evaluate_ydots(self, rho, T, composition):
"""evaluate net rate of change of molar abundance for each nucleus
for a specific density, temperature, and composition"""
rvals = self.evaluate_rates(rho, T, composition)
ydots = dict()
for nuc in self.unique_nuclei:
# Rates that consume / produce nuc
consuming_rates = self.nuclei_consumed[nuc]
producing_rates = self.nuclei_produced[nuc]
# Number of nuclei consumed / produced
nconsumed = (r.reactants.count(nuc) for r in consuming_rates)
nproduced = (r.products.count(nuc) for r in producing_rates)
# Multiply each rate by the count
consumed = (c * rvals[r] for c, r in zip(nconsumed, consuming_rates))
produced = (c * rvals[r] for c, r in zip(nproduced, producing_rates))
# Net change is difference between produced and consumed
ydots[nuc] = sum(produced) - sum(consumed)
return ydots
def evaluate_activity(self, rho, T, composition):
"""sum over all of the terms contributing to ydot,
neglecting sign"""
rvals = self.evaluate_rates(rho, T, composition)
act = dict()
for nuc in self.unique_nuclei:
# Rates that consume / produce nuc
consuming_rates = self.nuclei_consumed[nuc]
producing_rates = self.nuclei_produced[nuc]
# Number of nuclei consumed / produced
nconsumed = (r.reactants.count(nuc) for r in consuming_rates)
nproduced = (r.products.count(nuc) for r in producing_rates)
# Multiply each rate by the count
consumed = (c * rvals[r] for c, r in zip(nconsumed, consuming_rates))
produced = (c * rvals[r] for c, r in zip(nproduced, producing_rates))
# Net activity is sum of produced and consumed
act[nuc] = sum(produced) + sum(consumed)
return act
def network_overview(self):
""" return a verbose network overview """
ostr = ""
for n in self.unique_nuclei:
ostr += f"{n}\n"
ostr += " consumed by:\n"
for r in self.nuclei_consumed[n]:
ostr += f" {r.string}\n"
ostr += " produced by:\n"
for r in self.nuclei_produced[n]:
ostr += f" {r.string}\n"
ostr += "\n"
return ostr
def get_screening_map(self):
"""a screening map is just a list of tuples containing the information
about nuclei pairs for screening: (descriptive name of nuclei,
nucleus 1, nucleus 2, rate, 1-based index of rate)
"""
screening_map = []
for k, r in enumerate(self.rates):
if r.ion_screen:
nucs = "_".join([str(q) for q in r.ion_screen])
in_map = False
for h, _, _, mrates, krates in screening_map:
if h == nucs:
# if we already have the reactants, then we
# will already be doing the screening factors,
# so just append this new rate to the list we
# are keeping of the rates where this
# screening is needed
in_map = True
mrates.append(r)
krates.append(k+1)
break
if not in_map:
# we handle 3-alpha specially -- we actually need 2 screening factors for it
if nucs == "he4_he4_he4":
# he4 + he4
screening_map.append((nucs, r.ion_screen[0], r.ion_screen[1],
[r], [k+1]))
# he4 + be8
be8 = Nucleus("Be8", dummy=True)
screening_map.append((nucs+"_dummy", r.ion_screen[2], be8,
[r], [k+1]))
else:
screening_map.append((nucs, r.ion_screen[0], r.ion_screen[1],
[r], [k+1]))
return screening_map
def write_network(self, *args, **kwargs):
"""Before writing the network, check to make sure the rates
are distinguishable by name."""
assert self._distinguishable_rates(), "ERROR: Rates not uniquely identified by Rate.fname"
self._write_network(*args, **kwargs)
def _distinguishable_rates(self):
"""Every Rate in this RateCollection should have a unique Rate.fname,
as the network writers distinguish the rates on this basis."""
names = [r.fname for r in self.rates]
for n, r in zip(names, self.rates):
k = names.count(n)
if k > 1:
print(f'Found rate {r} named {n} with {k} entries in the RateCollection.')
print(f'Rate {r} has the original source:\n{r.original_source}')
print(f'Rate {r} is in chapter {r.chapter}')
return len(set(names)) == len(self.rates)
def _make_distinguishable(self, precedence):
"""If multiple rates have the same name, eliminate the extraneous ones according to their
labels' positions in the precedence list. Only do this if all of the labels have
rankings in the list."""
nameset = {r.fname for r in self.rates}
precedence = {lab: i for i, lab in enumerate(precedence)}
def sorting_key(i): return precedence[self.rates[i].label]
for n in nameset:
# Count instances of name, and cycle if there is only one
ind = [i for i, r in enumerate(self.rates) if r.fname == n]
k = len(ind)
if k <= 1: continue
# If there were multiple instances, use the precedence settings to delete extraneous
# rates
labels = [self.rates[i].label for i in ind]
if all(lab in precedence for lab in labels):
sorted_ind = sorted(ind, key=sorting_key)
r = self.rates[sorted_ind[0]]
for i in sorted(sorted_ind[1:], reverse=True): del self.rates[i]
print(f'Found rate {r} named {n} with {k} entries in the RateCollection.')
print(f'Kept only entry with label {r.label} out of {labels}.')
def _write_network(self, *args, **kwargs):
"""A stub for function to output the network -- this is implementation
dependent."""
print('To create network integration source code, use a class that implements a specific network type.')
return
def plot(self, outfile=None, rho=None, T=None, comp=None,
size=(800, 600), dpi=100, title=None,
ydot_cutoff_value=None,
node_size=1000, node_font_size=13, node_color="#A0CBE2", node_shape="o",
N_range=None, Z_range=None, rotated=False,
always_show_p=False, always_show_alpha=False, hide_xalpha=False, filter_function=None):
"""Make a plot of the network structure showing the links between
nuclei. If a full set of thermodynamic conditions is
provided (rho, T, comp), then the links are colored by rate
strength.
Parameters
----------
outfile: output name of the plot -- extension determines the type
rho: density to evaluate rates with
T: temperature to evaluate rates with
comp: composition to evaluate rates with
size: tuple giving width x height of the plot in inches
dpi: pixels per inch used by matplotlib in rendering bitmap
title: title to display on the plot
ydot_cutoff_value: rate threshold below which we do not show a
line corresponding to a rate
node_size: size of a node
node_font_size: size of the font used to write the isotope in the node
node_color: color to make the nodes
node_shape: shape of the node (using matplotlib marker names)
N_range: range of neutron number to zoom in on
Z_range: range of proton number to zoom in on
rotated: if True, we plot A - 2Z vs. Z instead of the default Z vs. N
always_show_p: include p as a node on the plot even if we
don't have p+p reactions
always_show_alpha: include He4 as a node on the plot even if we don't have 3-alpha
hide_xalpha: if True, don't connect the links to alpha for heavy
nuclei reactions of the form A(alpha,X)B or A(X,alpha)B, except if alpha
is the heaviest product.
filter_function: a callable that takes the list of nuclei and
returns a new list with the nuclei to be shown as nodes.
"""
G = nx.MultiDiGraph()
G.position = {}
G.labels = {}
fig, ax = plt.subplots()
#divider = make_axes_locatable(ax)
#cax = divider.append_axes('right', size='15%', pad=0.05)
#ax.plot([0, 0], [8, 8], 'b-')
# in general, we do not show p, n, alpha,
# unless we have p + p, 3-a, etc.
hidden_nuclei = ["n"]
if not always_show_p:
hidden_nuclei.append("p")
if not always_show_alpha:
hidden_nuclei.append("he4")
# nodes -- the node nuclei will be all of the heavies
# add all the nuclei into G.node
node_nuclei = []
for n in self.unique_nuclei:
if n.raw not in hidden_nuclei:
node_nuclei.append(n)
else:
for r in self.rates:
if r.reactants.count(n) > 1:
node_nuclei.append(n)
break
if filter_function is not None:
node_nuclei = list(filter(filter_function, node_nuclei))
for n in node_nuclei:
G.add_node(n)
if rotated:
G.position[n] = (n.Z, n.A - 2*n.Z)
else:
G.position[n] = (n.N, n.Z)
G.labels[n] = fr"${n.pretty}$"
# get the rates for each reaction
if rho is not None and T is not None and comp is not None:
ydots = self.evaluate_rates(rho, T, comp)
else:
ydots = None
# Do not show rates on the graph if their corresponding ydot is less than ydot_cutoff_value
invisible_rates = set()
if ydot_cutoff_value is not None:
for r in self.rates:
if ydots[r] < ydot_cutoff_value:
invisible_rates.add(r)
# edges
for n in node_nuclei:
for r in self.nuclei_consumed[n]:
for p in r.products:
if p in node_nuclei:
if hide_xalpha:
# first check if alpha is the heaviest nucleus on the RHS
rhs_heavy = sorted(r.products)[-1]
if not (rhs_heavy.Z == 2 and rhs_heavy.A == 4):
# for rates that are A (x, alpha) B, where A and B are heavy nuclei,
# don't show the connection of the nucleus to alpha, only show it to B
if p.Z == 2 and p.A == 4:
continue
# likewise, hide A (alpha, x) B, unless A itself is an alpha
c = r.reactants
n_alpha = 0
for nuc in c:
if nuc.Z == 2 and nuc.A == 4:
n_alpha += 1
# if there is only 1 alpha and we are working on the alpha node,
# then skip
if n_alpha == 1 and n.Z == 2 and n.A == 4:
continue
# networkx doesn't seem to keep the edges in
# any particular order, so we associate data
# to the edges here directly, in this case,
# the reaction rate, which will be used to
# color it
if ydots is None:
G.add_edges_from([(n, p)], weight=0.5)
else:
if r in invisible_rates:
continue
try:
rate_weight = math.log10(ydots[r])
except ValueError:
# if ydots[r] is zero, then set the weight
# to roughly the minimum exponent possible
# for python floats
rate_weight = -308
except:
raise
G.add_edges_from([(n, p)], weight=rate_weight)
# It seems that networkx broke backwards compatibility, and 'zorder' is no longer a valid
# keyword argument. The 'linewidth' argument has also changed to 'linewidths'.
nx.draw_networkx_nodes(G, G.position, # plot the element at the correct position
node_color=node_color, alpha=1.0,
node_shape=node_shape, node_size=node_size, linewidths=2.0, ax=ax)
nx.draw_networkx_labels(G, G.position, G.labels, # label the name of element at the correct position
font_size=node_font_size, font_color="w", ax=ax)
# get the edges and weights coupled in the same order
edges, weights = zip(*nx.get_edge_attributes(G, 'weight').items())
edge_color=weights
ww = np.array(weights)
min_weight = ww.min()
max_weight = ww.max()
dw = (max_weight - min_weight)/4
widths = np.ones_like(ww)
widths[ww > min_weight + dw] = 1.5
widths[ww > min_weight + 2*dw] = 2.5
widths[ww > min_weight + 3*dw] = 4
edges_lc = nx.draw_networkx_edges(G, G.position, width=list(widths), # plot the arrow of reaction
edgelist=edges, edge_color=edge_color,
node_size=node_size,
edge_cmap=plt.cm.viridis, ax=ax)
# for networkx <= 2.0 draw_networkx_edges returns a
# LineCollection matplotlib type which we can use for the
# colorbar directly. For networkx >= 2.1, it is a collection
# of FancyArrowPatch-s, which we need to run through a
# PatchCollection. See:
# https://stackoverflow.com/questions/18658047/adding-a-matplotlib-colorbar-from-a-patchcollection
if ydots is not None:
pc = mpl.collections.PatchCollection(edges_lc, cmap=plt.cm.viridis)
pc.set_array(weights)
if not rotated:
plt.colorbar(pc, ax=ax, label="log10(rate)")
else:
plt.colorbar(pc, ax=ax, label="log10(rate)", orientation="horizontal", fraction=0.05)
Ns = [n.N for n in node_nuclei]
Zs = [n.Z for n in node_nuclei]
if not rotated:
ax.set_xlim(min(Ns)-1, max(Ns)+1)
else:
ax.set_xlim(min(Zs)-1, max(Zs)+1)
#plt.ylim(min(Zs)-1, max(Zs)+1)
if not rotated:
plt.xlabel(r"$N$", fontsize="large")
plt.ylabel(r"$Z$", fontsize="large")
else:
plt.xlabel(r"$Z$", fontsize="large")
plt.ylabel(r"$A - 2Z$", fontsize="large")
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
if Z_range is not None and N_range is not None:
if not rotated:
ax.set_xlim(N_range[0], N_range[1])
ax.set_ylim(Z_range[0], Z_range[1])
else:
ax.set_xlim(Z_range[0], Z_range[1])
if not rotated:
ax.set_aspect("equal", "datalim")
fig.set_size_inches(size[0]/dpi, size[1]/dpi)
if title is not None:
fig.suptitle(title)
if outfile is None:
plt.show()
else:
plt.tight_layout()
plt.savefig(outfile, dpi=dpi)
@staticmethod
def _safelog(arr, small):
arr = np.copy(arr)
if np.any(arr < 0.0):
raise ValueError("Negative values not allowed for logscale - try symlog instead.")
zeros = arr == 0.0
arr[zeros] = min(small, arr[~zeros].min() / 10)
return np.log10(arr)
@staticmethod
def _symlog(arr, linthresh=1.0):
assert linthresh >= 1.0
neg = arr < 0.0
arr = np.abs(arr)
needslog = arr > linthresh
arr[needslog] = np.log10(arr[needslog]) + linthresh
arr[neg] *= -1
return arr
@staticmethod
def _scale(arr, minval=None, maxval=None):
if minval is None: minval = arr.min()
if maxval is None: maxval = arr.max()
if minval != maxval:
scaled = (arr - minval) / (maxval - minval)
else:
scaled = np.zeros_like(arr)
scaled[scaled < 0.0] = 0.0
scaled[scaled > 1.0] = 1.0
return scaled
def gridplot(self, comp=None, color_field="X", rho=None, T=None, **kwargs):
"""
Plot nuclides as cells on a grid of Z vs. N, colored by *color_field*. If called
without a composition, the function will just plot the grid with no color field.
:param comp: Composition of the environment.
:param color_field: Field to color by. Must be one of 'X' (mass fraction),
'Y' (molar abundance), 'Xdot' (time derivative of X), 'Ydot' (time
derivative of Y), or 'activity' (sum of contributions to Ydot of
all rates, ignoring sign).
:param rho: Density to evaluate rates at. Needed for fields involving time
derivatives.
:param T: Temperature to evaluate rates at. Needed for fields involving time
derivatives.
:Keyword Arguments:
- *scale* -- One of 'linear', 'log', and 'symlog'. Linear by default.
- *small* -- If using logarithmic scaling, zeros will be replaced with
this value. 1e-30 by default.
- *linthresh* -- Linearity threshold for symlog scaling.
- *filter_function* -- A callable to filter Nucleus objects with. Should
return *True* if the nuclide should be plotted.
- *outfile* -- Output file to save the plot to. The plot will be shown if
not specified.
- *dpi* -- DPI to save the image file at.
- *cmap* -- Name of the matplotlib colormap to use. Default is 'magma'.
- *edgecolor* -- Color of grid cell edges.
- *area* -- Area of the figure without the colorbar, in square inches. 64
by default.
- *no_axes* -- Set to *True* to omit axis spines.
- *no_ticks* -- Set to *True* to omit tickmarks.
- *no_cbar* -- Set to *True* to omit colorbar.
- *cbar_label* -- Colorbar label.
- *cbar_bounds* -- Explicit colorbar bounds.
- *cbar_format* -- Format string or Formatter object for the colorbar ticks.
"""
# Process kwargs
outfile = kwargs.pop("outfile", None)
scale = kwargs.pop("scale", "linear")
cmap = kwargs.pop("cmap", "viridis")
edgecolor = kwargs.pop("edgecolor", "grey")
small = kwargs.pop("small", 1e-30)
area = kwargs.pop("area", 64)
no_axes = kwargs.pop("no_axes", False)
no_ticks = kwargs.pop("no_ticks", False)
no_cbar = kwargs.pop("no_cbar", False)
cbar_label = kwargs.pop("cbar_label", None)
cbar_format = kwargs.pop("cbar_format", None)
cbar_bounds = kwargs.pop("cbar_bounds", None)
filter_function = kwargs.pop("filter_function", None)
dpi = kwargs.pop("dpi", 100)
linthresh = kwargs.pop("linthresh", 1.0)
if kwargs: warnings.warn(f"Unrecognized keyword arguments: {kwargs.keys()}")
# Get figure, colormap
fig, ax = plt.subplots()
cmap = mpl.cm.get_cmap(cmap)
# Get nuclei and all 3 numbers
nuclei = self.unique_nuclei
if filter_function is not None:
nuclei = list(filter(filter_function, nuclei))
Ns = np.array([n.N for n in nuclei])
Zs = np.array([n.Z for n in nuclei])
As = Ns + Zs
# Compute weights
color_field = color_field.lower()
if color_field not in {"x", "y", "ydot", "xdot", "activity"}:
raise ValueError(f"Invalid color field: '{color_field}'")
if comp is None:
values = np.zeros(len(nuclei))
elif color_field == "x":
values = np.array([comp.X[nuc] for nuc in nuclei])
elif color_field == "y":
ys = comp.get_molar()
values = np.array([ys[nuc] for nuc in nuclei])
elif color_field in {"ydot", "xdot"}:
if rho is None or T is None:
raise ValueError("Need both rho and T to evaluate rates!")
ydots = self.evaluate_ydots(rho, T, comp)
values = np.array([ydots[nuc] for nuc in nuclei])
if color_field == "xdot": values *= As
elif color_field == "activity":
if rho is None or T is None:
raise ValueError("Need both rho and T to evaluate rates!")
act = self.evaluate_activity(rho, T, comp)
values = np.array([act[nuc] for nuc in nuclei])
if scale == "log": values = self._safelog(values, small)
elif scale == "symlog": values = self._symlog(values, linthresh)
if cbar_bounds is None:
cbar_bounds = values.min(), values.max()
weights = self._scale(values, *cbar_bounds)
# Plot a square for each nucleus
for nuc, weight in zip(nuclei, weights):
square = plt.Rectangle((nuc.N - 0.5, nuc.Z - 0.5), width=1, height=1,
facecolor=cmap(weight), edgecolor=edgecolor)
ax.add_patch(square)
# Set limits
maxN, minN = max(Ns), min(Ns)
maxZ, minZ = max(Zs), min(Zs)
plt.xlim(minN - 0.5, maxN + 0.6)
plt.ylim(minZ - 0.5, maxZ + 0.6)
# Set plot appearance
rat = (maxN - minN) / (maxZ - minZ)
width = np.sqrt(area * rat)
height = area / width
fig.set_size_inches(width, height)
plt.xlabel(r"N $\rightarrow$")
plt.ylabel(r"Z $\rightarrow$")
if no_axes or no_ticks:
plt.tick_params(axis='both', which='both',
bottom=False, left=False,
labelbottom=False, labelleft=False)
else:
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
if no_axes:
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
# Colorbar stuff
if not no_cbar and comp is not None:
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='3.5%', pad=0.1)
cbar_norm = mpl.colors.Normalize(*cbar_bounds)
smap = mpl.cm.ScalarMappable(norm=cbar_norm, cmap=cmap)
if not cbar_label:
capfield = color_field.capitalize()
if scale == "log":
cbar_label = f"log[{capfield}]"
elif scale == "symlog":
cbar_label = f"symlog[{capfield}]"
else:
cbar_label = capfield
fig.colorbar(smap, cax=cax, orientation="vertical",
label=cbar_label, format=cbar_format)
# Show or save
if outfile is None:
plt.show()
else:
plt.tight_layout()
plt.savefig(outfile, dpi=dpi)
def __repr__(self):
string = ""
for r in self.rates:
string += f"{r.string}\n"
return string
class Explorer:
""" interactively explore a rate collection """
def __init__(self, rc, comp, size=(800, 600),
ydot_cutoff_value=None,
always_show_p=False, always_show_alpha=False):
""" take a RateCollection and a composition """
self.rc = rc
self.comp = comp
self.size = size
self.ydot_cutoff_value = ydot_cutoff_value
self.always_show_p = always_show_p
self.always_show_alpha = always_show_alpha
def _make_plot(self, logrho, logT):
self.rc.plot(rho=10.0**logrho, T=10.0**logT,
comp=self.comp, size=self.size,
ydot_cutoff_value=self.ydot_cutoff_value,
always_show_p=self.always_show_p,
always_show_alpha=self.always_show_alpha)
def explore(self, logrho=(2, 6, 0.1), logT=(7, 9, 0.1)):
"""Perform interactive exploration of the network structure."""
interact(self._make_plot, logrho=logrho, logT=logT)
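# --- Editor's addition: a hedged end-to-end sketch, not part of the original
# module. It shows the flow the classes above are built for: assemble a
# RateCollection from Reaclib rate files (the file names below are illustrative
# only), evaluate dY/dt at a chosen density/temperature/composition, and draw the
# network. Interactive exploration with ipywidgets sliders goes through Explorer.
def _demo_rate_collection_usage():
    rc = RateCollection(rate_files=["c12-ag-o16-nac2", "he4-aag-c12-fy05"])
    comp = Composition(rc.get_nuclei())
    comp.set_solar_like(Z=0.02)
    # net rate of change of molar abundance for every nucleus at rho, T
    ydots = rc.evaluate_ydots(rho=1.e4, T=1.e8, composition=comp)
    # static network plot; links are colored by rate strength when rho/T/comp are given
    rc.plot(rho=1.e4, T=1.e8, comp=comp, outfile="network.png")
    # grid of nuclides on the (N, Z) plane, colored by mass fraction
    rc.gridplot(comp=comp, color_field="X", outfile="grid.png")
    # in a notebook one could instead do: Explorer(rc, comp).explore()
    return ydots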
|
pyreaclib/pyreaclib
|
pynucastro/networks/rate_collection.py
|
Python
|
bsd-3-clause
| 34,049
|
# proxy module
from __future__ import absolute_import
from mayavi.filters.api import *
|
enthought/etsproxy
|
enthought/mayavi/filters/api.py
|
Python
|
bsd-3-clause
| 87
|
# -*- coding: utf-8 -*-
"""
Version code adopted from Django development version.
https://github.com/django/django
"""
VERSION = (0, 7, 2, 'final', 0)
def get_version(version=None):
"""
Returns a PEP 386-compliant version number from VERSION.
"""
if version is None:
from modeltranslation import VERSION as version
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:parts])
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return str(main + sub)
def get_git_changeset():
"""
Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
TODO: Check if we can rely on services like read-the-docs to pick this up.
"""
import datetime
import os
import subprocess
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen(
'git log --pretty=format:%ct --quiet -1 HEAD', stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True, cwd=repo_dir,
universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
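# --- Editor's addition: a hedged usage sketch, not part of the original file ---
# get_version() builds the main "X.Y[.Z]" part from the numeric fields and appends
# a pre-release suffix; get_git_changeset() is only consulted for alpha 0 versions.
# The tuples below are illustrative inputs, not real release numbers.
if __name__ == "__main__":
    print(get_version((0, 7, 2, 'final', 0)))  # -> "0.7.2"
    print(get_version((0, 8, 0, 'beta', 1)))   # -> "0.8b1"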
|
yaroslavprogrammer/django-modeltranslation
|
modeltranslation/__init__.py
|
Python
|
bsd-3-clause
| 2,020
|
import os
from pkg_resources import resource_filename
__all__ = [
'get_filepath',
]
def get_filepath(name='sherpa_wz.hepmc'):
return resource_filename('deepjets', os.path.join('testdata', name))
|
deepjets/deepjets
|
deepjets/testdata/__init__.py
|
Python
|
bsd-3-clause
| 207
|
from __future__ import unicode_literals
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponsePermanentRedirect
from django.middleware.locale import LocaleMiddleware
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from django.test.client import RequestFactory
from django.test.utils import override_script_prefix
from django.urls import clear_url_caches, reverse, translate_url
from django.utils import translation
from django.utils._os import upath
class PermanentRedirectLocaleMiddleWare(LocaleMiddleware):
response_redirect_class = HttpResponsePermanentRedirect
@override_settings(
USE_I18N=True,
LOCALE_PATHS=[
os.path.join(os.path.dirname(upath(__file__)), 'locale'),
],
LANGUAGE_CODE='en-us',
LANGUAGES=[
('nl', 'Dutch'),
('en', 'English'),
('pt-br', 'Brazilian Portuguese'),
],
MIDDLEWARE_CLASSES=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='i18n.patterns.urls.default',
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'templates')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.i18n',
],
},
}],
)
class URLTestCaseBase(SimpleTestCase):
"""
TestCase base-class for the URL tests.
"""
def setUp(self):
# Make sure the cache is empty before we are doing our tests.
clear_url_caches()
def tearDown(self):
# Make sure we will leave an empty cache for other testcases.
clear_url_caches()
class URLPrefixTests(URLTestCaseBase):
"""
Tests whether `i18n_patterns` adds the prefix correctly.
"""
def test_not_prefixed(self):
with translation.override('en'):
self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')
with translation.override('nl'):
self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')
def test_prefixed(self):
with translation.override('en'):
self.assertEqual(reverse('prefixed'), '/en/prefixed/')
with translation.override('nl'):
self.assertEqual(reverse('prefixed'), '/nl/prefixed/')
with translation.override(None):
self.assertEqual(reverse('prefixed'), '/%s/prefixed/' % settings.LANGUAGE_CODE)
@override_settings(ROOT_URLCONF='i18n.patterns.urls.wrong')
def test_invalid_prefix_use(self):
with self.assertRaises(ImproperlyConfigured):
reverse('account:register')
@override_settings(ROOT_URLCONF='i18n.patterns.urls.disabled')
class URLDisabledTests(URLTestCaseBase):
@override_settings(USE_I18N=False)
def test_prefixed_i18n_disabled(self):
with translation.override('en'):
self.assertEqual(reverse('prefixed'), '/prefixed/')
with translation.override('nl'):
self.assertEqual(reverse('prefixed'), '/prefixed/')
class RequestURLConfTests(SimpleTestCase):
@override_settings(ROOT_URLCONF='i18n.patterns.urls.path_unused')
def test_request_urlconf_considered(self):
request = RequestFactory().get('/nl/')
request.urlconf = 'i18n.patterns.urls.default'
middleware = LocaleMiddleware()
with translation.override('nl'):
middleware.process_request(request)
self.assertEqual(request.LANGUAGE_CODE, 'nl')
@override_settings(ROOT_URLCONF='i18n.patterns.urls.path_unused')
class PathUnusedTests(URLTestCaseBase):
"""
Check that if i18n_patterns is not used in the root URLconf, then no
language activation happens based on the URL prefix.
"""
def test_no_lang_activate(self):
response = self.client.get('/nl/foo/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'en')
self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
class URLTranslationTests(URLTestCaseBase):
"""
Tests if the pattern-strings are translated correctly (within the
`i18n_patterns` and the normal `patterns` function).
"""
def test_no_prefix_translated(self):
with translation.override('en'):
self.assertEqual(reverse('no-prefix-translated'), '/translated/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/translated/yeah/')
with translation.override('nl'):
self.assertEqual(reverse('no-prefix-translated'), '/vertaald/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/vertaald/yeah/')
with translation.override('pt-br'):
self.assertEqual(reverse('no-prefix-translated'), '/traduzidos/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/traduzidos/yeah/')
def test_users_url(self):
with translation.override('en'):
self.assertEqual(reverse('users'), '/en/users/')
with translation.override('nl'):
self.assertEqual(reverse('users'), '/nl/gebruikers/')
self.assertEqual(reverse('prefixed_xml'), '/nl/prefixed.xml')
with translation.override('pt-br'):
self.assertEqual(reverse('users'), '/pt-br/usuarios/')
def test_translate_url_utility(self):
with translation.override('en'):
self.assertEqual(translate_url('/en/non-existent/', 'nl'), '/en/non-existent/')
self.assertEqual(translate_url('/en/users/', 'nl'), '/nl/gebruikers/')
# Namespaced URL
self.assertEqual(translate_url('/en/account/register/', 'nl'), '/nl/profiel/registeren/')
self.assertEqual(translation.get_language(), 'en')
with translation.override('nl'):
self.assertEqual(translate_url('/nl/gebruikers/', 'en'), '/en/users/')
self.assertEqual(translation.get_language(), 'nl')
class URLNamespaceTests(URLTestCaseBase):
"""
Tests if the translations are still working within namespaces.
"""
def test_account_register(self):
with translation.override('en'):
self.assertEqual(reverse('account:register'), '/en/account/register/')
with translation.override('nl'):
self.assertEqual(reverse('account:register'), '/nl/profiel/registeren/')
class URLRedirectTests(URLTestCaseBase):
"""
Tests if the user gets redirected to the right URL when there is no
language-prefix in the request URL.
"""
def test_no_prefix_response(self):
response = self.client.get('/not-prefixed/')
self.assertEqual(response.status_code, 200)
def test_en_redirect(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_en_redirect_wrong_url(self):
response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='en')
self.assertEqual(response.status_code, 404)
def test_nl_redirect(self):
response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='nl')
self.assertRedirects(response, '/nl/profiel/registeren/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_nl_redirect_wrong_url(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='nl')
self.assertEqual(response.status_code, 404)
def test_pt_br_redirect(self):
response = self.client.get('/conta/registre-se/', HTTP_ACCEPT_LANGUAGE='pt-br')
self.assertRedirects(response, '/pt-br/conta/registre-se/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_pl_pl_redirect(self):
# language from outside of the supported LANGUAGES list
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='pl-pl')
self.assertRedirects(response, '/en/account/register/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
@override_settings(
MIDDLEWARE_CLASSES=[
'i18n.patterns.tests.PermanentRedirectLocaleMiddleWare',
'django.middleware.common.CommonMiddleware',
],
)
def test_custom_redirect_class(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/', 301)
class URLVaryAcceptLanguageTests(URLTestCaseBase):
"""
Tests that 'Accept-Language' is not added to the Vary header when using
prefixed URLs.
"""
def test_no_prefix_response(self):
response = self.client.get('/not-prefixed/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Vary'), 'Accept-Language')
def test_en_redirect(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/')
self.assertFalse(response.get('Vary'))
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
self.assertFalse(response.get('Vary'))
class URLRedirectWithoutTrailingSlashTests(URLTestCaseBase):
"""
Tests the redirect when the requested URL doesn't end with a slash
(`settings.APPEND_SLASH=True`).
"""
def test_not_prefixed_redirect(self):
response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/not-prefixed/', 301)
def test_en_redirect(self):
response = self.client.get('/account/register', HTTP_ACCEPT_LANGUAGE='en', follow=True)
# We only want one redirect, bypassing CommonMiddleware
self.assertListEqual(response.redirect_chain, [('/en/account/register/', 302)])
self.assertRedirects(response, '/en/account/register/', 302)
response = self.client.get('/prefixed.xml', HTTP_ACCEPT_LANGUAGE='en', follow=True)
self.assertRedirects(response, '/en/prefixed.xml', 302)
class URLRedirectWithoutTrailingSlashSettingTests(URLTestCaseBase):
"""
Tests the redirect when the requested URL doesn't end with a slash
(`settings.APPEND_SLASH=False`).
"""
@override_settings(APPEND_SLASH=False)
def test_not_prefixed_redirect(self):
response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=False)
def test_en_redirect(self):
response = self.client.get('/account/register-without-slash', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register-without-slash', 302)
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
class URLResponseTests(URLTestCaseBase):
"""
Tests if the response has the right language-code.
"""
def test_not_prefixed_with_prefix(self):
response = self.client.get('/en/not-prefixed/')
self.assertEqual(response.status_code, 404)
def test_en_url(self):
response = self.client.get('/en/account/register/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'en')
self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
def test_nl_url(self):
response = self.client.get('/nl/profiel/registeren/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'nl')
self.assertEqual(response.context['LANGUAGE_CODE'], 'nl')
def test_wrong_en_prefix(self):
response = self.client.get('/en/profiel/registeren/')
self.assertEqual(response.status_code, 404)
def test_wrong_nl_prefix(self):
response = self.client.get('/nl/account/register/')
self.assertEqual(response.status_code, 404)
def test_pt_br_url(self):
response = self.client.get('/pt-br/conta/registre-se/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'pt-br')
self.assertEqual(response.context['LANGUAGE_CODE'], 'pt-br')
class URLRedirectWithScriptAliasTests(URLTestCaseBase):
"""
#21579 - LocaleMiddleware should respect the script prefix.
"""
def test_language_prefix_with_script_prefix(self):
prefix = '/script_prefix'
with override_script_prefix(prefix):
response = self.client.get('/prefixed/', HTTP_ACCEPT_LANGUAGE='en', SCRIPT_NAME=prefix)
self.assertRedirects(response, '%s/en/prefixed/' % prefix, target_status_code=404)
class URLTagTests(URLTestCaseBase):
"""
Test if the language tag works.
"""
def test_strings_only(self):
t = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
self.assertEqual(t.render(Context({})).strip().split(),
['/vertaald/', '/traduzidos/'])
def test_context(self):
ctx = Context({'lang1': 'nl', 'lang2': 'pt-br'})
tpl = Template("""{% load i18n %}
{% language lang1 %}{% url 'no-prefix-translated' %}{% endlanguage %}
{% language lang2 %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
self.assertEqual(tpl.render(ctx).strip().split(),
['/vertaald/', '/traduzidos/'])
def test_args(self):
tpl = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}""")
self.assertEqual(tpl.render(Context({})).strip().split(),
['/vertaald/apo/', '/traduzidos/apo/'])
def test_kwargs(self):
tpl = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}""")
self.assertEqual(tpl.render(Context({})).strip().split(),
['/vertaald/apo/', '/traduzidos/apo/'])
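# --- Editor's addition: a hedged sketch, not part of the original test module ---
# A minimal URLconf of the kind these tests exercise, assuming the Django APIs of
# the same era as the tests (url(), i18n_patterns(), ugettext_lazy); the pattern
# names and the template below are illustrative only.
from django.conf.urls import url
from django.conf.urls.i18n import i18n_patterns
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView
sketch_view = TemplateView.as_view(template_name='dummy.html')
sketch_urlpatterns = [
    url(r'^not-prefixed/$', sketch_view, name='not-prefixed'),
]
sketch_urlpatterns += i18n_patterns(
    url(_(r'^users/$'), sketch_view, name='users'),
)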
|
yephper/django
|
tests/i18n/patterns/tests.py
|
Python
|
bsd-3-clause
| 15,353
|
"""
This module contain solvers for all kinds of equations,
algebraic or transcendental.
"""
import warnings
from collections import defaultdict
from types import GeneratorType
from ..core import (Add, Dummy, E, Equality, Expr, Float, Function, Ge, I,
Integer, Lambda, Mul, Symbol, expand_log, expand_mul,
expand_power_exp, nan, nfloat, pi, preorder_traversal,
sympify)
from ..core.assumptions import check_assumptions
from ..core.compatibility import (default_sort_key, is_sequence, iterable,
ordered)
from ..core.function import AppliedUndef
from ..core.logic import fuzzy_and
from ..core.relational import Relational
from ..functions import (Abs, Max, Min, Piecewise, acos, arg, asin, atan,
atan2, cos, exp, im, log, piecewise_fold, re, sin,
sqrt, tan)
from ..functions.elementary.hyperbolic import HyperbolicFunction
from ..functions.elementary.trigonometric import TrigonometricFunction
from ..logic import false, true
from ..matrices import Matrix, zeros
from ..polys import Poly, RootOf, factor, roots
from ..polys.polyerrors import PolynomialError
from ..simplify import (denom, logcombine, nsimplify, posify, powdenest,
powsimp, simplify)
from ..simplify.fu import TR1
from ..simplify.sqrtdenest import unrad
from ..utilities import filldedent
from ..utilities.iterables import uniq
from .polysys import solve_linear_system, solve_poly_system, solve_surd_system
from .utils import checksol
__all__ = 'solve', 'solve_linear', 'minsolve_linear_system'
def denoms(eq, symbols=None):
"""Return (recursively) set of all denominators that appear in eq
that contain any symbol in iterable ``symbols``; if ``symbols`` is
None (default) then all denominators will be returned.
Examples
========
>>> denoms(x/y)
{y}
>>> denoms(x/(y*z))
{y, z}
>>> denoms(3/x + y/z)
{x, z}
>>> denoms(x/2 + y/z)
{2, z}
"""
pot = preorder_traversal(eq)
dens = set()
for p in pot:
den = denom(p)
if den == 1:
continue
for d in Mul.make_args(den):
dens.add(d)
if not symbols:
return dens
rv = []
for d in dens:
free = d.free_symbols
if any(s in free for s in symbols):
rv.append(d)
return set(rv)
def solve(f, *symbols, **flags):
r"""Algebraically solves equation or system of equations.
Parameters
==========
f : Expr, Equality or iterable of above
All expressions are assumed to be equal to 0.
\*symbols : tuple
If no symbols are given (empty tuple), the free symbols
of the expressions will be used.
\*\*flags : dict
A dictionary of following parameters:
check : bool, optional
If False, don't do any testing of solutions. Default is
True, i.e. the solutions are checked and those that don't
satisfy the given assumptions on the symbols solved for, or that
make any denominator zero, are automatically excluded.
warn : bool, optional
Show a warning if :func:`~diofant.solvers.utils.checksol`
could not conclude. Default is False.
simplify : bool, optional
Enable simplification (default) for all but polynomials of
order 3 or greater before returning them; if check is not
False, also apply the general simplify function to the solutions
and to the expression obtained when they are substituted into the
function which should be zero.
rational : bool or None, optional
If True, recast Floats as Rational. If None (default),
Floats will be recast as rationals but the answer will be
recast as Floats. If the flag is False then nothing
will be done to the Floats.
cubics, quartics, quintics : bool, optional
Return explicit solutions (with radicals, which can be quite
long) when, respectively, cubic, quartic or quintic expressions
are encountered. Default is True. If False,
:class:`~diofant.polys.rootoftools.RootOf` instances will
be returned instead.
Examples
========
Single equation:
>>> solve(x**2 - y**2)
[{x: -y}, {x: y}]
>>> solve(x**2 - 1)
[{x: -1}, {x: 1}]
We could restrict solutions by using assumptions:
>>> p = Symbol('p', positive=True)
>>> solve(p**2 - 1)
[{p: 1}]
Several equations:
>>> solve((x + 5*y - 2, -3*x + 6*y - 15))
[{x: -3, y: 1}]
>>> solve((x + 5*y - 2, -3*x + 6*y - z))
[{x: -5*z/21 + 4/7, y: z/21 + 2/7}]
No solution:
>>> solve([x + 3, x - 3])
[]
Notes
=====
When an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save one from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method.
>>> solve(f(x) - x, f(x))
[{f(x): x}]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[{Derivative(f(x), x): x + f(x)}]
See Also
========
diofant.solvers.recurr.rsolve : solving recurrence equations
diofant.solvers.ode.dsolve : solving differential equations
diofant.solvers.inequalities.reduce_inequalities : solving inequalities
"""
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
bare_f = not iterable(f)
ordered_symbols = (symbols and symbols[0] and
(isinstance(symbols[0], (Dummy, Symbol)) or
is_sequence(symbols[0], include=GeneratorType)))
f, symbols = (_sympified_list(w) for w in [f, symbols])
# preprocess equation(s)
###########################################################################
for i, fi in enumerate(f):
if isinstance(fi, Equality):
if 'ImmutableMatrix' in (type(a).__name__ for a in fi.args):
f[i] = fi.lhs - fi.rhs
else:
f[i] = Add(fi.lhs, -fi.rhs, evaluate=False)
elif isinstance(fi, Relational):
raise ValueError(f'Only expressions or equalities supported, got {fi}')
elif isinstance(fi, Poly):
f[i] = fi.as_expr()
# rewrite hyperbolics in terms of exp
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction),
lambda w: w.rewrite(exp))
# replace min/max:
f[i] = f[i].replace(lambda w: isinstance(w, (Min, Max)),
lambda w: w.rewrite(Piecewise))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = Integer(0)
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_extended_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# preprocess symbol(s)
###########################################################################
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not (p.is_number or p.is_Add or p.is_Mul) or \
isinstance(p, AppliedUndef):
symbols.add(p)
pot.skip() # don't go any deeper
symbols = list(symbols)
# supply dummy symbols so solve(3) behaves like solve(3, x)
for i in range(len(f) - len(symbols)):
symbols.append(Dummy())
ordered_symbols = False
elif len(symbols) == 1 and iterable(symbols[0]):
symbols = symbols[0]
# real/imag handling -----------------------------
w = Dummy('w')
piece = Lambda(w, Piecewise((w, Ge(w, 0)), (-w, True)))
for i, fi in enumerate(f):
# Abs
reps = []
for a in fi.atoms(Abs):
if not a.has(*symbols):
continue
if a.args[0].is_extended_real is None and a.args[0].is_imaginary is not True:
raise NotImplementedError(f'solving {a} when the argument '
'is not real or imaginary.')
reps.append((a, piece(a.args[0]) if a.args[0].is_extended_real else
piece(a.args[0]*I)))
fi = fi.subs(reps)
# arg
_arg = [a for a in fi.atoms(arg) if a.has(*symbols)]
fi = fi.xreplace({a: atan(im(a.args[0])/re(a.args[0])) for a in _arg})
# save changes
f[i] = fi
# see if re(s) or im(s) appear
irf = []
for s in symbols:
if s.is_extended_real or s.is_imaginary:
continue # neither re(x) nor im(x) will appear
# if re(s) or im(s) appear, the auxiliary equation must be present
if any(fi.has(re(s), im(s)) for fi in f):
irf.append((s, re(s) + I*im(s)))
if irf:
for s, rhs in irf:
for i, fi in enumerate(f):
f[i] = fi.xreplace({s: rhs})
f.append(s - rhs)
symbols.extend([re(s), im(s)])
if bare_f:
bare_f = False
# end of real/imag handling -----------------------------
symbols = list(uniq(symbols))
if not ordered_symbols:
# we do this to make the results returned canonical in case f
# contains a system of nonlinear equations; all other cases should
# be unambiguous
symbols = sorted(symbols, key=default_sort_key)
# we can solve for non-symbol entities by replacing them with Dummy symbols
symbols_new = []
symbol_swapped = False
for i, s in enumerate(symbols):
if s.is_Symbol:
s_new = s
else:
symbol_swapped = True
s_new = Dummy(f'X{i:d}')
symbols_new.append(s_new)
if symbol_swapped:
swap_sym = list(zip(symbols, symbols_new))
f = [fi.subs(swap_sym) for fi in f]
symbols = symbols_new
swap_sym = {v: k for k, v in swap_sym}
else:
swap_sym = {}
# this is needed in the next two events
symset = set(symbols)
# mask off any Object that we aren't going to invert: Derivative,
# Integral, etc... so that solving for anything that they contain will
# give an implicit solution
seen = set()
non_inverts = set()
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not isinstance(p, Expr) or isinstance(p, Piecewise):
pot.skip()
elif (isinstance(p, bool) or not p.args or p in symset or
p.is_Add or p.is_Mul or p.is_Pow or p.is_Function or
isinstance(p, RootOf)) and p.func not in (re, im):
pass
elif p not in seen:
seen.add(p)
if p.free_symbols & symset:
non_inverts.add(p)
pot.skip()
del seen
non_inverts = {d: Dummy() for d in non_inverts}
f = [fi.subs(non_inverts) for fi in f]
non_inverts = [(v, k.subs(swap_sym)) for k, v in non_inverts.items()]
# rationalize Floats
floats = False
if flags.get('rational', True) is not False:
for i, fi in enumerate(f):
if fi.has(Float):
floats = True
f[i] = nsimplify(fi, rational=True)
# piecewise_fold might cancel denominators, so be sure to check them.
piecewise_dens = set()
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
# However, this is necessary only if one of the piecewise
# functions depends on one of the symbols we are solving for.
for i, fi in enumerate(f):
if any(e.has(*symbols) for e in fi.atoms(Piecewise)):
piecewise_dens |= denoms(fi, symbols)
f[i] = piecewise_fold(fi)
if all(_ == 0 for _ in f):
return [{}]
#
# try to get a solution
###########################################################################
if bare_f and len(symbols) == 1:
solution = [{symbols[0]: s} for s in _solve(f[0], symbols[0], **flags)]
else:
solution = _solve_system(f, symbols, **flags)
#
# postprocessing
###########################################################################
# Restore masked-off objects
if non_inverts:
solution = [{k: v.subs(non_inverts) for k, v in s.items()}
for s in solution]
# Restore original "symbols" if a dictionary is returned.
# This is not necessary for
# - the single univariate equation case
# since the symbol will have been removed from the solution;
#
# ** unless there were Derivatives with the symbols, but those were handled
# above.
if symbol_swapped:
symbols = [swap_sym[k] for k in symbols]
if solution:
for i, sol in enumerate(solution):
solution[i] = {swap_sym[k]: v.subs(swap_sym)
for k, v in sol.items()}
# Get assumptions about symbols, to filter solutions.
# Note that if assumptions about a solution can't be verified, it is still
# returned.
check = flags.get('check', True)
# restore floats
if floats and solution and flags.get('rational', None) is None:
solution = nfloat(solution, exponent=False)
if check and solution: # assumption checking
def test_assumptions(sol):
return fuzzy_and([check_assumptions(sol[sym], **sym._assumptions)
for sym in sol])
solution = [s for s in solution if test_assumptions(s) is not False]
warn = flags.get('warn', False)
got_None = [s for s in solution if not test_assumptions(s)]
if warn and got_None:
warnings.warn(filldedent("""
\tWarning: assumptions concerning following solution(s)
can't be checked:""" + '\n\t' +
', '.join(str(s) for s in got_None)))
solution = [s for s in solution if
all(not checksol(den, s, **flags) for den in piecewise_dens)]
#
# done
###########################################################################
# Make sure that a list of solutions is ordered in a canonical way.
solution.sort(key=default_sort_key)
return solution
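# --- Editor's addition: a hedged sketch of the flag behaviour documented above,
# not part of the original module. The equations are illustrative only; Symbol is
# already imported at the top of this module.
def _demo_solve_flags():
    x = Symbol('x')
    # cubics=False: keep RootOf instances instead of long radical expressions
    with_rootofs = solve(x**3 - 2*x + 7, x, cubics=False)
    # check=False: do not filter out solutions that zero a denominator or violate
    # assumptions on the symbol
    unchecked = solve((x - 1)/(x**2 - 1), x, check=False)
    return with_rootofs, unchecked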
def _solve(f, symbol, **flags):
"""Return a checked solution for f in terms of one or more of the
symbols. A list (possibly empty) should be returned.
If no method is implemented to solve the equation, a NotImplementedError
will be raised. In the case that conversion of an expression to a Poly
gives None, a ValueError will be raised.
"""
not_impl_msg = 'No algorithms are implemented to solve equation %s'
# /!\ capture this flag then set it to False so that no checking in
# recursive calls will be done; only the final answer is checked
flags['check'] = checkdens = check = flags.pop('check', True)
# build up solutions if f is a Mul
if f.is_Mul:
result = set()
for m in f.args:
soln = _solve(m, symbol, **flags)
result.update(set(soln))
result = list(result)
if check:
# all solutions have been checked but now we must
# check that the solutions do not set denominators
# in any factor to zero
dens = denoms(f, [symbol])
result = [s for s in result if
all(not checksol(den, {symbol: s}, **flags) for den in
dens)]
# set flags for quick exit at end; solutions for each
# factor were already checked and simplified
check = False
flags['simplify'] = False
elif f.is_Piecewise:
result = set()
for n, (expr, cond) in enumerate(f.args):
candidates = _solve(piecewise_fold(expr), symbol, **flags)
for candidate in candidates:
if candidate in result:
continue
try:
v = (cond == true) or cond.subs({symbol: candidate})
except TypeError:
v = False
if v != false:
# Only include solutions that do not match the condition
# of any previous pieces.
matches_other_piece = False
for other_n, (other_expr, other_cond) in enumerate(f.args): # pragma: no branch
if other_n == n:
break
try:
if other_cond.subs({symbol: candidate}) == true:
matches_other_piece = True
break
except TypeError:
pass
if not matches_other_piece:
v = v == true or v.doit()
if isinstance(v, Relational):
v = v.canonical
result.add(Piecewise(
(candidate, v),
(nan, True)
))
check = False
flags['simplify'] = False
else:
# first see if it really depends on symbol and whether there
# is a linear solution
f_num, sol = solve_linear(f, symbol)
if symbol not in f_num.free_symbols:
return []
elif f_num.is_Symbol:
# no need to check but simplify if desired
if flags.get('simplify', True):
sol = simplify(sol)
return [sol]
result = False # no solution was obtained
msg = '' # there is no failure message
# Poly is generally robust enough to convert anything to
# a polynomial and tell us the different generators that it
# contains, so we will inspect the generators identified by
# polys to figure out what to do.
# try to identify a single generator that will allow us to solve this
# as a polynomial, followed (perhaps) by a change of variables if the
# generator is not a symbol
poly = Poly(f_num)
gens = [g for g in poly.gens if g.has(symbol)]
def _as_base_q(x):
"""Return (b**e, q) for x = b**(p*e/q) where p/q is the leading
Rational of the exponent of x, e.g. exp(-2*x/3) -> (exp(x), 3)
"""
b, e = x.as_base_exp()
if e.is_Rational:
return b, e.denominator
if not e.is_Mul:
return x, 1
c, ee = e.as_coeff_Mul()
if c.is_Rational and c != 1: # c could be a Float
return b**ee, c.denominator
return x, 1
if len(gens) > 1:
# If there is more than one generator, it could be that the
# generators have the same base but different powers, e.g.
# >>> Poly(exp(x) + 1/exp(x))
# Poly(exp(-x) + exp(x), exp(-x), exp(x), domain='ZZ')
#
# If unrad was not disabled then there should be no rational
# exponents appearing as in
# >>> Poly(sqrt(x) + sqrt(sqrt(x)))
# Poly(sqrt(x) + x**(1/4), sqrt(x), x**(1/4), domain='ZZ')
bases, qs = list(zip(*[_as_base_q(g) for g in gens]))
bases = set(bases)
if len(bases) > 1 or not all(q == 1 for q in qs):
funcs = {b for b in bases if b.is_Function}
trig = {_ for _ in funcs if
isinstance(_, TrigonometricFunction)}
other = funcs - trig
if not other and len(funcs.intersection(trig)) > 1:
newf = TR1(f_num).rewrite(tan)
if newf != f_num:
# don't check the rewritten form -- check
# solutions in the un-rewritten form below
flags['check'] = False
result = _solve(newf, symbol, **flags)
flags['check'] = check
# just a simple case - see if replacement of single function
# clears all symbol-dependent functions, e.g.
# log(x) - log(log(x) - 1) - 3 can be solved even though it has
# two generators.
if result is False and funcs:
funcs = list(ordered(funcs)) # put shallowest function first
f1 = funcs[0]
t = Dummy('t')
# perform the substitution
ftry = f_num.subs({f1: t})
# if no Functions left, we can proceed with usual solve
if not ftry.has(symbol):
cv_sols = _solve(ftry, t, **flags)
cv_inv = _solve(t - f1, symbol, **flags)[0]
sols = []
for sol in cv_sols:
sols.append(cv_inv.subs({t: sol}))
result = list(ordered(sols))
if result is False:
msg = f'multiple generators {gens}'
else:
# e.g. case where gens are exp(x), exp(-x)
u = bases.pop()
t = Dummy('t')
inv = _solve(u - t, symbol, **flags)
# this will be resolved by factor in _tsolve but we might
# as well try a simple expansion here to get things in
# order so something like the following will work now without
# having to factor:
#
# >>> eq = (exp(I*(-x-2))+exp(I*(x+2)))
# >>> eq.subs({exp(x): y}) # fails
# exp(I*(-x - 2)) + exp(I*(x + 2))
# >>> eq.expand().subs({exp(x): y}) # works
# y**I*exp(2*I) + y**(-I)*exp(-2*I)
def _expand(p):
b, e = p.as_base_exp()
e = expand_mul(e)
return expand_power_exp(b**e)
ftry = f_num.replace(lambda w: w.is_Pow, _expand).subs({u: t})
assert not ftry.has(symbol)
soln = _solve(ftry, t, **flags)
sols = []
for sol in soln:
for i in inv:
sols.append(i.subs({t: sol}))
result = list(ordered(sols))
else:
# There is only one generator that we are interested in, but
# there may have been more than one generator identified by
# polys (e.g. for symbols other than the one we are interested
# in) so recast the poly in terms of our generator of interest.
poly = Poly(f_num, gens[0], extension=False)
# if we aren't on the tsolve-pass, use roots
if not flags.pop('tsolve', False):
deg = poly.degree()
flags['tsolve'] = True
solvers = {k: flags.get(k, True) for k in
('cubics', 'quartics', 'quintics')}
soln = roots(poly, **solvers)
if sum(soln.values()) < deg:
# e.g. roots(32*x**5 + 400*x**4 + 2032*x**3 +
# 5000*x**2 + 6250*x + 3189) -> {}
# so all_roots is used and RootOf instances are
# returned *unless* the system is multivariate
# or high-order EX domain.
soln = poly.all_roots()
else:
soln = list(soln)
u = poly.gen
if u != symbol:
try:
t = Dummy('t')
iv = _solve(u - t, symbol, **flags)
soln = list(ordered({i.subs({t: s}) for i in iv for s in soln}))
except NotImplementedError:
# perhaps _tsolve can handle f_num
soln = None
else:
check = False # only dens need to be checked
if soln is not None:
if len(soln) > 2:
# if the flag wasn't set then unset it since high-order
# results are quite long. Perhaps one could base this
# decision on a certain critical length of the
# roots. In addition, wester test M2 has an expression
# whose roots can be shown to be real with the
# unsimplified form of the solution whereas only one of
# the simplified forms appears to be real.
flags['simplify'] = flags.get('simplify', False)
result = soln
# fallback if above fails
# -----------------------
if result is False:
u = unrad(f_num, symbol)
if u:
eq, cov = u
if cov:
isym, ieq = cov
inv = _solve(ieq, symbol, **flags)[0]
rv = {inv.subs({isym: xi}) for xi in _solve(eq, isym, **flags)}
else:
rv = set(_solve(eq, symbol, **flags))
result = list(ordered(rv))
# if the flag wasn't set then unset it since unrad results
# can be quite long or of very high order
flags['simplify'] = flags.get('simplify', False)
# try _tsolve
if result is False:
flags.pop('tsolve', None) # allow tsolve to be used on next pass
try:
soln = _tsolve(f_num, symbol, **flags)
if soln is not None:
result = soln
except PolynomialError:
pass
# ----------- end of fallback ----------------------------
if result is False:
raise NotImplementedError('\n'.join([msg, not_impl_msg % f]))
if flags.get('simplify', True):
result = list(map(simplify, result))
# we just simplified the solution so we now set the flag to
# False so the simplification doesn't happen again in checksol()
flags['simplify'] = False
if checkdens:
# reject any result that makes any denom. affirmatively 0;
# if in doubt, keep it
dens = denoms(f, [symbol])
result = [s for s in result if
all(not checksol(d, {symbol: s}, **flags)
for d in dens)]
if check:
# keep only results if the check is not False
result = [r for r in result if
checksol(f_num, {symbol: r}, **flags) is not False]
return result
def _solve_system(exprs, symbols, **flags):
"""Return a checked solution for list of exprs in terms of one or more
    of the symbols. A list of dicts (possibly empty) should be returned.
"""
if len(symbols) != 1 and len(exprs) == 1:
f = exprs[0]
soln = None
free = f.free_symbols
ex = free - set(symbols)
if len(ex) != 1:
ind, dep = f.as_independent(*symbols)
ex = ind.free_symbols & dep.free_symbols
# find first successful solution
failed = []
got_s = set()
result = []
for s in symbols:
try:
soln = _solve(f, s, **flags)
for sol in soln:
if got_s and any(ss in sol.free_symbols for ss in got_s):
# sol depends on previously solved symbols: discard it
continue
got_s.add(s)
result.append({s: sol})
except NotImplementedError:
continue
if got_s:
return result
polys = []
surds = []
dens = set()
failed = []
result = [{}]
solved_syms = []
algebraic = False
inversions = False
checkdens = check = flags.get('check', True)
for j, g in enumerate(exprs):
dens.update(denoms(g, symbols))
i, d = _invert(g, *symbols)
g = d - i
if exprs[j] not in (+g, -g):
inversions = True
g = g.as_numer_denom()[0]
poly = g.as_poly(*symbols)
if poly is not None:
polys.append(poly)
elif g.is_algebraic_expr(*symbols):
surds.append(g)
else:
failed.append(g)
if surds:
result = solve_surd_system([_.as_expr() for _ in polys] +
surds, *symbols)
solved_syms = list(set().union(*[set(r) for r in result]))
elif polys and all(p.is_linear for p in polys):
n, m = len(polys), len(symbols)
matrix = zeros(n, m + 1)
for i, poly in enumerate(polys):
for monom, coeff in poly.terms():
try:
j = monom.index(1)
matrix[i, j] = coeff
except ValueError:
matrix[i, m] = -coeff
# returns a dictionary {symbols: values} or None
result = solve_linear_system(matrix, *symbols, **flags)
solved_syms = list(result) if result else []
result = [result] if result else [{}]
elif polys:
result = solve_poly_system(polys, *symbols)
solved_syms = list(set().union(*[set(r) for r in result]))
if failed:
# For each failed equation, see if we can solve for one of the
# remaining symbols from that equation. If so, we update the
# solution set and continue with the next failed equation,
# repeating until we are done or we get an equation that can't
# be solved.
def _ok_syms(e, sort=False):
rv = (e.free_symbols - solved_syms) & legal
if sort:
rv = list(rv)
rv.sort(key=default_sort_key)
return rv
solved_syms = set(solved_syms) # set of symbols we have solved for
legal = set(symbols) # what we are interested in
# sort so equation with the fewest potential symbols is first
for eq in ordered(failed, lambda _: len(_ok_syms(_))):
u = Dummy() # used in solution checking
newresult = []
bad_results = []
got_s = set()
hit = False
for r in result:
# update eq with everything that is known so far
eq2 = eq.subs(r)
# if check is True then we see if it satisfies this
# equation, otherwise we just accept it
if check and r:
b = checksol(u, {u: eq2}, minimal=True)
if b is not None:
# this solution is sufficient to know whether
# it is valid or not so we either accept or
# reject it, then continue
if b:
newresult.append(r)
else:
bad_results.append(r)
continue
# search for a symbol amongst those available that
# can be solved for
ok_syms = _ok_syms(eq2, sort=True)
if not ok_syms:
newresult.append(r)
break # skip as it's independent of desired symbols
for s in ok_syms:
soln = _solve(eq2, s, **flags)
# put each solution in r and append the now-expanded
# result in the new result list; use copy since the
                    # solution for s is being added in-place
for sol in soln:
if got_s and any(ss in sol.free_symbols for ss in got_s):
# sol depends on previously solved symbols: discard it
continue
rnew = r.copy()
for k, v in r.items():
rnew[k] = v.subs({s: sol})
# and add this new solution
rnew[s] = sol
newresult.append(rnew)
hit = True
got_s.add(s)
if not hit: # pragma: no cover
raise NotImplementedError(f'could not solve {eq2}')
else:
result = newresult
assert not any(b in bad_results for b in result)
else:
algebraic = True
default_simplify = bool(failed) # rely on system-solvers to simplify
if flags.get('simplify', default_simplify):
for r in result:
for k in r:
r[k] = simplify(r[k])
flags['simplify'] = False # don't need to do so in checksol now
if checkdens:
result = [r for r in result
if not any(checksol(d, r, **flags) for d in dens)]
if check and (inversions or not algebraic):
result = [r for r in result
if not any(checksol(e, r, **flags) is False for e in exprs)]
return [r for r in result if r]
def solve_linear(f, x):
r"""
Solve equation ``f`` wrt variable ``x``.
Returns
=======
tuple
``(x, solution)``, if there is a linear solution, ``(0, 1)`` if
``f`` is independent of the symbol ``x``, ``(0, 0)`` if solution set
any denominator of ``f`` to zero or ``(numerator, denominator)``
of ``f``, if it's a nonlinear expression wrt ``x``.
Examples
========
>>> solve_linear(1/x - y**2, x)
(x, y**(-2))
>>> solve_linear(x**2/y**2 - 3, x)
(x**2 - 3*y**2, y**2)
>>> solve_linear(y, x)
(0, 1)
>>> solve_linear(1/(1/x - 2), x)
(0, 0)
"""
if not x.is_Symbol:
raise ValueError(f'{x} is not a Symbol')
f = f.replace(lambda e: e.is_Derivative, lambda e: e.doit())
n, d = res = f.as_numer_denom()
poly = n.as_poly(x, extension=False)
if poly is not None and poly.is_linear:
a, b = n.expand().coeff(x, 1), n.expand().coeff(x, 0)
if a != 0 and d.subs({x: -b/a}) != 0:
res = (x, -b/a)
if not n.simplify().has(x):
res = Integer(0), Integer(1)
if x == res[0] and any(checksol(_, {x: res[1]}) for _ in denoms(f, [x])):
res = Integer(0), Integer(0)
return res
def minsolve_linear_system(system, *symbols, **flags):
r"""Find a particular solution to a linear system.
In particular, try to find a solution with the minimal possible number
of non-zero variables. This is a very computationally hard problem.
Parameters
==========
system : Matrix
Nx(M+1) matrix, which means it has to be in augmented form.
\*symbols : list
        List of M Symbols.
    \*\*flags : dict
        A dictionary of the following parameters:
quick : boolean, optional
If True, a heuristic is used. Otherwise (default) a naive
algorithm with exponential complexity is used.
"""
quick = flags.get('quick', False)
# Check if there are any non-zero solutions at all
s0 = solve_linear_system(system, *symbols, **flags)
if not s0 or all(v == 0 for v in s0.values()):
return s0
if quick:
# We just solve the system and try to heuristically find a nice
# solution.
s = solve_linear_system(system, *symbols)
def update(determined, solution):
delete = []
for k, v in solution.items():
solution[k] = v.subs(determined)
if not solution[k].free_symbols:
delete.append(k)
determined[k] = solution[k]
for k in delete:
del solution[k]
determined = {}
update(determined, s)
while s:
# NOTE sort by default_sort_key to get deterministic result
k = max((k for k in s.values()),
key=lambda x: (len(x.free_symbols), default_sort_key(x)))
x = max(k.free_symbols, key=default_sort_key)
if len(k.free_symbols) != 1:
determined[x] = Integer(0)
else:
val = solve(k)[0][x]
if val == 0 and all(v.subs({x: val}) == 0 for v in s.values()):
determined[x] = Integer(1)
else:
determined[x] = val
update(determined, s)
return determined
else:
# We try to select n variables which we want to be non-zero.
# All others will be assumed zero. We try to solve the modified system.
# If there is a non-trivial solution, just set the free variables to
# one. If we do this for increasing n, trying all combinations of
# variables, we will find an optimal solution.
# We speed up slightly by starting at one less than the number of
# variables the quick method manages.
from itertools import combinations
from ..utilities.misc import debug
N = len(symbols)
bestsol = minsolve_linear_system(system, *symbols, quick=True)
n0 = len([x for x in bestsol.values() if x != 0])
for n in range(n0 - 1, 1, -1):
debug(f'minsolve: {n}')
thissol = None
for nonzeros in combinations(list(range(N)), n):
subm = Matrix([system[:, i].T for i in nonzeros] + [system[:, -1].T]).T
s = solve_linear_system(subm, *[symbols[i] for i in nonzeros])
if s and not all(v == 0 for v in s.values()):
subs = [(symbols[v], Integer(1)) for v in nonzeros]
for k, v in s.items():
s[k] = v.subs(subs)
for sym in symbols:
if sym not in s:
if symbols.index(sym) in nonzeros:
s[sym] = Integer(1)
else:
s[sym] = Integer(0)
thissol = s
break
if thissol is None:
break
bestsol = thissol
return bestsol
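# A minimal usage sketch for minsolve_linear_system (the symbols and the
# augmented matrix below are made up for illustration, and ``symbols`` and
# ``Matrix`` are assumed to come from the package namespace). The single row
# encodes x + y == 1, so a minimal-support particular solution keeps only one
# variable non-zero:
#
#     >>> x, y = symbols('x y')
#     >>> minsolve_linear_system(Matrix([[1, 1, 1]]), x, y)
#     a dict such as {x: 0, y: 1}, i.e. only one non-zero variable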
# these are functions that have multiple inverse values per period
multi_inverses = {
sin: lambda x: (asin(x), pi - asin(x)),
cos: lambda x: (acos(x), 2*pi - acos(x)),
}
def _tsolve(eq, sym, **flags):
"""
Helper for _solve that solves a transcendental equation with respect
    to the given symbol. Various equations containing powers and logarithms
    can be solved.
There is currently no guarantee that all solutions will be returned or
that a real solution will be favored over a complex one.
Either a list of potential solutions will be returned or None will be
returned (in the case that no method was known to get a solution
for the equation). All other errors (like the inability to cast an
expression as a Poly) are unhandled.
Examples
========
>>> _tsolve(3**(2*x + 5) - 4, x)
[-5/2 + log(2)/log(3), (-5*log(3)/2 + log(2) + I*pi)/log(3)]
>>> _tsolve(log(x) + 2*x, x)
[LambertW(2)/2]
"""
from .bivariate import bivariate_type, _solve_lambert, _filtered_gens
if 'tsolve_saw' not in flags:
flags['tsolve_saw'] = []
if eq in flags['tsolve_saw']:
return
else:
flags['tsolve_saw'].append(eq)
rhs, lhs = _invert(eq, sym)
if lhs == sym:
return [rhs]
try:
if lhs.is_Add:
# it's time to try factoring; powdenest is used
# to try get powers in standard form for better factoring
f = factor(powdenest(lhs - rhs))
if f.is_Mul:
return _solve(f, sym, **flags)
if rhs:
f = logcombine(lhs, force=flags.get('force', True))
if f.count(log) != lhs.count(log):
if isinstance(f, log):
return _solve(f.args[0] - exp(rhs), sym, **flags)
else:
raise NotImplementedError
elif lhs.is_Pow:
if lhs.exp.is_Integer and lhs - rhs != eq:
return _solve(lhs - rhs, sym, **flags)
elif sym not in lhs.exp.free_symbols:
return _solve(lhs.base - rhs**(1/lhs.exp), sym, **flags)
elif not rhs and sym in lhs.exp.free_symbols:
# f(x)**g(x) only has solutions where f(x) == 0 and g(x) != 0 at
# the same place
sol_base = _solve(lhs.base, sym, **flags)
return list(ordered(set(sol_base) -
set(_solve(lhs.exp, sym, **flags))))
elif (rhs != 0 and
lhs.base.is_positive and
lhs.exp.is_extended_real):
return _solve(lhs.exp*log(lhs.base) - log(rhs), sym, **flags)
elif lhs.base == 0 and rhs == 1:
return _solve(lhs.exp, sym, **flags)
elif lhs.is_Mul and rhs.is_positive:
llhs = expand_log(log(lhs))
if llhs.is_Add:
return _solve(llhs - log(rhs), sym, **flags)
elif lhs.is_Function and len(lhs.args) == 1 and lhs.func in multi_inverses:
# sin(x) = 1/3 -> x - asin(1/3) & x - (pi - asin(1/3))
soln = []
for i in multi_inverses[lhs.func](rhs):
soln.extend(_solve(lhs.args[0] - i, sym, **flags))
return list(ordered(soln))
rewrite = lhs.rewrite(exp)
if rewrite != lhs:
return _solve(rewrite - rhs, sym, **flags)
except NotImplementedError:
pass
# maybe it is a lambert pattern
if flags.pop('bivariate', True):
# lambert forms may need some help being recognized, e.g. changing
# 2**(3*x) + x**3*log(2)**3 + 3*x**2*log(2)**2 + 3*x*log(2) + 1
# to 2**(3*x) + (x*log(2) + 1)**3
g = _filtered_gens(eq.as_poly(), sym)
up_or_log = set()
for gi in g:
if gi.is_Pow and gi.base is E or isinstance(gi, log):
up_or_log.add(gi)
elif gi.is_Pow:
gisimp = powdenest(expand_power_exp(gi))
if gisimp.is_Pow and sym in gisimp.exp.free_symbols:
up_or_log.add(gi)
eq_down = expand_log(expand_power_exp(eq)).subs(
dict(zip(up_or_log, [0]*len(up_or_log))))
eq = expand_power_exp(factor(eq_down, deep=True) + (eq - eq_down))
rhs, lhs = _invert(eq, sym)
if lhs.has(sym):
try:
poly = lhs.as_poly()
g = _filtered_gens(poly, sym)
return _solve_lambert(lhs - rhs, sym, g)
except NotImplementedError:
# maybe it's a convoluted function
if len(g) == 2:
try:
gpu = bivariate_type(lhs - rhs, *g)
if gpu is None:
raise NotImplementedError
g, p, u = gpu
flags['bivariate'] = False
inversion = _tsolve(g - u, sym, **flags)
if inversion:
sol = _solve(p, u, **flags)
return list(ordered({i.subs({u: s})
for i in inversion for s in sol}))
else:
raise NotImplementedError
except NotImplementedError:
pass
else:
pass
if flags.pop('force', True):
flags['force'] = False
pos, reps = posify(lhs - rhs)
for u, s in reps.items():
if s == sym:
break
else:
u = sym
if pos.has(u):
try:
soln = _solve(pos, u, **flags)
return list(ordered([s.subs(reps) for s in soln]))
except NotImplementedError:
pass
def _invert(eq, *symbols, **kwargs):
"""Return tuple (i, d) where ``i`` is independent of ``symbols`` and ``d``
contains symbols. ``i`` and ``d`` are obtained after recursively using
algebraic inversion until an uninvertible ``d`` remains. If there are no
free symbols then ``d`` will be zero. Some (but not necessarily all)
solutions to the expression ``i - d`` will be related to the solutions of
the original expression.
Examples
========
>>> _invert(x - 3)
(3, x)
>>> _invert(3)
(3, 0)
>>> _invert(2*cos(x) - 1)
(1/2, cos(x))
>>> _invert(sqrt(x) - 3)
(3, sqrt(x))
>>> _invert(sqrt(x) + y, x)
(-y, sqrt(x))
>>> _invert(sqrt(x) + y, y)
(-sqrt(x), y)
>>> _invert(sqrt(x) + y, x, y)
(0, sqrt(x) + y)
If there is more than one symbol in a power's base and the exponent
is not an Integer, then the principal root will be used for the
inversion:
>>> _invert(sqrt(x + y) - 2)
(4, x + y)
If the exponent is an integer, setting ``integer_power`` to True
will force the principal root to be selected:
>>> _invert(x**2 - 4, integer_power=True)
(2, x)
"""
eq = sympify(eq)
free = eq.free_symbols
if not symbols:
symbols = free
if not free & set(symbols):
return eq, Integer(0)
dointpow = bool(kwargs.get('integer_power', False))
lhs = eq
rhs = Integer(0)
while True:
was = lhs
while True:
indep, dep = lhs.as_independent(*symbols)
# dep + indep == rhs
if lhs.is_Add:
# this indicates we have done it all
if indep == 0:
break
lhs = dep
rhs -= indep
# dep * indep == rhs
else:
# this indicates we have done it all
if indep == 1:
break
lhs = dep
rhs /= indep
# collect like-terms in symbols
if lhs.is_Add:
terms = defaultdict(list)
for a in lhs.args:
i, d = a.as_independent(*symbols)
terms[d].append(i)
if any(len(v) > 1 for v in terms.values()):
args = []
for d, i in terms.items():
if len(i) > 1:
args.append(Add(*i)*d)
else:
args.append(i[0]*d)
lhs = Add(*args)
# if it's a two-term Add with rhs = 0 and two powers we can get the
# dependent terms together, e.g. 3*f(x) + 2*g(x) -> f(x)/g(x) = -2/3
if lhs.is_Add and not rhs and len(lhs.args) == 2 and \
not lhs.is_polynomial(*symbols):
a, b = ordered(lhs.args)
ai, ad = a.as_independent(*symbols)
bi, bd = b.as_independent(*symbols)
if any(i.is_Pow for i in (ad, bd)):
a_base, a_exp = ad.as_base_exp()
b_base, b_exp = bd.as_base_exp()
if a_base == b_base:
# a = -b
lhs = powsimp(powdenest(ad/bd))
rhs = -bi/ai
else:
rat = ad/bd
_lhs = powsimp(ad/bd)
if _lhs != rat:
lhs = _lhs
rhs = -bi/ai
if ai*bi == -1:
if all(
isinstance(i, Function) for i in (ad, bd)) and \
ad.func == bd.func and len(ad.args) == len(bd.args):
if len(ad.args) == 1:
lhs = ad.args[0] - bd.args[0]
else:
# should be able to solve
# f(x, y) == f(2, 3) -> x == 2
# f(x, x + y) == f(2, 3) -> x == 2 or x == 3 - y
raise NotImplementedError('equal function with more than 1 argument')
elif lhs.is_Mul and any(a.is_Pow for a in lhs.args):
lhs = powsimp(powdenest(lhs))
if lhs.is_Function:
if hasattr(lhs, 'inverse') and len(lhs.args) == 1:
# -1
# f(x) = g -> x = f (g)
#
# /!\ inverse should not be defined if there are multiple values
# for the function -- these are handled in _tsolve
#
rhs = lhs.inverse()(rhs)
lhs = lhs.args[0]
elif isinstance(lhs, atan2):
y, x = lhs.args
lhs = 2*atan(y/(sqrt(x**2 + y**2) + x))
if lhs.is_Pow and lhs.base is E:
rhs = log(rhs)
lhs = lhs.exp
if rhs and lhs.is_Pow and lhs.exp.is_Integer and lhs.exp < 0:
lhs = 1/lhs
rhs = 1/rhs
# base**a = b -> base = b**(1/a) if
# a is an Integer and dointpow=True (this gives real branch of root)
# a is not an Integer and the equation is multivariate and the
# base has more than 1 symbol in it
        # The rationale for this is that right now the multi-system solvers
        # don't try to resolve generators to see, for example, if the whole
# system is written in terms of sqrt(x + y) so it will just fail, so we
# do that step here.
if lhs.is_Pow and (
lhs.exp.is_Integer and dointpow or not lhs.exp.is_Integer and
len(symbols) > 1 and len(lhs.base.free_symbols & set(symbols)) > 1):
rhs = rhs**(1/lhs.exp)
lhs = lhs.base
if lhs == was:
break
return rhs, lhs
| skirpichev/omg | diofant/solvers/solvers.py | Python | bsd-3-clause | 50,763 |
# -*- coding: utf-8 -*-
"""
channel.py
:copyright: (c) 2015 by Fulfil.IO Inc.
:license: see LICENSE for more details.
"""
from trytond.pool import PoolMeta
from trytond.model import fields
__all__ = ['Channel']
__metaclass__ = PoolMeta
def submit_to_google(url, data):
import requests
import json
return requests.post(
url,
data=json.dumps(data),
headers={
'Content-Type': 'application/json',
'Authorization': 'Bearer ya29.5AE7v1wOfgun1gR_iXwuGhMnt8nPNbT4C-Pd39DUnsNGb9I6U5FQqRJXNyPb3a0Dk1OWzA', # noqa
}
)
class Channel:
__name__ = "sale.channel"
website = fields.Many2One('nereid.website', 'Website', select=True)
@classmethod
def upload_products_to_google_merchant(cls):
pass
| prakashpp/trytond-google-merchant | channel.py | Python | bsd-3-clause | 797 |
# -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
#
# License: BSD
import warnings
import numpy as np
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..utils import check_random_state
def dbscan(X, eps=0.5, min_samples=5, metric='euclidean',
random_state=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Parameters
----------
X: array [n_samples, n_samples] or [n_samples, n_features]
Array of distances between samples, or a feature array.
The array is treated as a feature array unless the metric is given as
'precomputed'.
eps: float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples: int, optional
The number of samples in a neighborhood for a point to be considered
as a core point.
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
random_state: numpy.RandomState, optional
        The generator used to shuffle the order in which samples are visited.
        Defaults to numpy.random.
Returns
-------
core_samples: array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/plot_dbscan.py for an example.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, “A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise”.
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226–231. 1996
"""
X = np.asarray(X)
n = X.shape[0]
# If index order not given, create random order.
random_state = check_random_state(random_state)
index_order = np.arange(n)
random_state.shuffle(index_order)
D = pairwise_distances(X, metric=metric)
    # Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While true, it's useless information).
neighborhoods = [np.where(x <= eps)[0] for x in D]
# Initially, all samples are noise.
labels = -np.ones(n)
# A list of all core samples found.
core_samples = []
# label_num is the label given to the new cluster
label_num = 0
# Look at all samples and determine if they are core.
# If they are then build a new cluster from them.
for index in index_order:
if labels[index] != -1 or len(neighborhoods[index]) < min_samples:
            # This point is already classified, or has too few neighbors
            # to be a core point.
continue
core_samples.append(index)
labels[index] = label_num
# candidates for new core samples in the cluster.
candidates = [index]
while len(candidates) > 0:
new_candidates = []
# A candidate is a core point in the current cluster that has
# not yet been used to expand the current cluster.
for c in candidates:
noise = np.where(labels[neighborhoods[c]] == -1)[0]
noise = neighborhoods[c][noise]
labels[noise] = label_num
for neighbor in noise:
                    # check if it's a core point as well
if len(neighborhoods[neighbor]) >= min_samples:
# is new core point
new_candidates.append(neighbor)
core_samples.append(neighbor)
# Update candidates for next round of cluster expansion.
candidates = new_candidates
# Current cluster finished.
# Next core point found will start a new cluster.
label_num += 1
return core_samples, labels
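# A minimal usage sketch (array values made up for illustration): the three
# nearby points form one cluster while the distant point keeps the noise
# label -1.
#
#     >>> X = np.array([[0.0, 0.0], [0.1, 0.0], [0.0, 0.1], [5.0, 5.0]])
#     >>> core_samples, labels = dbscan(X, eps=0.5, min_samples=2)
#     >>> labels
#     array([ 0.,  0.,  0., -1.])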
class DBSCAN(BaseEstimator):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples in a neighborhood for a point to be considered
as a core point.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
random_state : numpy.RandomState, optional
        The generator used to shuffle the order in which samples are visited.
        Defaults to numpy.random.
Attributes
----------
`core_sample_indices_` : array, shape = [n_core_samples]
Indices of core samples.
`components_` : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
`labels_` : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/plot_dbscan.py for an example.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, “A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise”.
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226–231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
random_state=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.random_state = check_random_state(random_state)
def fit(self, X, **params):
"""Perform DBSCAN clustering from vector array or distance matrix.
Parameters
----------
X: array [n_samples, n_samples] or [n_samples, n_features]
Array of distances between samples, or a feature array.
The array is treated as a feature array unless the metric is
given as 'precomputed'.
params: dict
Overwrite keywords from __init__.
"""
if params:
            warnings.warn('Passing parameters to fit methods is '
                          'deprecated', stacklevel=2)
self.set_params(**params)
self.core_sample_indices_, self.labels_ = dbscan(X,
**self.get_params())
self.components_ = X[self.core_sample_indices_].copy()
return self
| sgenoud/scikit-learn | sklearn/cluster/dbscan_.py | Python | bsd-3-clause | 7,355 |
#
# Module to allow connection and socket objects to be transferred
# between processes
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
__all__ = []
import os
import sys
import socket
import threading
from . import current_process
from ._ext import _billiard, win32
from .forking import Popen, duplicate, close, ForkingPickler
from .util import register_after_fork, debug, sub_debug
from .connection import Client, Listener
if not(sys.platform == 'win32' or hasattr(_billiard, 'recvfd')):
raise ImportError('pickling of connections not supported')
# globals set later
_listener = None
_lock = None
_cache = set()
#
# Platform specific definitions
#
if sys.platform == 'win32':
# XXX Should this subprocess import be here?
import _subprocess # noqa
def send_handle(conn, handle, destination_pid):
process_handle = win32.OpenProcess(
win32.PROCESS_ALL_ACCESS, False, destination_pid
)
try:
new_handle = duplicate(handle, process_handle)
conn.send(new_handle)
finally:
close(process_handle)
def recv_handle(conn):
return conn.recv()
else:
def send_handle(conn, handle, destination_pid): # noqa
_billiard.sendfd(conn.fileno(), handle)
def recv_handle(conn): # noqa
return _billiard.recvfd(conn.fileno())
#
# Support for a per-process server thread which caches pickled handles
#
def _reset(obj):
global _lock, _listener, _cache
for h in _cache:
close(h)
_cache.clear()
_lock = threading.Lock()
_listener = None
_reset(None)
register_after_fork(_reset, _reset)
def _get_listener():
global _listener
if _listener is None:
_lock.acquire()
try:
if _listener is None:
debug('starting listener and thread for sending handles')
_listener = Listener(authkey=current_process().authkey)
t = threading.Thread(target=_serve)
t.daemon = True
t.start()
finally:
_lock.release()
return _listener
def _serve():
from .util import is_exiting, sub_warning
while 1:
try:
conn = _listener.accept()
handle_wanted, destination_pid = conn.recv()
_cache.remove(handle_wanted)
send_handle(conn, handle_wanted, destination_pid)
close(handle_wanted)
conn.close()
except:
if not is_exiting():
sub_warning('thread for sharing handles raised exception',
exc_info=True)
#
# Functions to be used for pickling/unpickling objects with handles
#
def reduce_handle(handle):
if Popen.thread_is_spawning():
return (None, Popen.duplicate_for_child(handle), True)
dup_handle = duplicate(handle)
_cache.add(dup_handle)
sub_debug('reducing handle %d', handle)
return (_get_listener().address, dup_handle, False)
def rebuild_handle(pickled_data):
address, handle, inherited = pickled_data
if inherited:
return handle
sub_debug('rebuilding handle %d', handle)
conn = Client(address, authkey=current_process().authkey)
conn.send((handle, os.getpid()))
new_handle = recv_handle(conn)
conn.close()
return new_handle
#
# Register `_billiard.Connection` with `ForkingPickler`
#
def reduce_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_connection, (rh, conn.readable, conn.writable)
def rebuild_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _billiard.Connection(
handle, readable=readable, writable=writable
)
ForkingPickler.register(_billiard.Connection, reduce_connection)
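# Rough sketch of what the registration above enables (``conn`` is a
# hypothetical, already-open Connection, shown only for illustration):
# ForkingPickler now serialises a Connection by handle rather than by value.
#
#     reducer, args = reduce_connection(conn)   # (rebuild_connection, (rh, r, w))
#     conn_again = reducer(*args)               # executed in the receiving process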
#
# Register `socket.socket` with `ForkingPickler`
#
def fromfd(fd, family, type_, proto=0):
s = socket.fromfd(fd, family, type_, proto)
if s.__class__ is not socket.socket:
s = socket.socket(_sock=s)
return s
def reduce_socket(s):
reduced_handle = reduce_handle(s.fileno())
return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
def rebuild_socket(reduced_handle, family, type_, proto):
fd = rebuild_handle(reduced_handle)
_sock = fromfd(fd, family, type_, proto)
close(fd)
return _sock
ForkingPickler.register(socket.socket, reduce_socket)
#
# Register `_billiard.PipeConnection` with `ForkingPickler`
#
if sys.platform == 'win32':
def reduce_pipe_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
def rebuild_pipe_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _billiard.PipeConnection(
handle, readable=readable, writable=writable
)
ForkingPickler.register(_billiard.PipeConnection, reduce_pipe_connection)
| mozilla/firefox-flicks | vendor-local/lib/python/billiard/reduction.py | Python | bsd-3-clause | 5,061 |
# -*- coding: utf-8 -*-
#_____________________________________________________________________________
#
# This file is part of txrecaptcha, a Twisted reCAPTCHA client.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# Matthew Finkel 0x017DD169EA793BE2 <sysrqb@torproject.org>
# :copyright: (c) 2013-2015, Isis Lovecruft
# (c) 2013-2015, Matthew Finkel
# (c) 2013-2015, The Tor Project, Inc.
# :license: see LICENSE file for licensing information
#_____________________________________________________________________________
"""Unittests for the txrecaptcha.resources module."""
from __future__ import print_function
import logging
import ipaddr
import random
from BeautifulSoup import BeautifulSoup
from twisted.internet import reactor
from twisted.internet import task
from twisted.internet.error import AlreadyCalled
from twisted.internet.error import AlreadyCancelled
from twisted.trial import unittest
from twisted.web.resource import Resource
from twisted.web.test import requesthelper
from txrecaptcha import resources
# For additional logger output for debugging, comment out the following:
logging.disable(50)
# and then uncomment the following line:
#resources.logging.getLogger().setLevel(10)
class MockWebResource(Resource):
"""A web resource for protecting."""
def render_GET(self, request):
"""Handles requests for the mock resource.
:type request: :api:`twisted.web.server.Request`
:param request: An incoming request.
"""
try:
template = resources.lookup.get_template('index.html')
rendered = template.render(strings,
rtl=rtl,
lang=langs[0])
except Exception as err:
rendered = resources.replaceErrorPage(err)
return rendered
class DummyRequest(requesthelper.DummyRequest):
"""Wrapper for :api:`twisted.test.requesthelper.DummyRequest` to add
redirect support.
"""
def __init__(self, *args, **kwargs):
requesthelper.DummyRequest.__init__(self, *args, **kwargs)
self.redirect = self._redirect(self)
def URLPath(self):
"""Fake the missing Request.URLPath too."""
return self.uri
def _redirect(self, request):
"""Stub method to add a redirect() method to DummyResponse."""
newRequest = type(request)
newRequest.uri = request.uri
return newRequest
class ReCaptchaProtectedResourceTests(unittest.TestCase):
"""Tests for :mod:`txrecaptcha.resources.ReCaptchaProtectedResource`."""
def setUp(self):
"""Create a :class:`MockWebResource` and protect it with a
:class:`ReCaptchaProtectedResource`.
"""
self.timeout = 10.0 # Can't take longer than that, right?
# Set up our resources to fake a minimal HTTP(S) server:
self.pagename = b'captcha.html'
self.root = Resource()
# (None, None) is the (distributor, scheduleInterval):
self.protectedResource = MockWebResource()
self.captchaResource = resources.ReCaptchaProtectedResource(
publicKey='23',
secretKey='42',
remoteIP='111.111.111.111',
useForwardedHeader=True,
protectedResource=self.protectedResource)
self.root.putChild(self.pagename, self.captchaResource)
# Set up the basic parts of our faked request:
self.request = DummyRequest([self.pagename])
def tearDown(self):
"""Cleanup method for removing timed out connections on the reactor.
This seems to be the solution for the dirty reactor due to
``DelayedCall``s which is mentioned at the beginning of this
file. There doesn't seem to be any documentation anywhere which
proposes this solution, although this seems to solve the problem.
"""
for delay in reactor.getDelayedCalls():
try:
delay.cancel()
except (AlreadyCalled, AlreadyCancelled):
pass
def test_renderDeferred_invalid(self):
""":meth:`_renderDeferred` should redirect a ``Request`` (after the
        CAPTCHA was NOT successfully solved) which results from a
``Deferred``'s callback.
"""
self.request.method = b'POST'
def testCB(request):
"""Check the ``Request`` returned from ``_renderDeferred``."""
self.assertIsInstance(request, DummyRequest)
soup = BeautifulSoup(b''.join(request.written)).find('meta')['http-equiv']
self.assertEqual(soup, 'refresh')
d = task.deferLater(reactor, 0, lambda x: x, (False, self.request))
d.addCallback(self.captchaResource._renderDeferred)
d.addCallback(testCB)
return d
def test_renderDeferred_valid(self):
""":meth:`_renderDeferred` should correctly render a ``Request`` (after
the CAPTCHA has been successfully solved) which results from a
``Deferred``'s callback.
"""
self.request.method = b'POST'
def testCB(request):
"""Check the ``Request`` returned from ``_renderDeferred``."""
self.assertIsInstance(request, DummyRequest)
html = b''.join(request.written)
self.assertSubstring('Sorry! Something went wrong with your request.',
html)
d = task.deferLater(reactor, 0, lambda x: x, (True, self.request))
d.addCallback(self.captchaResource._renderDeferred)
d.addCallback(testCB)
return d
def test_renderDeferred_nontuple(self):
""":meth:`_renderDeferred` should correctly render a ``Request`` (after
the CAPTCHA has been successfully solved) which results from a
``Deferred``'s callback.
"""
self.request.method = b'POST'
def testCB(request):
"""Check the ``Request`` returned from ``_renderDeferred``."""
self.assertIs(request, None)
d = task.deferLater(reactor, 0, lambda x: x, (self.request))
d.addCallback(self.captchaResource._renderDeferred)
d.addCallback(testCB)
return d
def test_checkSolution_blankFields(self):
""":meth:`txrecaptcha.resources.ReCaptchaProtectedResource.checkSolution`
        should return a redirect if the solution field is blank.
"""
self.request.method = b'POST'
self.request.addArg('captcha_challenge_field', '')
self.request.addArg('captcha_response_field', '')
self.assertEqual((False, self.request),
self.successResultOf(
self.captchaResource.checkSolution(self.request)))
def test_getRemoteIP_useRandomIP(self):
"""Check that removing our remoteip setting produces a random IP."""
self.captchaResource.remoteIP = None
ip = self.captchaResource.getRemoteIP()
realishIP = ipaddr.IPv4Address(ip).compressed
self.assertTrue(realishIP)
self.assertNotEquals(realishIP, '111.111.111.111')
def test_getRemoteIP_useConfiguredIP(self):
"""Check that our remoteip setting is used if configured."""
ip = self.captchaResource.getRemoteIP()
realishIP = ipaddr.IPv4Address(ip).compressed
self.assertTrue(realishIP)
self.assertEquals(realishIP, '111.111.111.111')
def test_render_GET_missingTemplate(self):
"""render_GET() with a missing template should raise an error and
return the result of replaceErrorPage().
"""
oldLookup = resources.lookup
try:
resources.lookup = None
self.request.method = b'GET'
page = self.captchaResource.render_GET(self.request)
errorPage = resources.replaceErrorPage(Exception('kablam'))
self.assertEqual(page, errorPage)
finally:
resources.lookup = oldLookup
def test_render_POST_blankFields(self):
"""render_POST() with a blank 'captcha_response_field' should return
a redirect to the CaptchaProtectedResource page.
"""
self.request.method = b'POST'
self.request.addArg('captcha_challenge_field', '')
self.request.addArg('captcha_response_field', '')
page = self.captchaResource.render_POST(self.request)
self.assertEqual(page, resources.server.NOT_DONE_YET)
def test_render_POST_wrongSolution(self):
"""render_POST() with a wrong 'captcha_response_field' should return
a redirect to the CaptchaProtectedResource page.
"""
expectedChallenge = '23232323232323232323'
expectedResponse = 'awefawefaefawefaewf'
self.request.method = b'POST'
self.request.addArg('captcha_challenge_field', expectedChallenge)
self.request.addArg('captcha_response_field', expectedResponse)
page = self.captchaResource.render_POST(self.request)
self.assertEqual(page, resources.server.NOT_DONE_YET)
| isislovecruft/txrecaptcha | test/test_resources.py | Python | bsd-3-clause | 9,079 |
# Copyright (c) 2014-2016, Clemson University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Clemson University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db.models.signals import pre_save
from django.dispatch import receiver
try:
from django.utils.timezone import now
except ImportError:
import datetime
now = datetime.datetime.now
from django_sshkey.util import PublicKeyParseError, pubkey_parse
from django_sshkey import settings
class UserKey(models.Model):
user = models.ForeignKey(User, db_index=True)
name = models.CharField(max_length=50, blank=True)
key = models.TextField(max_length=2000)
fingerprint = models.CharField(max_length=128, blank=True, db_index=True)
created = models.DateTimeField(auto_now_add=True, null=True)
last_modified = models.DateTimeField(null=True)
last_used = models.DateTimeField(null=True)
class Meta:
db_table = 'sshkey_userkey'
unique_together = [
('user', 'name'),
]
def __unicode__(self):
return unicode(self.user) + u': ' + self.name
def clean_fields(self, exclude=None):
if not exclude or 'key' not in exclude:
self.key = self.key.strip()
if not self.key:
raise ValidationError({'key': ["This field is required."]})
def clean(self):
self.key = self.key.strip()
if not self.key:
return
try:
pubkey = pubkey_parse(self.key)
except PublicKeyParseError as e:
raise ValidationError(str(e))
self.key = pubkey.format_openssh()
self.fingerprint = pubkey.fingerprint()
if not self.name:
if not pubkey.comment:
raise ValidationError('Name or key comment required')
self.name = pubkey.comment
def validate_unique(self, exclude=None):
if self.pk is None:
objects = type(self).objects
else:
objects = type(self).objects.exclude(pk=self.pk)
if exclude is None or 'name' not in exclude:
if objects.filter(user=self.user, name=self.name).count():
message = 'You already have a key with that name'
raise ValidationError({'name': [message]})
if exclude is None or 'key' not in exclude:
try:
other = objects.get(fingerprint=self.fingerprint, key=self.key)
if self.user == other.user:
message = 'You already have that key on file (%s)' % other.name
else:
message = 'Somebody else already has that key on file'
raise ValidationError({'key': [message]})
except type(self).DoesNotExist:
pass
def export(self, format='RFC4716'):
pubkey = pubkey_parse(self.key)
f = format.upper()
if f == 'RFC4716':
return pubkey.format_rfc4716()
if f == 'PEM':
return pubkey.format_pem()
raise ValueError("Invalid format")
def save(self, *args, **kwargs):
if kwargs.pop('update_last_modified', True):
self.last_modified = now()
super(UserKey, self).save(*args, **kwargs)
def touch(self):
self.last_used = now()
self.save(update_last_modified=False)
@receiver(pre_save, sender=UserKey)
def send_email_add_key(sender, instance, **kwargs):
if not settings.SSHKEY_EMAIL_ADD_KEY or instance.pk:
return
from django.template.loader import render_to_string
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
context_dict = {
'key': instance,
'subject': settings.SSHKEY_EMAIL_ADD_KEY_SUBJECT,
}
request = getattr(instance, 'request', None)
if request:
context_dict['request'] = request
context_dict['userkey_list_uri'] = request.build_absolute_uri(
reverse('django_sshkey.views.userkey_list'))
text_content = render_to_string('sshkey/add_key.txt', context_dict)
msg = EmailMultiAlternatives(
settings.SSHKEY_EMAIL_ADD_KEY_SUBJECT,
text_content,
settings.SSHKEY_FROM_EMAIL,
[instance.user.email],
)
if settings.SSHKEY_SEND_HTML_EMAIL:
html_content = render_to_string('sshkey/add_key.html', context_dict)
msg.attach_alternative(html_content, 'text/html')
msg.send()
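# A rough usage sketch (``some_user`` and the key text are hypothetical,
# shown only for illustration). full_clean() runs clean(), which parses the
# key, normalises it and fills in the fingerprint and, if empty, the name:
#
#     key = UserKey(user=some_user, key='ssh-rsa AAAA... user@example.org')
#     key.full_clean()
#     key.save()
#     key.export('RFC4716')   # or key.export('PEM')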
| ClemsonSoCUnix/django-sshkey | django_sshkey/models.py | Python | bsd-3-clause | 5,547 |
from catkin_pkg.python_setup import generate_distutils_setup
from setuptools import setup
d = generate_distutils_setup(
packages=['catkin'],
package_dir={'': 'python'},
scripts=[
'bin/catkin_find',
'bin/catkin_init_workspace',
'bin/catkin_make',
'bin/catkin_make_isolated',
'bin/catkin_test_results',
'bin/catkin_topological_order',
],
)
setup(**d)
| ros/catkin | setup.py | Python | bsd-3-clause | 416 |
'''
Authentication by token for the serverland dashboard Web API.
Project: MT Server Land prototype code
Author: Will Roberts <William.Roberts@dfki.de>
'''
from piston.utils import rc, translate_mime, MimerDataException
from serverland.dashboard.api.models import AuthToken
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
class TokenAuthentication(object):
'''
Token-based authentication for dashboard API access.
Authorized users will have a 4-byte hexadecimal access token; by
passing this value with the key "token" to an API method, the user
will be authenticated.
'''
def is_authenticated(self, request):
        '''Determines whether a given HTTP request is authenticated or
        not, and sets the request's user field if it is.'''
token = None
# get a token if this is a GET
if request.GET and 'token' in request.GET:
token = request.GET['token']
# get a token if this is a POST
if request.POST and 'token' in request.POST:
token = request.POST['token']
# translate mime-types in the request if this is a mime
# message
try:
translate_mime(request)
except MimerDataException:
pass
# check if there's a token in the mime data
if ( hasattr(request, 'data') and
request.data and
'token' in request.data ):
token = request.data['token']
if token:
try:
token = AuthToken.objects.get(auth_token = token)
if token.enabled:
request.user = token.user
return True
except (ObjectDoesNotExist, MultipleObjectsReturned):
pass
return False
def challenge(self):
'''Gives the HTTPResponse returned when is_authenticated
returns False.'''
return rc.FORBIDDEN
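# A rough usage sketch with django-piston (the handler module and class are
# hypothetical, shown only for illustration): the authenticator is handed to
# the Resource, so every API request must carry a valid ``token`` parameter
# in GET, POST or mime data.
#
#     from piston.resource import Resource
#     from serverland.dashboard.api.handlers import WorkerHandler  # hypothetical
#     worker_resource = Resource(handler=WorkerHandler,
#                                authentication=TokenAuthentication())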
| NickRuiz/mt-serverland | dashboard/api/authentication.py | Python | bsd-3-clause | 1,948 |
class MailgunException(Exception):
pass
| gnrfan/django-mailgun-validation | mailgun_validation/exceptions.py | Python | bsd-3-clause | 44 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'oedit'."""
from primaires.interpreteur.commande.commande import Commande
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.uniligne import Uniligne
from primaires.objet.editeurs.oedit.presentation import EdtPresentation
from primaires.interpreteur.editeur.env_objet import EnveloppeObjet
class CmdOedit(Commande):
"""Commande 'oedit'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "oedit", "oedit")
self.groupe = "administrateur"
self.schema = "<ident>"
self.nom_categorie = "batisseur"
self.aide_courte = "ouvre l'éditeur d'objet"
self.aide_longue = \
"Cette commande permet d'accéder à l'éditeur d'objet. Elle " \
"prend en paramètre l'identifiant de l'objet (que des " \
"minuscules, des chiffres et le signe |ent|_|ff|). Si l'objet " \
"n'existe pas, il est créé."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
ident_objet = dic_masques["ident"].ident
if ident_objet in type(self).importeur.objet.prototypes:
prototype = type(self).importeur.objet.prototypes[ident_objet]
enveloppe = EnveloppeObjet(EdtPresentation, prototype, "")
contexte = enveloppe.construire(personnage)
personnage.contextes.ajouter(contexte)
contexte.actualiser()
else:
editeur = type(self).importeur.interpreteur.construire_editeur(
"oedit", personnage, ident_objet)
personnage.contextes.ajouter(editeur)
editeur.actualiser()
| stormi/tsunami | src/primaires/objet/commandes/oedit/__init__.py | Python | bsd-3-clause | 3,339 |
#!/usr/bin/env python2
import os, sygnal, sys
import argparse
import math
| cuauv/software | cave/mainForCommandline.py | Python | bsd-3-clause | 75 |
from django.conf.urls import url
import logging
logger = logging.getLogger(__name__)
from argo import views
local_urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^filter/$',views.filter,name='filter'),
url(r'^job_display/(?P<job_num>\w+)/$',views.job_display,name='job_display'),
]
urlpatterns = local_urlpatterns
| hep-cce/hpc-edge-service | argo/urls.py | Python | bsd-3-clause | 349 |
from __future__ import absolute_import, unicode_literals
from django.db.models.lookups import Lookup
from django.db.models.query import QuerySet
from django.db.models.sql.where import SubqueryConstraint, WhereNode
from django.utils.six import text_type
class FilterError(Exception):
pass
class FieldError(Exception):
pass
class BaseSearchQuery(object):
DEFAULT_OPERATOR = 'or'
def __init__(self, queryset, query_string, fields=None, operator=None, order_by_relevance=True):
self.queryset = queryset
self.query_string = query_string
self.fields = fields
self.operator = operator or self.DEFAULT_OPERATOR
self.order_by_relevance = order_by_relevance
def _get_filterable_field(self, field_attname):
# Get field
field = dict(
(field.get_attname(self.queryset.model), field)
for field in self.queryset.model.get_filterable_search_fields()
).get(field_attname, None)
return field
def _process_lookup(self, field, lookup, value):
raise NotImplementedError
def _connect_filters(self, filters, connector, negated):
raise NotImplementedError
def _process_filter(self, field_attname, lookup, value):
# Get the field
field = self._get_filterable_field(field_attname)
if field is None:
raise FieldError(
'Cannot filter search results with field "' + field_attname + '". Please add index.FilterField(\'' +
field_attname + '\') to ' + self.queryset.model.__name__ + '.search_fields.'
)
# Process the lookup
result = self._process_lookup(field, lookup, value)
if result is None:
raise FilterError(
'Could not apply filter on search results: "' + field_attname + '__' +
                lookup + ' = ' + text_type(value) + '". Lookup "' + lookup + '" not recognised.'
)
return result
def _get_filters_from_where_node(self, where_node):
# Check if this is a leaf node
if isinstance(where_node, Lookup):
field_attname = where_node.lhs.target.attname
lookup = where_node.lookup_name
value = where_node.rhs
# Ignore pointer fields that show up in specific page type queries
if field_attname.endswith('_ptr_id'):
return
# Process the filter
return self._process_filter(field_attname, lookup, value)
elif isinstance(where_node, SubqueryConstraint):
raise FilterError('Could not apply filter on search results: Subqueries are not allowed.')
elif isinstance(where_node, WhereNode):
# Get child filters
connector = where_node.connector
child_filters = [self._get_filters_from_where_node(child) for child in where_node.children]
child_filters = [child_filter for child_filter in child_filters if child_filter]
return self._connect_filters(child_filters, connector, where_node.negated)
else:
raise FilterError('Could not apply filter on search results: Unknown where node: ' + str(type(where_node)))
def _get_filters_from_queryset(self):
return self._get_filters_from_where_node(self.queryset.query.where)
class BaseSearchResults(object):
def __init__(self, backend, query, prefetch_related=None):
self.backend = backend
self.query = query
self.prefetch_related = prefetch_related
self.start = 0
self.stop = None
self._results_cache = None
self._count_cache = None
self._score_field = None
def _set_limits(self, start=None, stop=None):
if stop is not None:
if self.stop is not None:
self.stop = min(self.stop, self.start + stop)
else:
self.stop = self.start + stop
if start is not None:
if self.stop is not None:
self.start = min(self.stop, self.start + start)
else:
self.start = self.start + start
def _clone(self):
klass = self.__class__
new = klass(self.backend, self.query, prefetch_related=self.prefetch_related)
new.start = self.start
new.stop = self.stop
return new
def _do_search(self):
raise NotImplementedError
def _do_count(self):
raise NotImplementedError
def results(self):
if self._results_cache is None:
self._results_cache = self._do_search()
return self._results_cache
def count(self):
if self._count_cache is None:
if self._results_cache is not None:
self._count_cache = len(self._results_cache)
else:
self._count_cache = self._do_count()
return self._count_cache
def __getitem__(self, key):
new = self._clone()
if isinstance(key, slice):
# Set limits
start = int(key.start) if key.start else None
stop = int(key.stop) if key.stop else None
new._set_limits(start, stop)
# Copy results cache
if self._results_cache is not None:
new._results_cache = self._results_cache[key]
return new
else:
if self._results_cache is not None:
return self._results_cache[key]
new.start = self.start + key
new.stop = self.start + key + 1
return list(new)[0]
def __iter__(self):
return iter(self.results())
def __len__(self):
return len(self.results())
def __repr__(self):
data = list(self[:21])
if len(data) > 20:
data[-1] = "...(remaining elements truncated)..."
return '<SearchResults %r>' % data
def annotate_score(self, field_name):
clone = self._clone()
clone._score_field = field_name
return clone
class BaseSearchBackend(object):
query_class = None
results_class = None
rebuilder_class = None
def __init__(self, params):
pass
def get_index_for_model(self, model):
return None
def get_rebuilder(self):
return None
def reset_index(self):
raise NotImplementedError
def add_type(self, model):
raise NotImplementedError
def refresh_index(self):
raise NotImplementedError
def add(self, obj):
raise NotImplementedError
def add_bulk(self, model, obj_list):
raise NotImplementedError
def delete(self, obj):
raise NotImplementedError
def search(self, query_string, model_or_queryset, fields=None, filters=None,
prefetch_related=None, operator=None, order_by_relevance=True):
# Find model/queryset
if isinstance(model_or_queryset, QuerySet):
model = model_or_queryset.model
queryset = model_or_queryset
else:
model = model_or_queryset
queryset = model_or_queryset.objects.all()
# # Model must be a class that is in the index
# if not class_is_indexed(model):
# return []
        # Check that there's still a query string after the clean up
if query_string == "":
return []
# Only fields that are indexed as a SearchField can be passed in fields
if fields:
allowed_fields = {field.field_name for field in model.get_searchable_search_fields()}
for field_name in fields:
if field_name not in allowed_fields:
raise FieldError(
'Cannot search with field "' + field_name + '". Please add index.SearchField(\'' +
field_name + '\') to ' + model.__name__ + '.search_fields.'
)
# Apply filters to queryset
if filters:
queryset = queryset.filter(**filters)
# Prefetch related
if prefetch_related:
for prefetch in prefetch_related:
queryset = queryset.prefetch_related(prefetch)
# Check operator
if operator is not None:
operator = operator.lower()
if operator not in ['or', 'and']:
raise ValueError("operator must be either 'or' or 'and'")
# Search
search_query = self.query_class(
queryset, query_string, fields=fields, operator=operator, order_by_relevance=order_by_relevance
)
return self.results_class(self, search_query)
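# Usage sketch (illustrative only): `MySearchBackend` and `MyPage` below are
# hypothetical stand-ins for a concrete backend subclass (with query_class and
# results_class set) and an indexed model; neither is defined in this module.
#
#     backend = MySearchBackend(params={})
#     results = backend.search("hello", MyPage, fields=["title"], operator="or")
#     results.count()                  # uses cached results if present, else _do_count()
#     page_of_results = results[:10]   # slicing clones the results and narrows start/stop
#     for hit in page_of_results:      # iteration triggers _do_search() lazily
#         print(hit)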
|
car3oon/saleor
|
saleor/search/backends/base.py
|
Python
|
bsd-3-clause
| 8,616
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide the JSON property.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from .primitive import String
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'JSON',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class JSON(String):
''' Accept JSON string values.
The value is transmitted and received by BokehJS as a *string*
    containing JSON content, i.e., you must use ``JSON.parse`` to unpack
the value into a JavaScript hash.
Args:
default (string or None, optional) :
A default value for attributes created from this property to
have (default: None)
help (str or None, optional) :
A documentation string for this property. It will be automatically
used by the :ref:`bokeh.sphinxext.bokeh_prop` extension when
            generating Sphinx documentation. (default: None)
serialized (bool, optional) :
Whether attributes created from this property should be included
in serialization (default: True)
readonly (bool, optional) :
Whether attributes created from this property are read-only.
(default: False)
'''
def validate(self, value, detail=True):
super().validate(value, detail)
if value is None: return
try:
import json
json.loads(value)
except ValueError:
msg = "" if not detail else "expected JSON text, got %r" % value
raise ValueError(msg)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
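# Usage sketch (illustrative only, in the spirit of the sections above):
# validate() accepts any string that json.loads can parse and raises
# ValueError otherwise.
#
#     >>> JSON().validate('{"a": 1}')   # valid JSON text, passes silently
#     >>> JSON().validate('not json')   # raises ValueError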
|
ericmjl/bokeh
|
bokeh/core/property/json.py
|
Python
|
bsd-3-clause
| 2,991
|
# -*- encoding: utf-8 -*-
from imaplib import ParseFlags
# mockimaplib: A very simple mock server module for imap client APIs
# Copyright (C) 2014 Alan Etkin <spametki@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or(at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/lgpl.html>
"""
mockimaplib allows you to test applications connecting to a dummy imap
service. For more details on the api subset implemented,
refer to the imaplib docs.
The client should configure a dictionary to map imap string queries to sets
of entries stored in a message dummy storage dictionary. The module includes
a small set of default message records (SPAM and MESSAGES), two mailboxes
(Draft and INBOX) and a list of query/resultset entries (RESULTS).
Usage:
>>> import mockimaplib
>>> connection = mockimaplib.IMAP4_SSL(<host>)
>>> connection.login(<user>, <password>)
None
>>> connection.select("INBOX")
("OK", ... <mailbox length>)
# fetch commands specifying single uid or message id
# will try to get messages recorded in SPAM
>>> connection.uid(...)
<search query or fetch result>
# returns a string list of matching message ids
>>> connection.search(<query>)
("OK", ... "1 2 ... n")
"""
MESSAGES = (
"MIME-Version: 1.0\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:52:30 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:52:30 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <10101010101010010000010101010001010101001010010000001@mail.example.com>\r\nSubject: spam1\r\nFrom: Mr. Gumby <gumby@example.com>\r\nTo: The nurse <nurse@example.com>\r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse!\r\n\r\n\r\n",
"MIME-Version: 1.0\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:52:47 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:52:47 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <101010101010100100000101010100010101010010100100000010@mail.example.com>\r\nSubject: spam2\r\nFrom: Mr. Gumby <gumby@example.com>\r\nTo: The nurse <nurse@example.com>\r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse, nurse!",
"MIME-Version: 1.0\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:54:54 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:54:54 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <1010101010101001000001010101000101010100101001000000101@mail.example.com>\r\nSubject: spamalot1\r\nFrom: Mr. Gumby <gumby@example.com>\r\nTo: The nurse <nurse@example.com>\r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse!\r\n\r\n\r\n",
"MIME-Version: 1.0\r\n\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:54:54 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:54:54 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <101010101010100100000101010100010101010010100100000010101@mail.example.com>\r\nSubject: spamalot2\r\nFrom: Mr. Gumby <gumby@example.com>\r\nTo: The nurse <nurse@example.com>\r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse! ... Nurse! ... Nurse!\r\n\r\n\r\n",
)
SPAM = {
"INBOX": [
{"uid": "483209", "headers": MESSAGES[0], "complete": MESSAGES[0], "flags": ""},
{"uid": "483211", "headers": MESSAGES[1], "complete": MESSAGES[1], "flags": ""},
{"uid": "483225", "headers": MESSAGES[2], "complete": MESSAGES[2], "flags": ""},
],
"Draft": [
{"uid": "483432", "headers": MESSAGES[3], "complete": MESSAGES[3], "flags": ""},
],
}
RESULTS = {
# <query string>: [<str uid> | <long id>, ...]
"INBOX": {"(ALL)": (1, 2, 3), "(1:3)": (1, 2, 3)},
"Draft": {"(1:1)": (1,)},
}
class Connection(object):
"""Dummy connection object for the imap client.
    By default, uses the module SPAM and RESULTS
sets (use Connection.setup for custom values)"""
def login(self, user, password):
pass
def __init__(self):
self._readonly = False
self._mailbox = None
self.setup()
def list(self):
return ("OK", ['(\\HasNoChildren) "/" "%s"' % key for key in self.spam])
    def select(self, tablename, readonly=False):
        """args: mailbox, boolean
        result[1][0] -> int last message id / mailbox length
        result[0] = 'OK'
        """
        self._readonly = readonly
        self._mailbox = tablename
        return ("OK", (len(self.spam[self._mailbox]), None))
def uid(self, command, uid, arg):
"""args:
command: "search" | "fetch"
uid: None | uid
parts: "(ALL)" | "(RFC822 FLAGS)" | "(RFC822.HEADER FLAGS)"
"search", None, "(ALL)" -> ("OK", ("uid_1 uid_2 ... uid_<mailbox length>", None))
"search", None, "<query>" -> ("OK", ("uid_1 uid_2 ... uid_n", None))
"fetch", uid, parts -> ("OK", (("<id> ...", "<raw message as specified in parts>"), "<flags>")
[0] [1][0][0] [1][0][1] [1][1]
"""
if command == "search":
return self._search(arg)
elif command == "fetch":
return self._fetch(uid, arg)
def _search(self, query):
return (
"OK",
(" ".join([str(item["uid"]) for item in self._get_messages(query)]), None),
)
def _fetch(self, value, arg):
try:
message = self.spam[self._mailbox][value - 1]
message_id = value
except TypeError:
for x, item in enumerate(self.spam[self._mailbox]):
if item["uid"] == value:
message = item
message_id = x + 1
break
parts = "headers"
if arg in ("(ALL)", "(RFC822 FLAGS)"):
parts = "complete"
return ("OK", (("%s " % message_id, message[parts]), message["flags"]))
def _get_messages(self, query):
if query.strip().isdigit():
return [
self.spam[self._mailbox][int(query.strip()) - 1],
]
elif query[1:-1].strip().isdigit():
return [
self.spam[self._mailbox][int(query[1:-1].strip()) - 1],
]
elif query[1:-1].replace("UID", "").strip().isdigit():
for item in self.spam[self._mailbox]:
if item["uid"] == query[1:-1].replace("UID", "").strip():
return [
item,
]
messages = []
try:
for m in self.results[self._mailbox][query]:
try:
self.spam[self._mailbox][m - 1]["id"] = m
messages.append(self.spam[self._mailbox][m - 1])
except TypeError:
for x, item in enumerate(self.spam[self._mailbox]):
if item["uid"] == m:
item["id"] = x + 1
messages.append(item)
break
except IndexError:
# message removed
pass
return messages
except KeyError:
raise ValueError("The client issued an unexpected query: %s" % query)
    def setup(self, spam=None, results=None):
        """adds custom message and query databases or sets
        the values to the module defaults.
        """
        # avoid mutable default arguments so connections don't share state
        self.spam = spam if spam is not None else {}
        self.results = results if results is not None else {}
        if not spam:
            for key in SPAM:
                self.spam[key] = []
                for d in SPAM[key]:
                    self.spam[key].append(d.copy())
        if not results:
            for key in RESULTS:
                self.results[key] = RESULTS[key].copy()
def search(self, first, query):
"""args:
first: None
query: string with mailbox query (flags, date, uid, id, ...)
example: '2:15723 BEFORE 27-Jan-2014 FROM "gumby"'
result[1][0] -> "id_1 id_2 ... id_n"
"""
messages = self._get_messages(query)
ids = " ".join([str(item["id"]) for item in messages])
return ("OK", (ids, None))
def append(self, mailbox, flags, struct_time, message):
"""
result, data = self.connection.append(mailbox, flags, struct_time, message)
if result == "OK":
uid = int(re.findall("\d+", str(data))[-1])
"""
last = self.spam[mailbox][-1]
try:
uid = int(last["uid"]) + 1
except ValueError:
alluids = []
for _mailbox in self.spam.keys():
for item in self.spam[_mailbox]:
                    try:
                        alluids.append(int(item["uid"]))
                    except (KeyError, TypeError, ValueError):
                        pass
if len(alluids) > 0:
uid = max(alluids) + 1
else:
uid = 1
flags = "FLAGS " + flags
item = {
"uid": str(uid),
"headers": message,
"complete": message,
"flags": flags,
}
self.spam[mailbox].append(item)
return ("OK", "spam spam %s spam" % uid)
def store(self, *args):
"""
implements some flag commands
args: ("<id>", "<+|->FLAGS", "(\\Flag1 \\Flag2 ... \\Flagn)")
"""
        message = self.spam[self._mailbox][int(args[0]) - 1]
old_flags = ParseFlags(message["flags"])
flags = ParseFlags("FLAGS" + args[2])
if args[1].strip().startswith("+"):
message["flags"] = "FLAGS (%s)" % " ".join(set(flags + old_flags))
elif args[1].strip().startswith("-"):
message["flags"] = "FLAGS (%s)" % " ".join(
[flag for flag in old_flags if not flag in flags]
)
    def expunge(self):
        """implements removal of messages flagged as \\Deleted"""
        # iterate over a copy so removing items does not skip entries
        for item in list(self.spam[self._mailbox]):
            if "\\Deleted" in item["flags"]:
                self.spam[self._mailbox].remove(item)
class IMAP4(object):
""">>> connection = IMAP4() # creates the dummy imap4 client object"""
def __new__(self, *args, **kwargs):
# args: (server, port)
return Connection()
IMAP4_SSL = IMAP4
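# ---------------------------------------------------------------------------
# Minimal self-test sketch (illustrative only, not part of the published API);
# it exercises the default SPAM/RESULTS data shipped with this module.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    conn = IMAP4_SSL("dummy.example.com")      # IMAP4.__new__ returns a Connection
    conn.login("nurse", "secret")              # no-op in the mock
    status, data = conn.select("INBOX")
    assert status == "OK" and data[0] == 3     # three default messages in INBOX
    status, data = conn.search(None, "(ALL)")
    assert data[0] == "1 2 3"                  # message ids as a single string
    status, data = conn.uid("fetch", "483209", "(RFC822 FLAGS)")
    assert "Subject: spam1" in data[0][1]      # complete raw message text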
|
web2py/pydal
|
pydal/contrib/mockimaplib.py
|
Python
|
bsd-3-clause
| 10,686
|
import re
from .. import irc, var, ini
from ..tools import is_identified
# Require identification with NickServ to send messages.
def ident (f):
def check (user, channel, word):
if is_identified(user):
f(user, channel, word)
else:
irc.msg(channel, "{}: Identify with NickServ first.".format(user))
return check
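# For example, a handler can be protected like any other decorator
# (secret_command is a hypothetical handler; the commands below are not wrapped):
#
#     @ident
#     def secret_command(user, channel, word):
#         irc.msg(channel, "{}: you are identified.".format(user))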
# Insert a message monitor to look for user activity.
def ins_monitor (line_obj):
if line_obj.event in ["JOIN", "PRIVMSG"]:
send_messages(line_obj.user)
# Fill commands dictionary.
def ins_command ():
var.commands["tell"] = type("command", (object,), {})()
var.commands["tell"].method = leave_message
var.commands["tell"].aliases = [".tell", ".msg"]
var.commands["tell"].usage = ["{} user message - Leave a message to user."]
var.commands["listtell"] = type("command", (object,), {})()
var.commands["listtell"].method = list_messages
var.commands["listtell"].aliases = [".listtell", ".ltell", ".listtells", ".showtells"]
var.commands["listtell"].usage = ["{} - Check if you have any messages and show them."]
# Fill a space for the messages database.
def ins_db ():
var.data["messages"] = ini.fill_dict("messages.ini", "Messages")
# Turning list of strings into a list of tuples.
for user in var.data["messages"]:
msg_list = [(msg.split(" ~ ")[0], msg.split(" ~ ", 1)[1]) for msg in var.data["messages"][user]]
var.data["messages"][user] = msg_list
# Leave a message to someone.
def leave_message (user, channel, word):
# It needs a nickname and a message.
if len(word) < 3:
irc.msg(channel, "{}: Wrong syntax. Check .help".format(user))
return
target = word[1]
message = " ".join(word[2:])
# Check if target is a valid nickname.
    match = re.match(r"[a-zA-Z\[\]\\`_\^\{\|\}][a-zA-Z0-9\[\]\\`_\^\{\|\}]+", target)
if not match or (hasattr(match, "group") and match.group() != target):
irc.msg(channel, "{} is not a valid nickname.".format(target))
return
# Check for "hurr Imma tell myself something".
if target.lower() == user.lower():
irc.msg(channel, "{}: Do it yourself. I'm not .tell'ing you shit!".format(user))
return
# The bot won't tell itself something.
if target.lower() == irc.botnick.lower():
irc.msg(channel, "{}: I'm right here, say it to my face!".format(user))
return
# Check for repeated messages.
if target in var.data["messages"]:
if (user, message) in var.data["messages"][target]:
irc.msg(channel, "{}: You already left this message.".format(user))
return
# Create an empty list for users not in the database.
if target not in var.data["messages"]:
var.data["messages"][target] = []
# Append tuple and add to ini.
var.data["messages"][target].append((user, message))
message_list = ["{} ~ {}".format(pair[0], pair[1]) for pair in var.data["messages"][target]]
ini.add_to_ini("Messages", target, "\n".join(message_list), "messages.ini")
irc.msg(channel, "{}: Message stored.".format(user))
# Send a user stored messages.
def send_messages (user):
# Be case insensitive, please.
for nick in var.data["messages"]:
if user.lower() == nick.lower():
user = nick
# There's no use going on if the user isn't in the messages database.
if user not in var.data["messages"]:
return
if len(var.data["messages"][user]) > 4:
# Send the first 4 messages.
for pair in var.data["messages"][user][0:4]:
irc.msg(user, "{} sent you: {}".format(pair[0], pair[1]))
irc.msg(pair[0], "{} received your message.".format(user))
# Remove the sent messages.
st_messages = var.data["messages"][user][0:4]
for pair in st_messages:
var.data["messages"][user].remove(pair)
new_messages = ["{} ~ {}".format(pair[0], pair[1]) for pair in var.data["messages"][user]]
ini.add_to_ini("Messages", user, "\n".join(new_messages), "messages.ini")
irc.msg(user, "To reply to them, use .tell user message")
irc.msg(user, "You have more messages. Type \x034.showtells\x0f to view them.")
else:
# Send every message.
for pair in var.data["messages"][user]:
irc.msg(user, "{} sent you: {}".format(pair[0], pair[1]))
irc.msg(pair[0], "{} received your message.".format(user))
# Remove them.
del var.data["messages"][user]
ini.remove_from_ini("Messages", user, "messages.ini")
irc.msg(user, "To reply to them, use .tell user message")
# Send the rest of the messages.
def list_messages (user, channel, word):
# There's no use going on if the user isn't in the messages database.
if user not in var.data["messages"]:
irc.msg(channel, "{}: You don't have any messages.".format(user))
return
send_messages(user)
irc.msg(channel, "{}: Sent ;)".format(user))
|
skewerr/deskbot
|
modules/commands/tell.py
|
Python
|
bsd-3-clause
| 4,602
|
from __future__ import absolute_import, unicode_literals
from celery.bin import celery
from djcelery.app import app
from djcelery.management.base import CeleryCommand
base = celery.CeleryCommand(app=app)
class Command(CeleryCommand):
"""The celery command."""
help = 'celery commands, see celery help'
requires_model_validation = True
options = (CeleryCommand.options
+ base.get_options()
+ base.preload_options)
def run_from_argv(self, argv):
argv = self.handle_default_options(argv)
base.execute_from_commandline(
['{0[0]} {0[1]}'.format(argv)] + argv[2:])
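# For example, "python manage.py celery worker -l info" arrives here as
# ['manage.py', 'celery', 'worker', '-l', 'info'] and is forwarded to the celery
# command object as ['manage.py celery', 'worker', '-l', 'info'], so celery's
# own usage/help output shows the full management-command prefix.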
|
alexhayes/django-celery
|
djcelery/management/commands/celery.py
|
Python
|
bsd-3-clause
| 643
|
from __future__ import absolute_import, unicode_literals
import datetime
import pytz
import six
ISO8601_DATE_FORMAT = '%Y-%m-%d'
ISO8601_DATETIME_FORMAT = ISO8601_DATE_FORMAT + 'T' + '%H:%M:%S'
def parse_iso8601(value):
"""
Parses a datetime as a UTC ISO8601 date
"""
if not value:
return None
if 'T' in value: # has time
_format = ISO8601_DATETIME_FORMAT
if '.' in value: # has microseconds. Some values from RapidPro don't include this.
_format += '.%f'
if 'Z' in value: # has zero offset marker
_format += 'Z'
else:
_format = ISO8601_DATE_FORMAT
return datetime.datetime.strptime(value, _format).replace(tzinfo=pytz.utc)
def format_iso8601(value):
"""
Formats a datetime as a UTC ISO8601 date
"""
_format = ISO8601_DATETIME_FORMAT + '.%f'
return six.text_type(value.astimezone(pytz.UTC).strftime(_format))
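# Round-trip sketch (illustrative only): parse_iso8601() accepts dates with or
# without a time component, and format_iso8601() always emits microseconds.
#
#     >>> parse_iso8601('2014-01-02T03:04:05.000006')
#     datetime.datetime(2014, 1, 2, 3, 4, 5, 6, tzinfo=<UTC>)
#     >>> parse_iso8601('2014-01-02')
#     datetime.datetime(2014, 1, 2, 0, 0, tzinfo=<UTC>)
#     >>> print(format_iso8601(parse_iso8601('2014-01-02T03:04:05')))
#     2014-01-02T03:04:05.000000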
|
caktus/rapidpro-python
|
temba_client/utils.py
|
Python
|
bsd-3-clause
| 935
|
#! /usr/bin/env python3
import getopt
import os
import os.path
import re
import socket
import subprocess
import sys
import threading
import time
import tokenize
import traceback
import types
import linecache
from code import InteractiveInterpreter
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter. " \
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
sys.exit(1)
import tkinter.messagebox as tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import idlever
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
global warning_stream
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno,
file=None, line=None):
if file is None:
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename,
lineno, line=line))
except IOError:
pass ## file (probably __stderr__) is invalid, warning dropped.
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno, line=None):
"""Format warnings the IDLE way"""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
if line is None:
line = linecache.getline(filename, lineno)
line = line.strip()
if line:
s += " %s\n" % line
s += "%s: %s\n>>> " % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [("Set Breakpoint", "<<set-breakpoint-here>>"),
("Clear Breakpoint", "<<clear-breakpoint-here>>")]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
except IOError:
lines = []
try:
with open(self.breakpointPath, "w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except IOError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index]))
end = int(float(ranges[index+1]))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
return [sys.executable] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error as err:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout as err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("stdin", self.tkconsole)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout as err:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"Make sure subprocess is terminated"
try:
self.rpcsubproc.kill()
except OSError:
# process already terminated
return
else:
try:
self.rpcsubproc.wait()
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval,
self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
with tokenize.open(filename) as fp:
source = fp.read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
tkerr = self.tkconsole.stderr
print('*** Error in script or command!\n', file=tkerr)
print('Traceback (most recent call last):', file=tkerr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
# at the moment, InteractiveInterpreter expects str
assert isinstance(source, str)
#if isinstance(source, str):
# from idlelib import IOBinding
# try:
# source = source.encode(IOBinding.encoding)
# except UnicodeError:
# self.tkconsole.resetoutput()
# self.write("Unsupported characters in input\n")
# return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Override Interactive Interpreter method: Use Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
tkconsole = self.tkconsole
text = tkconsole.text
text.tag_remove("ERROR", "1.0", "end")
type, value, tb = sys.exc_info()
msg = getattr(value, 'msg', '') or value or "<no detail available>"
lineno = getattr(value, 'lineno', '') or 1
offset = getattr(value, 'offset', '') or 0
if offset == 0:
lineno += 1 #mark end of offending line
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
tkconsole.colorize_syntax_error(text, pos)
tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % msg)
tkconsole.showprompt()
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec(code, self.locals)
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if macosxSupport.runningAsOSXApp():
del menu_specs[-3]
menu_specs[-2] = ("windows", "_Window")
# New classes
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
self.color = color = self.ColorDelegator()
self.per.insertfilter(color)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdout = PseudoFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoFile(self, "stderr", IOBinding.encoding)
self.console = PseudoFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self
try:
# page help() text to shell.
import pydoc # import must be done here to capture i/o rebinding.
# XXX KBK 27Dec07 use a textView someday, but must work w/o subproc
pydoc.pager = pydoc.plainpager
except:
sys.stderr = sys.__stderr__
raise
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
if self.reading:
self.top.quit()
self.canceled = True
self.closing = True
# Wait for poll_subprocess() rescheduling to stop
self.text.after(2 * self.pollinterval, self.close2)
def close2(self):
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
def begin(self):
self.text.mark_set("iomark", "insert")
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.showprompt()
import tkinter
tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
# (but only if this before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from idlelib.StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.history_store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
raise ###pass # ### 11Aug07 KBK if we are expecting exceptions
# let's find out what they are and be specific.
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
class PseudoFile(object):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.encoding = encoding
def write(self, s):
self.shell.write(s, self.tags)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
pass
def isatty(self):
return True
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
use_subprocess = True
enable_shell = True
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
enable_shell = False
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if dir not in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
# start editor and/or shell windows:
root = Tk(className="Idle")
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args:
flist.open(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.runningAsOSXApp() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
shell = flist.pyshell
# handle remaining options:
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
    if shell and (cmd or script):
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
# Check for problematic OS X Tk versions and print a warning message
# in the IDLE shell window; this is less intrusive than always opening
# a separate window.
tkversionwarning = macosxSupport.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand(''.join(("print('", tkversionwarning, "')")))
root.mainloop()
root.destroy()
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
|
wdv4758h/ZipPy
|
lib-python/3/idlelib/PyShell.py
|
Python
|
bsd-3-clause
| 52,145
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from unittest import TestCase
from mock import Mock, patch
from qrl.core.misc import logger
from qrl.core.processors.TxnProcessor import TxnProcessor
from qrl.core.ChainManager import ChainManager
from qrl.core.State import State
from qrl.core.OptimizedAddressState import OptimizedAddressState
from qrl.core.txs.TransferTransaction import TransferTransaction
from qrl.core.TransactionPool import TransactionPool
from tests.misc.helper import replacement_getTime
from qrl.core.p2p.p2pfactory import P2PFactory
logger.initialize_default()
def make_tx(txhash=b'hashbrownies', fee=1, autospec=TransferTransaction, PK=b'publickey', **kwargs):
return Mock(autospec=autospec, txhash=txhash, fee=fee, PK=PK, **kwargs)
@patch('qrl.core.processors.TxnProcessor.logger')
@patch('qrl.core.txs.Transaction.Transaction.get_slave')
class TestTxnProcessor(TestCase):
def setUp(self):
m_state = Mock(name='A Mock State', autospec=State)
m_state.get_address_state.return_value = Mock(name='A Mock AddressState', autospec=OptimizedAddressState)
self.chain_manager = Mock(autospec=ChainManager)
self.chain_manager._state = m_state
tx_attrs = {
'validate.return_value': True, # Custom validation for different Transaction Types
'validate_extended.return_value': True, # Master/slave XMSS tree validation; balance & fee, OTS key reuse
'validate_transaction_pool.return_value': True # checks for OTS key reuse within TransactionPool only
}
self.tx1 = make_tx(name='Mock TX 1', **tx_attrs)
self.tx2 = make_tx(name='Mock TX 2', **tx_attrs)
self.tx3 = make_tx(name='Mock TX 3', **tx_attrs)
self.tx4 = make_tx(name='Mock TX 4', **tx_attrs)
self.m_txpool = Mock(autospec=TransactionPool)
self.m_txpool.get_pending_transaction.side_effect = [(self.tx1, replacement_getTime()),
(self.tx2, replacement_getTime()),
(self.tx3, replacement_getTime()),
(self.tx4, replacement_getTime())]
self.m_broadcast_tx = Mock(autospec=P2PFactory.broadcast_tx)
self.txnprocessor = TxnProcessor(chain_manager=self.chain_manager,
transaction_pool_obj=self.m_txpool,
broadcast_tx=self.m_broadcast_tx)
def test_txnprocessor_all_valid(self, m_get_slave, m_logger):
# Transaction.get_slave() gives you the slave's Qaddress, if the TXN is signed by a slave XMSS tree.
# If it's a normal TXN signed by the master XMSS tree, it returns None. Since we mocked out validate_extended(),
# it doesn't really matter what we set here. It's just to make things explicit. Also because class-level patch
# cannot extend into the setUp() function, only the test_* functions.
m_get_slave.return_value = b'PUBLICKEY'
tx_results = [t for t in self.txnprocessor]
self.assertEqual([True, True, True, True], tx_results)
self.assertEqual(4, self.m_txpool.add_tx_to_pool.call_count)
self.assertEqual(4, self.m_broadcast_tx.call_count)
def test_txnprocessor_tx_validate_fail(self, m_get_slave, m_logger):
m_get_slave.return_value = None
self.chain_manager.validate_all.return_value = False
tx_results = []
for t in self.txnprocessor:
tx_results.append(t)
self.chain_manager.validate_all.return_value = True
self.assertEqual([False, True, True, True], tx_results)
self.assertEqual(3, self.m_txpool.add_tx_to_pool.call_count)
self.assertEqual(3, self.m_broadcast_tx.call_count)
def test_txnprocessor_tx_validate_extended_fail(self, m_get_slave, m_logger):
m_get_slave.return_value = None
self.chain_manager.validate_all.return_value = True
tx_results = []
for t in self.txnprocessor:
tx_results.append(t)
if len(tx_results) == 3:
self.chain_manager.validate_all.return_value = True
else:
self.chain_manager.validate_all.return_value = False
m_logger.info.assert_called()
self.assertEqual([True, False, False, True], tx_results)
self.assertEqual(2, self.m_txpool.add_tx_to_pool.call_count)
self.assertEqual(2, self.m_broadcast_tx.call_count)
def test_txnprocessor_tx_validate_transaction_pool_fail(self, m_get_slave, m_logger):
m_get_slave.return_value = None
tx_results = []
for t in self.txnprocessor:
tx_results.append(t)
if len(tx_results) < 2:
self.chain_manager.validate_all.return_value = True
else:
self.chain_manager.validate_all.return_value = False
m_logger.info.assert_called()
self.assertEqual([True, True, False, False], tx_results)
self.assertEqual(2, self.m_txpool.add_tx_to_pool.call_count)
self.assertEqual(2, self.m_broadcast_tx.call_count)
def test_txnprocessor_tx_all_failure_modes(self, m_get_slave, m_logger):
m_get_slave.return_value = None
tx_results = []
self.chain_manager.validate_all.return_value = True
for t in self.txnprocessor:
tx_results.append(t)
self.chain_manager.validate_all.return_value = False
m_logger.info.assert_called()
self.assertEqual([True, False, False, False], tx_results)
self.assertEqual(1, self.m_txpool.add_tx_to_pool.call_count)
self.assertEqual(1, self.m_broadcast_tx.call_count)
def test_empty(self, m_get_slave, m_logger):
m_get_slave.return_value = None
self.m_txpool.get_pending_transaction.side_effect = None
self.m_txpool.get_pending_transaction.return_value = None
tx_results = [t for t in self.txnprocessor]
self.assertEqual([], tx_results)
self.m_txpool.add_tx_to_pool.assert_not_called()
self.m_broadcast_tx.assert_not_called()
|
cyyber/QRL
|
tests/core/processors/test_TxnProcessor.py
|
Python
|
mit
| 6,292
|
import json
import sys
data = json.load(sys.stdin)
for e in data.itervalues():
if e['senses'] and e['senses'][0]['definition']:
print u"{0}\t{1}".format(
e['hw'], e['senses'][0]['definition']['sen']).encode('utf-8')
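# Illustrative sketch (not part of the original script): the JSON read from
# stdin is assumed to map entry ids to objects with 'hw' and 'senses' fields
# shaped as below; this helper mirrors the extraction done above on one
# hand-made entry.
def _example_entry():
    entry = {'hw': u'dog',
             'senses': [{'definition': {'sen': u'a domesticated canid'}}]}
    return u"{0}\t{1}".format(entry['hw'],
                              entry['senses'][0]['definition']['sen'])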
|
Eszti/4lang
|
scripts/get_defs.py
|
Python
|
mit
| 241
|
from numbers import Integral, Real
from itertools import chain
import string
import numpy as np
import openmc.checkvalue as cv
import openmc.data
# Supported keywords for continuous-energy cross section plotting
PLOT_TYPES = ['total', 'scatter', 'elastic', 'inelastic', 'fission',
'absorption', 'capture', 'nu-fission', 'nu-scatter', 'unity',
'slowing-down power', 'damage']
# Supported keywords for multi-group cross section plotting
PLOT_TYPES_MGXS = ['total', 'absorption', 'scatter', 'fission',
'kappa-fission', 'nu-fission', 'prompt-nu-fission',
                   'delayed-nu-fission', 'chi', 'chi-prompt', 'chi-delayed',
'inverse-velocity', 'beta', 'decay rate', 'unity']
# Create a dictionary which can be used to convert PLOT_TYPES_MGXS to the
# openmc.XSdata attribute name needed to access the data
_PLOT_MGXS_ATTR = {line: line.replace(' ', '_').replace('-', '_')
for line in PLOT_TYPES_MGXS}
_PLOT_MGXS_ATTR['scatter'] = 'scatter_matrix'
# Special MT values
UNITY_MT = -1
XI_MT = -2
# MTs to combine to generate associated plot_types
_INELASTIC = [mt for mt in openmc.data.SUM_RULES[3] if mt != 27]
PLOT_TYPES_MT = {'total': openmc.data.SUM_RULES[1],
'scatter': [2] + _INELASTIC,
'elastic': [2],
'inelastic': _INELASTIC,
'fission': [18],
'absorption': [27], 'capture': [101],
'nu-fission': [18],
'nu-scatter': [2] + _INELASTIC,
'unity': [UNITY_MT],
'slowing-down power': [2] + _INELASTIC + [XI_MT],
'damage': [444]}
# Operations to use when combining MTs; the first np.add is applied relative
# to zero
PLOT_TYPES_OP = {'total': (np.add,),
'scatter': (np.add,) * (len(PLOT_TYPES_MT['scatter']) - 1),
'elastic': (),
'inelastic': (np.add,) * (len(PLOT_TYPES_MT['inelastic']) - 1),
'fission': (), 'absorption': (),
'capture': (), 'nu-fission': (),
'nu-scatter': (np.add,) * (len(PLOT_TYPES_MT['nu-scatter']) - 1),
'unity': (),
'slowing-down power':
(np.add,) * (len(PLOT_TYPES_MT['slowing-down power']) - 2) + (np.multiply,),
'damage': ()}
# Types of plots to plot linearly in y
PLOT_TYPES_LINEAR = {'nu-fission / fission', 'nu-scatter / scatter',
'nu-fission / absorption', 'fission / absorption'}
# Minimum and maximum energies for plotting (units of eV)
_MIN_E = 1.e-5
_MAX_E = 20.e6
def plot_xs(this, types, divisor_types=None, temperature=294., data_type=None,
axis=None, sab_name=None, ce_cross_sections=None,
mg_cross_sections=None, enrichment=None, plot_CE=True, orders=None,
divisor_orders=None, **kwargs):
"""Creates a figure of continuous-energy cross sections for this item.
Parameters
----------
this : str or openmc.Material
Object to source data from
types : Iterable of values of PLOT_TYPES
The type of cross sections to include in the plot.
divisor_types : Iterable of values of PLOT_TYPES, optional
Cross section types which will divide those produced by types
before plotting. A type of 'unity' can be used to effectively not
divide some types.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
data_type : {'nuclide', 'element', 'material', 'macroscopic'}, optional
Type of object to plot. If not specified, a guess is made based on the
`this` argument.
axis : matplotlib.axes, optional
A previously generated axis to use for plotting. If not specified,
a new axis and figure will be generated.
sab_name : str, optional
Name of S(a,b) library to apply to MT=2 data when applicable; only used
for items which are instances of openmc.Element or openmc.Nuclide
ce_cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
mg_cross_sections : str, optional
Location of MGXS HDF5 Library file. Default is None.
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None. This is only used for
items which are instances of openmc.Element
plot_CE : bool, optional
Denotes whether or not continuous-energy will be plotted. Defaults to
plotting the continuous-energy data.
orders : Iterable of Integral, optional
The scattering order or delayed group index to use for the
corresponding entry in types. Defaults to the 0th order for scattering
and the total delayed neutron data. This only applies to plots of
multi-group data.
divisor_orders : Iterable of Integral, optional
Same as orders, but for divisor_types
**kwargs
All keyword arguments are passed to
:func:`matplotlib.pyplot.figure`.
Returns
-------
fig : matplotlib.figure.Figure
If axis is None, then a Matplotlib Figure of the generated
cross section will be returned. Otherwise, a value of
None will be returned as the figure and axes have already been
generated.
"""
import matplotlib.pyplot as plt
cv.check_type("plot_CE", plot_CE, bool)
if data_type is None:
if isinstance(this, openmc.Nuclide):
data_type = 'nuclide'
elif isinstance(this, openmc.Element):
data_type = 'element'
elif isinstance(this, openmc.Material):
data_type = 'material'
elif isinstance(this, openmc.Macroscopic):
data_type = 'macroscopic'
elif isinstance(this, str):
if this[-1] in string.digits:
data_type = 'nuclide'
else:
data_type = 'element'
else:
raise TypeError("Invalid type for plotting")
if plot_CE:
# Calculate for the CE cross sections
E, data = calculate_cexs(this, data_type, types, temperature, sab_name,
ce_cross_sections, enrichment)
if divisor_types:
cv.check_length('divisor types', divisor_types, len(types))
            Ediv, data_div = calculate_cexs(this, data_type, divisor_types,
                                            temperature, sab_name,
                                            ce_cross_sections, enrichment)
# Create a new union grid, interpolate data and data_div on to that
# grid, and then do the actual division
Enum = E[:]
E = np.union1d(Enum, Ediv)
data_new = np.zeros((len(types), len(E)))
for line in range(len(types)):
data_new[line, :] = \
np.divide(np.interp(E, Enum, data[line, :]),
np.interp(E, Ediv, data_div[line, :]))
if divisor_types[line] != 'unity':
types[line] = types[line] + ' / ' + divisor_types[line]
data = data_new
else:
# Calculate for MG cross sections
E, data = calculate_mgxs(this, data_type, types, orders, temperature,
mg_cross_sections, ce_cross_sections,
enrichment)
if divisor_types:
cv.check_length('divisor types', divisor_types, len(types))
Ediv, data_div = calculate_mgxs(this, data_type, divisor_types,
divisor_orders, temperature,
mg_cross_sections,
ce_cross_sections, enrichment)
# Perform the division
for line in range(len(types)):
data[line, :] /= data_div[line, :]
if divisor_types[line] != 'unity':
types[line] += ' / ' + divisor_types[line]
# Generate the plot
if axis is None:
fig = plt.figure(**kwargs)
ax = fig.add_subplot(111)
else:
fig = None
ax = axis
# Set to loglog or semilogx depending on if we are plotting a data
# type which we expect to vary linearly
if set(types).issubset(PLOT_TYPES_LINEAR):
plot_func = ax.semilogx
else:
plot_func = ax.loglog
# Plot the data
for i in range(len(data)):
data[i, :] = np.nan_to_num(data[i, :])
if np.sum(data[i, :]) > 0.:
plot_func(E, data[i, :], label=types[i])
ax.set_xlabel('Energy [eV]')
if plot_CE:
ax.set_xlim(_MIN_E, _MAX_E)
else:
ax.set_xlim(E[-1], E[0])
if divisor_types:
if data_type == 'nuclide':
ylabel = 'Nuclidic Microscopic Data'
elif data_type == 'element':
ylabel = 'Elemental Microscopic Data'
elif data_type == 'material' or data_type == 'macroscopic':
ylabel = 'Macroscopic Data'
else:
if data_type == 'nuclide':
ylabel = 'Microscopic Cross Section [b]'
elif data_type == 'element':
ylabel = 'Elemental Cross Section [b]'
elif data_type == 'material' or data_type == 'macroscopic':
ylabel = 'Macroscopic Cross Section [1/cm]'
ax.set_ylabel(ylabel)
ax.legend(loc='best')
name = this.name if data_type == 'material' else this
if len(types) > 1:
ax.set_title('Cross Sections for ' + name)
else:
ax.set_title('Cross Section for ' + name)
return fig
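# Illustrative sketch (added for demonstration, not part of the original
# module): one plausible way to call plot_xs() for a single nuclide. The
# nuclide name, cross section types and output file name are assumptions.
def _example_plot_nuclide(cross_sections=None):
    """Plot a few continuous-energy cross sections for U235 (hypothetical usage)."""
    fig = plot_xs('U235', ['total', 'elastic', 'fission'],
                  ce_cross_sections=cross_sections)
    if fig is not None:
        fig.savefig('u235_xs.png')  # assumed output path
    return fig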
def calculate_cexs(this, data_type, types, temperature=294., sab_name=None,
cross_sections=None, enrichment=None):
"""Calculates continuous-energy cross sections of a requested type.
Parameters
----------
this : {str, openmc.Nuclide, openmc.Element, openmc.Material}
Object to source data from
    data_type : {'nuclide', 'element', 'material'}
Type of object to plot
types : Iterable of values of PLOT_TYPES
The type of cross sections to calculate
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
sab_name : str, optional
Name of S(a,b) library to apply to MT=2 data when applicable.
cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
Returns
-------
energy_grid : numpy.ndarray
Energies at which cross sections are calculated, in units of eV
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
# Check types
cv.check_type('temperature', temperature, Real)
if sab_name:
cv.check_type('sab_name', sab_name, str)
if enrichment:
cv.check_type('enrichment', enrichment, Real)
if data_type == 'nuclide':
if isinstance(this, str):
nuc = openmc.Nuclide(this)
else:
nuc = this
energy_grid, xs = _calculate_cexs_nuclide(nuc, types, temperature,
sab_name, cross_sections)
# Convert xs (Iterable of Callable) to a grid of cross section values
        # calculated at the points in energy_grid for consistency with the
# element and material functions.
data = np.zeros((len(types), len(energy_grid)))
for line in range(len(types)):
data[line, :] = xs[line](energy_grid)
elif data_type == 'element':
if isinstance(this, str):
elem = openmc.Element(this)
else:
elem = this
energy_grid, data = _calculate_cexs_elem_mat(elem, types, temperature,
cross_sections, sab_name,
enrichment)
elif data_type == 'material':
cv.check_type('this', this, openmc.Material)
energy_grid, data = _calculate_cexs_elem_mat(this, types, temperature,
cross_sections)
else:
raise TypeError("Invalid type")
return energy_grid, data
def _calculate_cexs_nuclide(this, types, temperature=294., sab_name=None,
cross_sections=None):
"""Calculates continuous-energy cross sections of a requested type.
Parameters
----------
this : openmc.Nuclide
Nuclide object to source data from
types : Iterable of str or Integral
The type of cross sections to calculate; values can either be those
in openmc.PLOT_TYPES or integers which correspond to reaction
channel (MT) numbers.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
sab_name : str, optional
Name of S(a,b) library to apply to MT=2 data when applicable.
cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
Returns
-------
energy_grid : numpy.ndarray
Energies at which cross sections are calculated, in units of eV
data : Iterable of Callable
Requested cross section functions
"""
# Parse the types
mts = []
ops = []
yields = []
for line in types:
if line in PLOT_TYPES:
mts.append(PLOT_TYPES_MT[line])
if line.startswith('nu'):
yields.append(True)
else:
yields.append(False)
ops.append(PLOT_TYPES_OP[line])
else:
# Not a built-in type, we have to parse it ourselves
cv.check_type('MT in types', line, Integral)
cv.check_greater_than('MT in types', line, 0)
mts.append((line,))
ops.append(())
yields.append(False)
# Load the library
library = openmc.data.DataLibrary.from_xml(cross_sections)
# Convert temperature to format needed for access in the library
strT = "{}K".format(int(round(temperature)))
T = temperature
# Now we can create the data sets to be plotted
energy_grid = []
xs = []
lib = library.get_by_material(this)
if lib is not None:
nuc = openmc.data.IncidentNeutron.from_hdf5(lib['path'])
# Obtain the nearest temperature
if strT in nuc.temperatures:
nucT = strT
else:
delta_T = np.array(nuc.kTs) - T * openmc.data.K_BOLTZMANN
closest_index = np.argmin(np.abs(delta_T))
nucT = nuc.temperatures[closest_index]
# Prep S(a,b) data if needed
if sab_name:
sab = openmc.data.ThermalScattering.from_hdf5(sab_name)
# Obtain the nearest temperature
if strT in sab.temperatures:
sabT = strT
else:
delta_T = np.array(sab.kTs) - T * openmc.data.K_BOLTZMANN
closest_index = np.argmin(np.abs(delta_T))
sabT = sab.temperatures[closest_index]
            # Create an energy grid composed of the S(a,b) and the nuclide's grids
grid = nuc.energy[nucT]
sab_Emax = 0.
sab_funcs = []
if sab.elastic_xs:
elastic = sab.elastic_xs[sabT]
if isinstance(elastic, openmc.data.CoherentElastic):
grid = np.union1d(grid, elastic.bragg_edges)
if elastic.bragg_edges[-1] > sab_Emax:
sab_Emax = elastic.bragg_edges[-1]
elif isinstance(elastic, openmc.data.Tabulated1D):
grid = np.union1d(grid, elastic.x)
if elastic.x[-1] > sab_Emax:
sab_Emax = elastic.x[-1]
sab_funcs.append(elastic)
if sab.inelastic_xs:
inelastic = sab.inelastic_xs[sabT]
grid = np.union1d(grid, inelastic.x)
if inelastic.x[-1] > sab_Emax:
sab_Emax = inelastic.x[-1]
sab_funcs.append(inelastic)
energy_grid = grid
else:
energy_grid = nuc.energy[nucT]
for i, mt_set in enumerate(mts):
# Get the reaction xs data from the nuclide
funcs = []
op = ops[i]
for mt in mt_set:
if mt == 2:
if sab_name:
# Then we need to do a piece-wise function of
# The S(a,b) and non-thermal data
sab_sum = openmc.data.Sum(sab_funcs)
pw_funcs = openmc.data.Regions1D(
[sab_sum, nuc[mt].xs[nucT]],
[sab_Emax])
funcs.append(pw_funcs)
else:
funcs.append(nuc[mt].xs[nucT])
elif mt in nuc:
if yields[i]:
# Get the total yield first if available. This will be
# used primarily for fission.
for prod in chain(nuc[mt].products,
nuc[mt].derived_products):
if prod.particle == 'neutron' and \
prod.emission_mode == 'total':
func = openmc.data.Combination(
[nuc[mt].xs[nucT], prod.yield_],
[np.multiply])
funcs.append(func)
break
else:
# Total doesn't exist so we have to create from
# prompt and delayed. This is used for scatter
# multiplication.
func = None
for prod in chain(nuc[mt].products,
nuc[mt].derived_products):
if prod.particle == 'neutron' and \
prod.emission_mode != 'total':
if func:
func = openmc.data.Combination(
[prod.yield_, func], [np.add])
else:
func = prod.yield_
if func:
funcs.append(openmc.data.Combination(
[func, nuc[mt].xs[nucT]], [np.multiply]))
else:
# If func is still None, then there were no
# products. In that case, assume the yield is
                                # one as it's not provided for some summed
# reactions like MT=4
funcs.append(nuc[mt].xs[nucT])
else:
funcs.append(nuc[mt].xs[nucT])
elif mt == UNITY_MT:
funcs.append(lambda x: 1.)
elif mt == XI_MT:
awr = nuc.atomic_weight_ratio
alpha = ((awr - 1.) / (awr + 1.))**2
xi = 1. + alpha * np.log(alpha) / (1. - alpha)
funcs.append(lambda x: xi)
else:
funcs.append(lambda x: 0.)
xs.append(openmc.data.Combination(funcs, op))
else:
raise ValueError(this + " not in library")
return energy_grid, xs
def _calculate_cexs_elem_mat(this, types, temperature=294.,
cross_sections=None, sab_name=None,
enrichment=None):
"""Calculates continuous-energy cross sections of a requested type.
Parameters
----------
this : openmc.Material or openmc.Element
Object to source data from
types : Iterable of values of PLOT_TYPES
The type of cross sections to calculate
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
sab_name : str, optional
Name of S(a,b) library to apply to MT=2 data when applicable.
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
Returns
-------
energy_grid : numpy.ndarray
Energies at which cross sections are calculated, in units of eV
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
if isinstance(this, openmc.Material):
if this.temperature is not None:
T = this.temperature
else:
T = temperature
else:
T = temperature
# Load the library
library = openmc.data.DataLibrary.from_xml(cross_sections)
if isinstance(this, openmc.Material):
# Expand elements in to nuclides with atomic densities
nuclides = this.get_nuclide_atom_densities()
# For ease of processing split out the nuclide and its fraction
nuc_fractions = {nuclide[1][0]: nuclide[1][1]
for nuclide in nuclides.items()}
# Create a dict of [nuclide name] = nuclide object to carry forward
# with a common nuclides format between openmc.Material and
# openmc.Element objects
nuclides = {nuclide[1][0]: nuclide[1][0]
for nuclide in nuclides.items()}
else:
# Expand elements in to nuclides with atomic densities
nuclides = this.expand(1., 'ao', enrichment=enrichment,
cross_sections=cross_sections)
# For ease of processing split out the nuclide and its fraction
nuc_fractions = {nuclide[0]: nuclide[1] for nuclide in nuclides}
# Create a dict of [nuclide name] = nuclide object to carry forward
# with a common nuclides format between openmc.Material and
# openmc.Element objects
nuclides = {nuclide[0]: nuclide[0] for nuclide in nuclides}
# Identify the nuclides which have S(a,b) data
sabs = {}
for nuclide in nuclides.items():
sabs[nuclide[0]] = None
if isinstance(this, openmc.Material):
for sab_name in this._sab:
sab = openmc.data.ThermalScattering.from_hdf5(
library.get_by_material(sab_name)['path'])
for nuc in sab.nuclides:
sabs[nuc] = library.get_by_material(sab_name)['path']
else:
if sab_name:
sab = openmc.data.ThermalScattering.from_hdf5(sab_name)
for nuc in sab.nuclides:
sabs[nuc] = library.get_by_material(sab_name)['path']
# Now we can create the data sets to be plotted
xs = {}
E = []
for nuclide in nuclides.items():
name = nuclide[0]
nuc = nuclide[1]
sab_tab = sabs[name]
temp_E, temp_xs = calculate_cexs(nuc, 'nuclide', types, T, sab_tab,
cross_sections)
E.append(temp_E)
# Since the energy grids are different, store the cross sections as
# a tabulated function so they can be calculated on any grid needed.
xs[name] = [openmc.data.Tabulated1D(temp_E, temp_xs[line])
for line in range(len(types))]
# Condense the data for every nuclide
# First create a union energy grid
energy_grid = E[0]
for grid in E[1:]:
energy_grid = np.union1d(energy_grid, grid)
# Now we can combine all the nuclidic data
data = np.zeros((len(types), len(energy_grid)))
for line in range(len(types)):
if types[line] == 'unity':
data[line, :] = 1.
else:
for nuclide in nuclides.items():
name = nuclide[0]
data[line, :] += (nuc_fractions[name] *
xs[name][line](energy_grid))
return energy_grid, data
def calculate_mgxs(this, data_type, types, orders=None, temperature=294.,
cross_sections=None, ce_cross_sections=None,
enrichment=None):
"""Calculates multi-group cross sections of a requested type.
If the data for the nuclide or macroscopic object in the library is
represented as angle-dependent data then this method will return the
geometric average cross section over all angles.
Parameters
----------
this : str or openmc.Material
Object to source data from
    data_type : {'nuclide', 'element', 'material', 'macroscopic'}
Type of object to plot
types : Iterable of values of PLOT_TYPES_MGXS
The type of cross sections to calculate
orders : Iterable of Integral, optional
The scattering order or delayed group index to use for the
corresponding entry in types. Defaults to the 0th order for scattering
and the total delayed neutron data.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
cross_sections : str, optional
Location of MGXS HDF5 Library file. Default is None.
ce_cross_sections : str, optional
Location of continuous-energy cross_sections.xml file. Default is None.
This is used only for expanding an openmc.Element object passed as this
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
Returns
-------
energy_grid : numpy.ndarray
Energies at which cross sections are calculated, in units of eV
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
# Check types
cv.check_type('temperature', temperature, Real)
if enrichment:
cv.check_type('enrichment', enrichment, Real)
cv.check_iterable_type('types', types, str)
cv.check_type("cross_sections", cross_sections, str)
library = openmc.MGXSLibrary.from_hdf5(cross_sections)
if data_type in ('nuclide', 'macroscopic'):
mgxs = _calculate_mgxs_nuc_macro(this, types, library, orders,
temperature)
elif data_type in ('element', 'material'):
mgxs = _calculate_mgxs_elem_mat(this, types, library, orders,
temperature, ce_cross_sections,
enrichment)
else:
raise TypeError("Invalid type")
# Convert the data to the format needed
data = np.zeros((len(types), 2 * library.energy_groups.num_groups))
energy_grid = np.zeros(2 * library.energy_groups.num_groups)
for g in range(library.energy_groups.num_groups):
energy_grid[g * 2: g * 2 + 2] = \
library.energy_groups.group_edges[g: g + 2]
# Ensure the energy will show on a log-axis by replacing 0s with a
# sufficiently small number
energy_grid[0] = max(energy_grid[0], _MIN_E)
for line in range(len(types)):
for g in range(library.energy_groups.num_groups):
data[line, g * 2: g * 2 + 2] = mgxs[line, g]
return energy_grid[::-1], data
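# Minimal sketch (added for illustration, not part of the original module):
# calculate_mgxs() duplicates each group edge so that a group-wise constant
# value plots as a step function. This helper reproduces that grid doubling
# with plain numpy for an assumed 3-group structure.
def _example_step_grid():
    group_edges = np.array([1.e-5, 1.0, 1.e3, 20.e6])  # assumed group edges [eV]
    mgxs = np.array([3.0, 1.5, 0.7])                   # assumed group-wise data
    num_groups = len(mgxs)
    energy_grid = np.zeros(2 * num_groups)
    data = np.zeros(2 * num_groups)
    for g in range(num_groups):
        energy_grid[g * 2: g * 2 + 2] = group_edges[g: g + 2]
        data[g * 2: g * 2 + 2] = mgxs[g]
    return energy_grid, data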
def _calculate_mgxs_nuc_macro(this, types, library, orders=None,
temperature=294.):
"""Determines the multi-group cross sections of a nuclide or macroscopic
object.
If the data for the nuclide or macroscopic object in the library is
represented as angle-dependent data then this method will return the
geometric average cross section over all angles.
Parameters
----------
this : openmc.Nuclide or openmc.Macroscopic
Object to source data from
types : Iterable of str
The type of cross sections to calculate; values can either be those
in openmc.PLOT_TYPES_MGXS
library : openmc.MGXSLibrary
MGXS Library containing the data of interest
orders : Iterable of Integral, optional
The scattering order or delayed group index to use for the
corresponding entry in types. Defaults to the 0th order for scattering
and the total delayed neutron data.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
Returns
-------
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
# Check the parameters and grab order/delayed groups
if orders:
cv.check_iterable_type('orders', orders, Integral,
min_depth=len(types), max_depth=len(types))
else:
orders = [None] * len(types)
for i, line in enumerate(types):
cv.check_type("line", line, str)
cv.check_value("line", line, PLOT_TYPES_MGXS)
if orders[i]:
cv.check_greater_than("order value", orders[i], 0, equality=True)
xsdata = library.get_by_name(this)
if xsdata is not None:
# Obtain the nearest temperature
t = np.abs(xsdata.temperatures - temperature).argmin()
# Get the data
data = np.zeros((len(types), library.energy_groups.num_groups))
for i, line in enumerate(types):
if 'fission' in line and not xsdata.fissionable:
continue
elif line == 'unity':
data[i, :] = 1.
else:
# Now we have to get the cross section data and properly
# treat it depending on the requested type.
# First get the data in a generic fashion
temp_data = getattr(xsdata, _PLOT_MGXS_ATTR[line])[t]
shape = temp_data.shape[:]
# If we have angular data, then want the geometric
# average over all provided angles. Since the angles are
# equi-distant, un-weighted averaging will suffice
if xsdata.representation == 'angle':
temp_data = np.mean(temp_data, axis=(0, 1))
# Now we can look at the shape of the data to identify how
# it should be modified to produce an array of values
# with groups.
if shape in (xsdata.xs_shapes["[G']"],
xsdata.xs_shapes["[G]"]):
# Then the data is already an array vs groups so copy
# and move along
data[i, :] = temp_data
elif shape == xsdata.xs_shapes["[G][G']"]:
# Sum the data over outgoing groups to create our array vs
# groups
data[i, :] = np.sum(temp_data, axis=1)
elif shape == xsdata.xs_shapes["[DG]"]:
# Then we have a constant vs groups with a value for each
# delayed group. The user-provided value of orders tells us
# which delayed group we want. If none are provided, then
# we sum all the delayed groups together.
if orders[i]:
                        if orders[i] < shape[0]:
data[i, :] = temp_data[orders[i]]
else:
data[i, :] = np.sum(temp_data[:])
elif shape in (xsdata.xs_shapes["[DG][G']"],
xsdata.xs_shapes["[DG][G]"]):
# Then we have an array vs groups with values for each
# delayed group. The user-provided value of orders tells us
# which delayed group we want. If none are provided, then
# we sum all the delayed groups together.
if orders[i]:
                        if orders[i] < shape[0]:
data[i, :] = temp_data[orders[i], :]
else:
data[i, :] = np.sum(temp_data[:, :], axis=0)
elif shape == xsdata.xs_shapes["[DG][G][G']"]:
# Then we have a delayed group matrix. We will first
# remove the outgoing group dependency
temp_data = np.sum(temp_data, axis=-1)
# And then proceed in exactly the same manner as the
# "[DG][G']" or "[DG][G]" shapes in the previous block.
if orders[i]:
                        if orders[i] < shape[0]:
data[i, :] = temp_data[orders[i], :]
else:
data[i, :] = np.sum(temp_data[:, :], axis=0)
elif shape == xsdata.xs_shapes["[G][G'][Order]"]:
# This is a scattering matrix with angular data
# First remove the outgoing group dependence
temp_data = np.sum(temp_data, axis=1)
# The user either provided a specific order or we resort
# to the default 0th order
if orders[i]:
order = orders[i]
else:
order = 0
# If the order is available, store the data for that order
# if it is not available, then the expansion coefficient
# is zero and thus we already have the correct value.
if order < shape[1]:
data[i, :] = temp_data[:, order]
else:
raise ValueError("{} not present in provided MGXS "
"library".format(this))
return data
def _calculate_mgxs_elem_mat(this, types, library, orders=None,
temperature=294., ce_cross_sections=None,
enrichment=None):
"""Determines the multi-group cross sections of an element or material
object.
If the data for the nuclide or macroscopic object in the library is
represented as angle-dependent data then this method will return the
geometric average cross section over all angles.
Parameters
----------
this : openmc.Element or openmc.Material
Object to source data from
types : Iterable of str
The type of cross sections to calculate; values can either be those
in openmc.PLOT_TYPES_MGXS
library : openmc.MGXSLibrary
MGXS Library containing the data of interest
orders : Iterable of Integral, optional
The scattering order or delayed group index to use for the
corresponding entry in types. Defaults to the 0th order for scattering
and the total delayed neutron data.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
ce_cross_sections : str, optional
Location of continuous-energy cross_sections.xml file. Default is None.
This is used only for expanding the elements
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
Returns
-------
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
if isinstance(this, openmc.Material):
if this.temperature is not None:
T = this.temperature
else:
T = temperature
        # Check to see if we have nuclides/elements or a macroscopic object
if this._macroscopic is not None:
# We have macroscopics
nuclides = {this._macroscopic: (this._macroscopic, this.density)}
else:
# Expand elements in to nuclides with atomic densities
nuclides = this.get_nuclide_atom_densities()
# For ease of processing split out nuc and nuc_density
nuc_fraction = [nuclide[1][1] for nuclide in nuclides.items()]
else:
T = temperature
# Expand elements in to nuclides with atomic densities
nuclides = this.expand(100., 'ao', enrichment=enrichment,
cross_sections=ce_cross_sections)
# For ease of processing split out nuc and nuc_fractions
nuc_fraction = [nuclide[1] for nuclide in nuclides]
nuc_data = []
for nuclide in nuclides.items():
nuc_data.append(_calculate_mgxs_nuc_macro(nuclide[0], types, library,
orders, T))
# Combine across the nuclides
data = np.zeros((len(types), library.energy_groups.num_groups))
for line in range(len(types)):
if types[line] == 'unity':
data[line, :] = 1.
else:
for n in range(len(nuclides)):
data[line, :] += nuc_fraction[n] * nuc_data[n][line, :]
return data
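# Illustrative sketch (added for demonstration, not part of the original
# module): building a simple material and handing it to plot_xs(). The nuclide
# fractions and density below are placeholders, not a validated composition.
def _example_plot_material(cross_sections=None):
    import openmc
    fuel = openmc.Material(name='fuel')  # hypothetical material
    fuel.add_nuclide('U235', 0.05)
    fuel.add_nuclide('U238', 0.95)
    fuel.set_density('g/cm3', 10.0)
    return plot_xs(fuel, ['total', 'absorption'],
                   ce_cross_sections=cross_sections)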
|
johnnyliu27/openmc
|
openmc/plotter.py
|
Python
|
mit
| 38,526
|
# Library available here: https://github.com/FND/markdown-checklist
import re
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from markdown.postprocessors import Postprocessor
def makeExtension(configs=[]):
return ChecklistExtension(configs=configs)
class ChecklistExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.postprocessors.add('checklist', ChecklistPostprocessor(md),
'>raw_html')
class ChecklistPostprocessor(Postprocessor):
"""
adds checklist class to list element
"""
pattern = re.compile(r'<li>\[([ Xx])\]')
def run(self, html):
html = re.sub(self.pattern, self._convert_checkbox, html)
before = '<ul>\n<li><input type="checkbox"'
after = before.replace('<ul>', '<ul class="checklist">')
return html.replace(before, after)
def _convert_checkbox(self, match):
state = match.group(1)
checked = ' checked' if state != ' ' else ''
return '<li><input type="checkbox" disabled%s>' % checked
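# Hypothetical usage sketch (not part of the original extension): rendering a
# GitHub-style task list through the markdown package with this extension
# enabled. The input text is an assumption for demonstration only.
def _example_render():
    import markdown
    text = "- [x] write the extension\n- [ ] publish it"
    return markdown.markdown(text, extensions=[ChecklistExtension()])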
|
byohay/Remarkable
|
markdown/extensions/markdown_checklist.py
|
Python
|
mit
| 1,078
|
# Copyright (c) 2013 eBay Inc.
# Copyright (c) OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
QoS Specs interface.
"""
from cinderclient import base
class QoSSpecs(base.Resource):
"""QoS specs entity represents quality-of-service parameters/requirements.
A QoS specs is a set of parameters or requirements for quality-of-service
purpose, which can be associated with volume types (for now). In future,
QoS specs may be extended to be associated other entities, such as single
volume.
"""
def __repr__(self):
return "<QoSSpecs: %s>" % self.name
def delete(self):
return self.manager.delete(self)
class QoSSpecsManager(base.ManagerWithFind):
"""
Manage :class:`QoSSpecs` resources.
"""
resource_class = QoSSpecs
def list(self, search_opts=None):
"""Get a list of all qos specs.
:rtype: list of :class:`QoSSpecs`.
"""
return self._list("/qos-specs", "qos_specs")
def get(self, qos_specs):
"""Get a specific qos specs.
:param qos_specs: The ID of the :class:`QoSSpecs` to get.
:rtype: :class:`QoSSpecs`
"""
return self._get("/qos-specs/%s" % base.getid(qos_specs), "qos_specs")
def delete(self, qos_specs, force=False):
"""Delete a specific qos specs.
:param qos_specs: The ID of the :class:`QoSSpecs` to be removed.
:param force: Flag that indicates whether to delete target qos specs
if it was in-use.
"""
self._delete("/qos-specs/%s?force=%s" %
(base.getid(qos_specs), force))
def create(self, name, specs):
"""Create a qos specs.
:param name: Descriptive name of the qos specs, must be unique
:param specs: A dict of key/value pairs to be set
:rtype: :class:`QoSSpecs`
"""
body = {
"qos_specs": {
"name": name,
}
}
body["qos_specs"].update(specs)
return self._create("/qos-specs", body, "qos_specs")
def set_keys(self, qos_specs, specs):
"""Add/Update keys in qos specs.
:param qos_specs: The ID of qos specs
:param specs: A dict of key/value pairs to be set
:rtype: :class:`QoSSpecs`
"""
body = {
"qos_specs": {}
}
body["qos_specs"].update(specs)
return self._update("/qos-specs/%s" % qos_specs, body)
def unset_keys(self, qos_specs, specs):
"""Remove keys from a qos specs.
:param qos_specs: The ID of qos specs
:param specs: A list of key to be unset
:rtype: :class:`QoSSpecs`
"""
body = {'keys': specs}
return self._update("/qos-specs/%s/delete_keys" % qos_specs,
body)
def get_associations(self, qos_specs):
"""Get associated entities of a qos specs.
:param qos_specs: The id of the :class: `QoSSpecs`
:return: a list of entities that associated with specific qos specs.
"""
return self._list("/qos-specs/%s/associations" % base.getid(qos_specs),
"qos_associations")
def associate(self, qos_specs, vol_type_id):
"""Associate a volume type with specific qos specs.
:param qos_specs: The qos specs to be associated with
:param vol_type_id: The volume type id to be associated with
"""
self.api.client.get("/qos-specs/%s/associate?vol_type_id=%s" %
(base.getid(qos_specs), vol_type_id))
def disassociate(self, qos_specs, vol_type_id):
"""Disassociate qos specs from volume type.
:param qos_specs: The qos specs to be associated with
:param vol_type_id: The volume type id to be associated with
"""
self.api.client.get("/qos-specs/%s/disassociate?vol_type_id=%s" %
(base.getid(qos_specs), vol_type_id))
def disassociate_all(self, qos_specs):
"""Disassociate all entities from specific qos specs.
:param qos_specs: The qos specs to be associated with
"""
self.api.client.get("/qos-specs/%s/disassociate_all" %
base.getid(qos_specs))
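# Illustrative sketch (not part of the original module): QoSSpecsManager is
# normally reached through a v2 cinderclient Client instance. The credentials
# and spec values below are placeholders, not a working configuration.
def _example_qos_workflow():
    from cinderclient.v2 import client as cinder_client
    c = cinder_client.Client('user', 'password', 'project',
                             'http://keystone.example:5000/v2.0')
    gold = c.qos_specs.create('gold', {'read_iops_sec': '1000'})
    c.qos_specs.set_keys(gold.id, {'write_iops_sec': '500'})
    return c.qos_specs.list()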
|
sjsucohort6/openstack
|
python/venv/lib/python2.7/site-packages/cinderclient/v2/qos_specs.py
|
Python
|
mit
| 4,789
|
#!/usr/bin/env python
#
# Raman off-resonant activity calculator
# using VASP as a back-end.
#
# Contributors: Alexandr Fonari (Georgia Tech)
# Shannon Stauffer (UT Austin)
#
# MIT license, 2013
#
def parse_poscar_header(inp_fh):
import sys
from math import sqrt
#
inp_fh.seek(0) # just in case
poscar_header = ""
vol = 0.0
b = []
atom_numbers = []
#
inp_fh.readline() # skip title
scale = float(inp_fh.readline())
for i in range(3): b.append( [float(s) for s in inp_fh.readline().split()] )
#
if scale > 0.0:
b = [[ b[i][j]*scale for i in range(3)] for j in range(3) ]
scale = 1.0
#
vol = b[0][0]*b[1][1]*b[2][2] + b[1][0]*b[2][1]*b[0][2] + b[2][0]*b[0][1]*b[1][2] - \
b[0][2]*b[1][1]*b[2][0] - b[2][1]*b[1][2]*b[0][0] - b[2][2]*b[0][1]*b[1][0]
else:
print "[parse_poscar]: ERROR negative scale not implemented."
vol = scale
sys.exit(1)
#
atom_labels = inp_fh.readline() # yes, it is hardcoded for VASP5
atom_numbers = [int(s) for s in inp_fh.readline().split()]
nat = sum(atom_numbers)
#
poscar_header += "%15.12f\n" % scale
poscar_header += "%15.12f %15.12f %15.12f\n" % (b[0][0], b[0][1], b[0][2])
poscar_header += "%15.12f %15.12f %15.12f\n" % (b[1][0], b[1][1], b[1][2])
poscar_header += "%15.12f %15.12f %15.12f\n" % (b[2][0], b[2][1], b[2][2])
poscar_header += atom_labels
poscar_header += " ".join(str(x) for x in atom_numbers)+"\n"
#
return nat, vol, poscar_header
#
def parse_env_params(params):
import sys
#
tmp = params.strip().split('_')
if len(tmp) != 4:
print "[parse_env_params]: ERROR there should be exactly four parameters"
sys.exit(1)
#
[first, last, nderiv, step_size] = [int(tmp[0]), int(tmp[1]), int(tmp[2]), float(tmp[3])]
#
return first, last, nderiv, step_size
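#
# Illustrative sketch (not part of the original script): VASP_RAMAN_PARAMS is
# an underscore-separated string; '1_2_2_0.01' (the value used in the help
# text below) selects modes 1..2, a two-point derivative and a 0.01 step.
def _example_parse_params():
    return parse_env_params('1_2_2_0.01')  # -> (1, 2, 2, 0.01)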
#
def get_modes_from_OUTCAR(outcar_fh, nat):
import sys
import re
from math import sqrt
eigvals = [ 0.0 for i in range(nat*3) ]
eigvecs = [ 0.0 for i in range(nat*3) ]
norms = [ 0.0 for i in range(nat*3) ]
pos = [ 0.0 for i in range(nat) ]
#
outcar_fh.seek(0) # just in case
while True:
line = outcar_fh.readline()
if not line:
break
#
if "Eigenvectors after division by SQRT(mass)" in line:
outcar_fh.readline() # empty line
outcar_fh.readline() # Eigenvectors and eigenvalues of the dynamical matrix
outcar_fh.readline() # ----------------------------------------------------
outcar_fh.readline() # empty line
#
for i in range(nat*3): # all frequencies should be supplied, regardless of those requested to calculate
outcar_fh.readline() # empty line
p = re.search(r'^\s*(\d+).+?([\.\d]+) cm-1', outcar_fh.readline())
eigvals[i] = float(p.group(2))
#
outcar_fh.readline() # X Y Z dx dy dz
eigvec = []
#
for j in range(nat):
tmp = outcar_fh.readline().split()
if i == 0: pos[j] = [ float(tmp[x]) for x in range(3) ] # get atomic positions only once
#
eigvec.append([ float(tmp[x]) for x in range(3,6) ])
#
eigvecs[i] = eigvec
norms[i] = sqrt( sum( [abs(x)**2 for sublist in eigvec for x in sublist] ) )
#
return pos, eigvals, eigvecs, norms
#
print "[get_modes_from_OUTCAR]: ERROR Couldn't find 'Eigenvectors after division by SQRT(mass)' in OUTCAR. Use 'NWRITE=3' in INCAR. Exiting..."
sys.exit(1)
#
def get_epsilon_from_OUTCAR(outcar_fh):
import re
import sys
epsilon = []
#
outcar_fh.seek(0) # just in case
while True:
line = outcar_fh.readline()
if not line:
break
#
if "MACROSCOPIC STATIC DIELECTRIC TENSOR" in line:
outcar_fh.readline()
epsilon.append([float(x) for x in outcar_fh.readline().split()])
epsilon.append([float(x) for x in outcar_fh.readline().split()])
epsilon.append([float(x) for x in outcar_fh.readline().split()])
return epsilon
#
raise RuntimeError("[get_epsilon_from_OUTCAR]: ERROR Couldn't find dielectric tensor in OUTCAR")
return 1
#
if __name__ == '__main__':
import sys
from math import pi
from shutil import move
import os
import datetime
import time
#import argparse
import optparse
#
print ""
print " Raman off-resonant activity calculator,"
print " using VASP as a back-end."
print ""
print " Contributors: Alexandr Fonari (Georgia Tech)"
print " Shannon Stauffer (UT Austin)"
print " MIT License, 2013"
print " URL: http://raman-sc.github.io"
print " Started at: "+datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
print ""
#
description = "Before run, set environment variables:\n"
description += " VASP_RAMAN_RUN='mpirun vasp'\n"
description += " VASP_RAMAN_PARAMS='[first-mode]_[last-mode]_[nderiv]_[step-size]'\n\n"
description += "bash one-liner is:\n"
description += "VASP_RAMAN_RUN='mpirun vasp' VASP_RAMAN_PARAMS='1_2_2_0.01' python vasp_raman.py"
#
parser = optparse.OptionParser(description=description)
parser.add_option('-g', '--gen', help='Generate POSCAR only', action='store_true')
parser.add_option('-u', '--use_poscar', help='Use provided POSCAR in the folder, USE WITH CAUTION!!', action='store_true')
(options, args) = parser.parse_args()
#args = vars(parser.parse_args())
args = vars(options)
#
VASP_RAMAN_RUN = os.environ.get('VASP_RAMAN_RUN')
if VASP_RAMAN_RUN == None:
print "[__main__]: ERROR Set environment variable 'VASP_RAMAN_RUN'"
print ""
parser.print_help()
sys.exit(1)
print "[__main__]: VASP_RAMAN_RUN='"+VASP_RAMAN_RUN+"'"
#
VASP_RAMAN_PARAMS = os.environ.get('VASP_RAMAN_PARAMS')
if VASP_RAMAN_PARAMS == None:
print "[__main__]: ERROR Set environment variable 'VASP_RAMAN_PARAMS'"
print ""
parser.print_help()
sys.exit(1)
print "[__main__]: VASP_RAMAN_PARAMS='"+VASP_RAMAN_PARAMS+"'"
#
first, last, nderiv, step_size = parse_env_params(VASP_RAMAN_PARAMS)
assert first >= 1, '[__main__]: First mode should be equal or larger than 1'
assert last >= first, '[__main__]: Last mode should be equal or larger than first mode'
if args['gen']: assert last == first, "[__main__]: '-gen' mode -> only generation for the one mode makes sense"
assert nderiv == 2, '[__main__]: At this time, nderiv = 2 is the only supported'
disps = [-1, 1] # hardcoded for
coeffs = [-0.5, 0.5] # three point stencil (nderiv=2)
#
try:
poscar_fh = open('POSCAR.phon', 'r')
except IOError:
print "[__main__]: ERROR Couldn't open input file POSCAR.phon, exiting...\n"
sys.exit(1)
#
nat, vol, poscar_header = parse_poscar_header(poscar_fh)
poscar_fh.close()
#
try:
outcar_fh = open('OUTCAR.phon', 'r')
except IOError:
print "[__main__]: ERROR Couldn't open OUTCAR.phon, exiting...\n"
sys.exit(1)
#
pos, eigvals, eigvecs, norms = get_modes_from_OUTCAR(outcar_fh, nat)
outcar_fh.close()
#
output_fh = open('vasp_raman.dat', 'w')
output_fh.write("# mode freq(cm-1) alpha beta2 activity\n")
for i in range(first-1, last):
eigval = eigvals[i]
eigvec = eigvecs[i]
norm = norms[i]
#
print ""
print "[__main__]: Mode #%i: frequency %10.7f cm-1; norm: %10.7f" % ( i+1, eigval, norm )
#
ra = [[0.0 for x in range(3)] for y in range(3)]
for j in range(len(disps)):
disp_filename = 'OUTCAR.%04d.%+d.out' % (i+1, disps[j])
#
try:
outcar_fh = open(disp_filename, 'r')
print "[__main__]: File "+disp_filename+" exists, parsing..."
except IOError:
if args['use_poscar'] != True:
print "[__main__]: File "+disp_filename+" not found, preparing displaced POSCAR"
poscar_fh = open('POSCAR', 'w')
poscar_fh.write("%s %4.1e \n" % (disp_filename, step_size))
poscar_fh.write(poscar_header)
poscar_fh.write("Cartesian\n")
#
for k in range(nat):
pos_disp = [ pos[k][l] + eigvec[k][l]*step_size*disps[j]/norm for l in range(3)]
poscar_fh.write( '%15.10f %15.10f %15.10f\n' % (pos_disp[0], pos_disp[1], pos_disp[2]) )
#print '%10.6f %10.6f %10.6f %10.6f %10.6f %10.6f' % (pos[k][0], pos[k][1], pos[k][2], dis[k][0], dis[k][1], dis[k][2])
poscar_fh.close()
else:
print "[__main__]: Using provided POSCAR"
#
if args['gen']: # only generate POSCARs
poscar_fn = 'POSCAR.%+d.out' % disps[j]
move('POSCAR', poscar_fn)
print "[__main__]: '-gen' mode -> "+poscar_fn+" with displaced atoms have been generated"
#
if j+1 == len(disps): # last iteration for the current displacements list
print "[__main__]: '-gen' mode -> POSCAR files with displaced atoms have been generated, exiting now"
sys.exit(0)
else: # run VASP here
print "[__main__]: Running VASP..."
os.system(VASP_RAMAN_RUN)
try:
move('OUTCAR', disp_filename)
except IOError:
print "[__main__]: ERROR Couldn't find OUTCAR file, exiting..."
sys.exit(1)
#
outcar_fh = open(disp_filename, 'r')
#
try:
eps = get_epsilon_from_OUTCAR(outcar_fh)
outcar_fh.close()
except Exception, err:
print err
print "[__main__]: Moving "+disp_filename+" back to 'OUTCAR' and exiting..."
move(disp_filename, 'OUTCAR')
sys.exit(1)
#
for m in range(3):
for n in range(3):
ra[m][n] += eps[m][n] * coeffs[j]/step_size * norm * vol/(4.0*pi)
#units: A^2/amu^1/2 = dimless * 1/A * 1/amu^1/2 * A^3
#
alpha = (ra[0][0] + ra[1][1] + ra[2][2])/3.0
beta2 = ( (ra[0][0] - ra[1][1])**2 + (ra[0][0] - ra[2][2])**2 + (ra[1][1] - ra[2][2])**2 + 6.0 * (ra[0][1]**2 + ra[0][2]**2 + ra[1][2]**2) )/2.0
print ""
print "! %4i freq: %10.5f alpha: %10.7f beta2: %10.7f activity: %10.7f " % (i+1, eigval, alpha, beta2, 45.0*alpha**2 + 7.0*beta2)
output_fh.write("%i %10.5f %10.7f %10.7f %10.7f\n" % (i+1, eigval, alpha, beta2, 45.0*alpha**2 + 7.0*beta2))
output_fh.flush()
#
output_fh.close()
|
alexandr-fonari/raman-sc
|
VASP/vasp_raman.py
|
Python
|
mit
| 11,443
|
def load_text_file(text_file: str) -> str:
with open(text_file, 'r') as f:
return f.read()
|
kyuridenamida/atcoder-tools
|
atcodertools/fileutils/load_text_file.py
|
Python
|
mit
| 103
|
"""
WSGI config for bangkok project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bangkok.settings")
application = get_wsgi_application()
|
zkan/microservices-with-swarm-101
|
services/front_gateway/front_gateway/wsgi.py
|
Python
|
mit
| 392
|
from struct import pack, unpack
from time import time
from communication.ComAPI.packet import Packet
class PacketLogin(Packet):
"""Class for constructing binary data based
on a common API between client / server."""
def __init__(self):
super().__init__()
self.packetID = 3
def encode(self, username, avatar, position):
"""
Encode a message with API format
DRPG + PacketID + username length + username
+ avatar length + avatar + x + y + z
"""
bContainer = super().encode()
        ## Add username, avatar and position
        ## TODO: Be aware of byte order from client for portable version
        bContainer += pack(">B", len(username))
        bContainer += username.encode()
        bContainer += pack(">B", len(avatar))
        bContainer += avatar.encode()
        bContainer += pack(">f", position[0])
        bContainer += pack(">f", position[1])
        bContainer += pack(">f", position[2])
return bContainer
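# Illustrative sketch (not part of the original class): encoding one login
# packet. The username, avatar and position are placeholder values; the exact
# leading bytes ("DRPG" + packet id) come from Packet.encode() in the base class.
def _example_encode_login():
    packet = PacketLogin()
    return packet.encode("alice", "knight", (1.0, 2.0, 3.0))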
|
DanAurea/Trisdanvalwen
|
communication/ComAPI/packetLogin.py
|
Python
|
mit
| 1,024
|
import hashlib
import requests
import threading
import json
import sys
import traceback
import base64
import electrum_vtc as electrum
from electrum_vtc.plugins import BasePlugin, hook
from electrum_vtc.i18n import _
class LabelsPlugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.target_host = 'labels.bauerj.eu'
self.wallets = {}
def encode(self, wallet, msg):
password, iv, wallet_id = self.wallets[wallet]
encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,
msg.encode('utf8'))
return base64.b64encode(encrypted)
def decode(self, wallet, message):
password, iv, wallet_id = self.wallets[wallet]
decoded = base64.b64decode(message)
decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)
return decrypted.decode('utf8')
def get_nonce(self, wallet):
# nonce is the nonce to be used with the next change
nonce = wallet.storage.get('wallet_nonce')
if nonce is None:
nonce = 1
self.set_nonce(wallet, nonce)
return nonce
def set_nonce(self, wallet, nonce):
self.print_error("set", wallet.basename(), "nonce to", nonce)
wallet.storage.put("wallet_nonce", nonce)
@hook
def set_label(self, wallet, item, label):
if not wallet in self.wallets:
return
nonce = self.get_nonce(wallet)
wallet_id = self.wallets[wallet][2]
bundle = {"walletId": wallet_id,
"walletNonce": nonce,
"externalId": self.encode(wallet, item),
"encryptedLabel": self.encode(wallet, label)}
t = threading.Thread(target=self.do_request,
args=["POST", "/label", False, bundle])
t.setDaemon(True)
t.start()
# Caller will write the wallet
self.set_nonce(wallet, nonce + 1)
def do_request(self, method, url = "/labels", is_batch=False, data=None):
url = 'https://' + self.target_host + url
kwargs = {'headers': {}}
if method == 'GET' and data:
kwargs['params'] = data
elif method == 'POST' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['Content-Type'] = 'application/json'
response = requests.request(method, url, **kwargs)
if response.status_code != 200:
raise BaseException(response.status_code, response.text)
response = response.json()
if "error" in response:
raise BaseException(response["error"])
return response
def push_thread(self, wallet):
wallet_id = self.wallets[wallet][2]
bundle = {"labels": [],
"walletId": wallet_id,
"walletNonce": self.get_nonce(wallet)}
for key, value in wallet.labels.iteritems():
try:
encoded_key = self.encode(wallet, key)
encoded_value = self.encode(wallet, value)
except:
self.print_error('cannot encode', repr(key), repr(value))
continue
bundle["labels"].append({'encryptedLabel': encoded_value,
'externalId': encoded_key})
self.do_request("POST", "/labels", True, bundle)
def pull_thread(self, wallet, force):
wallet_id = self.wallets[wallet][2]
nonce = 1 if force else self.get_nonce(wallet) - 1
self.print_error("asking for labels since nonce", nonce)
try:
response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id) ))
if response["labels"] is None:
self.print_error('no new labels')
return
result = {}
for label in response["labels"]:
try:
key = self.decode(wallet, label["externalId"])
value = self.decode(wallet, label["encryptedLabel"])
except:
continue
try:
json.dumps(key)
json.dumps(value)
except:
self.print_error('error: no json', key)
continue
result[key] = value
for key, value in result.items():
if force or not wallet.labels.get(key):
wallet.labels[key] = value
self.print_error("received %d labels" % len(response))
# do not write to disk because we're in a daemon thread
wallet.storage.put('labels', wallet.labels)
self.set_nonce(wallet, response["nonce"] + 1)
self.on_pulled(wallet)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.print_error("could not retrieve labels")
def start_wallet(self, wallet):
nonce = self.get_nonce(wallet)
self.print_error("wallet", wallet.basename(), "nonce is", nonce)
mpk = wallet.get_fingerprint()
if not mpk:
return
password = hashlib.sha1(mpk).digest().encode('hex')[:32]
iv = hashlib.sha256(password).digest()[:16]
wallet_id = hashlib.sha256(mpk).digest().encode('hex')
self.wallets[wallet] = (password, iv, wallet_id)
# If there is an auth token we can try to actually start syncing
t = threading.Thread(target=self.pull_thread, args=(wallet, False))
t.setDaemon(True)
t.start()
def stop_wallet(self, wallet):
self.wallets.pop(wallet, None)
|
pknight007/electrum-vtc
|
plugins/labels/labels.py
|
Python
|
mit
| 5,710
|
#!/usr/bin/env python2.7
from argparse import ArgumentParser
parser = ArgumentParser()
args = parser.parse_args()
from sys import exit
from subtlenet.models import cluster as train
import numpy as np
from subtlenet.utils import mpl, plt
from mpl_toolkits.mplot3d import Axes3D
train.NEPOCH = 10
train.encoded_size = 2
dims = train.instantiate('RadialClustering')
gen = train.setup_data(batch_size=100)
clusterer, encoder = train.build_model(dims, w_ae=0.1)
train.train(clusterer, 'cluster', gen['train'], gen['validate'])
plotgen = train.gen(batch_size=1000, label=True)()
i, o, _ = next(plotgen)
i = i[0]
p = clusterer.predict(i)[1]
d = clusterer.predict(i)[0]
e = encoder.predict(i)
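# Clarifying note (inferred from the usage below; not part of the original
# script): 'd' is the clusterer's first output, the autoencoder reconstruction
# of the inputs; 'p' is its second output, per-cluster assignment weights that
# are reduced with argmax below; 'e' is the 2-D encoding produced by the
# encoder (train.encoded_size = 2 above).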
print
for b in xrange(1,4):
print i[b], d[b], p[b], np.argmax(p[b]), o[-1][b], e[b]
print i[-b], d[-b], p[-b], np.argmax(p[-b]), o[-1][-b], e[-b]
print
w = clusterer.get_weights()[-1][0]
print w
# make 2d plots
plt.clf()
cls = np.argmax(p, axis=-1)
mask = cls == 0
plt.scatter(e[:,0][mask], e[:,1][mask], c='b', alpha=0.5)
mask = cls == 1
plt.scatter(e[:,0][mask], e[:,1][mask], c='r', alpha=0.5)
plt.scatter(w[0], w[1], c='k')
plt.savefig('/home/snarayan/public_html/figs/clustering/encoded.png',bbox_inches='tight',dpi=300)
plt.savefig('/home/snarayan/public_html/figs/clustering/encoded.pdf')
plt.clf()
cls = np.argmax(p, axis=-1)
mask = o[-1] < 0.75
plt.scatter(e[:,0][mask], e[:,1][mask], c='k', alpha=0.5)
mask = o[-1] > 0.75
plt.scatter(e[:,0][mask], e[:,1][mask], c='m', alpha=0.5)
plt.savefig('/home/snarayan/public_html/figs/clustering/encoded_truth.png',bbox_inches='tight',dpi=300)
plt.savefig('/home/snarayan/public_html/figs/clustering/encoded_truth.pdf')
plt.clf()
fig = plt.figure()
ax = Axes3D(fig)
mask = cls == 0
ax.scatter(i[mask,0], i[mask,1], i[mask,2], c='b', alpha=0.5)
mask = cls == 1
ax.scatter(i[mask,0], i[mask,1], i[mask,2], c='r', alpha=0.5)
plt.savefig('/home/snarayan/public_html/figs/clustering/original_clust.png',bbox_inches='tight',dpi=300)
plt.savefig('/home/snarayan/public_html/figs/clustering/original_clust.pdf')
plt.clf()
fig = plt.figure()
ax = Axes3D(fig)
mask = o[-1] < 0.75
ax.scatter(i[mask,0], i[mask,1], i[mask,2], c='k', alpha=0.5)
mask = o[-1] > 0.75
ax.scatter(i[mask,0], i[mask,1], i[mask,2], c='m', alpha=0.5)
plt.savefig('/home/snarayan/public_html/figs/clustering/original.png',bbox_inches='tight',dpi=300)
plt.savefig('/home/snarayan/public_html/figs/clustering/original.pdf')
plt.clf()
fig = plt.figure()
ax = Axes3D(fig)
mask = cls == 0
ax.scatter(d[mask,0], d[mask,1], d[mask,2], c='b', alpha=0.5)
mask = cls == 1
ax.scatter(d[mask,0], d[mask,1], d[mask,2], c='r', alpha=0.5)
plt.savefig('/home/snarayan/public_html/figs/clustering/autoencoded.png',bbox_inches='tight',dpi=300)
plt.savefig('/home/snarayan/public_html/figs/clustering/autoencoded.pdf')
|
sidnarayanan/BAdNet
|
train/cluster/simple/run.py
|
Python
|
mit
| 2,825
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Blocking and non-blocking HTTP client implementations using pycurl."""
import io
import collections
import logging
import pycurl
import threading
import time
from tornado import httputil
from tornado import ioloop
from tornado import stack_context
from tornado.escape import utf8
from tornado.httpclient import HTTPRequest, HTTPResponse, HTTPError, AsyncHTTPClient, main
class CurlAsyncHTTPClient(AsyncHTTPClient):
def initialize(self, io_loop=None, max_clients=10,
max_simultaneous_connections=None):
self.io_loop = io_loop
self._multi = pycurl.CurlMulti()
self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
self._curls = [_curl_create(max_simultaneous_connections)
for i in range(max_clients)]
self._free_list = self._curls[:]
self._requests = collections.deque()
self._fds = {}
self._timeout = None
try:
self._socket_action = self._multi.socket_action
except AttributeError:
# socket_action is found in pycurl since 7.18.2 (it's been
# in libcurl longer than that but wasn't accessible to
# python).
logging.warning("socket_action method missing from pycurl; "
"falling back to socket_all. Upgrading "
"libcurl and pycurl will improve performance")
self._socket_action = \
lambda fd, action: self._multi.socket_all()
# libcurl has bugs that sometimes cause it to not report all
# relevant file descriptors and timeouts to TIMERFUNCTION/
# SOCKETFUNCTION. Mitigate the effects of such bugs by
# forcing a periodic scan of all active requests.
self._force_timeout_callback = ioloop.PeriodicCallback(
self._handle_force_timeout, 1000, io_loop=io_loop)
self._force_timeout_callback.start()
def close(self):
self._force_timeout_callback.stop()
for curl in self._curls:
curl.close()
self._multi.close()
self._closed = True
super(CurlAsyncHTTPClient, self).close()
def fetch(self, request, callback, **kwargs):
if not isinstance(request, HTTPRequest):
request = HTTPRequest(url=request, **kwargs)
self._requests.append((request, stack_context.wrap(callback)))
self._process_queue()
self._set_timeout(0)
def _handle_socket(self, event, fd, multi, data):
"""Called by libcurl when it wants to change the file descriptors
it cares about.
"""
event_map = {
pycurl.POLL_NONE: ioloop.IOLoop.NONE,
pycurl.POLL_IN: ioloop.IOLoop.READ,
pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE
}
if event == pycurl.POLL_REMOVE:
self.io_loop.remove_handler(fd)
del self._fds[fd]
else:
ioloop_event = event_map[event]
if fd not in self._fds:
self._fds[fd] = ioloop_event
self.io_loop.add_handler(fd, self._handle_events,
ioloop_event)
else:
self._fds[fd] = ioloop_event
self.io_loop.update_handler(fd, ioloop_event)
def _set_timeout(self, msecs):
"""Called by libcurl to schedule a timeout."""
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = self.io_loop.add_timeout(
time.time() + msecs/1000.0, self._handle_timeout)
def _handle_events(self, fd, events):
"""Called by IOLoop when there is activity on one of our
file descriptors.
"""
action = 0
if events & ioloop.IOLoop.READ: action |= pycurl.CSELECT_IN
if events & ioloop.IOLoop.WRITE: action |= pycurl.CSELECT_OUT
while True:
try:
ret, num_handles = self._socket_action(fd, action)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _handle_timeout(self):
"""Called by IOLoop when the requested timeout has passed."""
with stack_context.NullContext():
self._timeout = None
while True:
try:
ret, num_handles = self._socket_action(
pycurl.SOCKET_TIMEOUT, 0)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
# In theory, we shouldn't have to do this because curl will
# call _set_timeout whenever the timeout changes. However,
# sometimes after _handle_timeout we will need to reschedule
# immediately even though nothing has changed from curl's
# perspective. This is because when socket_action is
# called with SOCKET_TIMEOUT, libcurl decides internally which
# timeouts need to be processed by using a monotonic clock
# (where available) while tornado uses python's time.time()
# to decide when timeouts have occurred. When those clocks
# disagree on elapsed time (as they will whenever there is an
# NTP adjustment), tornado might call _handle_timeout before
# libcurl is ready. After each timeout, resync the scheduled
# timeout with libcurl's current state.
new_timeout = self._multi.timeout()
if new_timeout != -1:
self._set_timeout(new_timeout)
def _handle_force_timeout(self):
"""Called by IOLoop periodically to ask libcurl to process any
events it may have forgotten about.
"""
with stack_context.NullContext():
while True:
try:
ret, num_handles = self._multi.socket_all()
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _finish_pending_requests(self):
"""Process any requests that were completed by the last
call to multi.socket_action.
"""
while True:
num_q, ok_list, err_list = self._multi.info_read()
for curl in ok_list:
self._finish(curl)
for curl, errnum, errmsg in err_list:
self._finish(curl, errnum, errmsg)
if num_q == 0:
break
self._process_queue()
def _process_queue(self):
with stack_context.NullContext():
while True:
started = 0
while self._free_list and self._requests:
started += 1
curl = self._free_list.pop()
(request, callback) = self._requests.popleft()
curl.info = {
"headers": httputil.HTTPHeaders(),
"buffer": io.StringIO(),
"request": request,
"callback": callback,
"curl_start_time": time.time(),
}
# Disable IPv6 to mitigate the effects of this bug
# on curl versions <= 7.21.0
# http://sourceforge.net/tracker/?func=detail&aid=3017819&group_id=976&atid=100976
if pycurl.version_info()[2] <= 0x71500: # 7.21.0
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
_curl_setup_request(curl, request, curl.info["buffer"],
curl.info["headers"])
self._multi.add_handle(curl)
if not started:
break
def _finish(self, curl, curl_error=None, curl_message=None):
info = curl.info
curl.info = None
self._multi.remove_handle(curl)
self._free_list.append(curl)
buffer = info["buffer"]
if curl_error:
error = CurlError(curl_error, curl_message)
code = error.code
effective_url = None
buffer.close()
buffer = None
else:
error = None
code = curl.getinfo(pycurl.HTTP_CODE)
effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
buffer.seek(0)
# the various curl timings are documented at
# http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
time_info = dict(
queue=info["curl_start_time"] - info["request"].start_time,
namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
connect=curl.getinfo(pycurl.CONNECT_TIME),
pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
total=curl.getinfo(pycurl.TOTAL_TIME),
redirect=curl.getinfo(pycurl.REDIRECT_TIME),
)
try:
info["callback"](HTTPResponse(
request=info["request"], code=code, headers=info["headers"],
buffer=buffer, effective_url=effective_url, error=error,
request_time=time.time() - info["curl_start_time"],
time_info=time_info))
except Exception:
self.handle_callback_exception(info["callback"])
def handle_callback_exception(self, callback):
self.io_loop.handle_callback_exception(callback)
class CurlError(HTTPError):
def __init__(self, errno, message):
HTTPError.__init__(self, 599, message)
self.errno = errno
def _curl_create(max_simultaneous_connections=None):
curl = pycurl.Curl()
if logging.getLogger().isEnabledFor(logging.DEBUG):
curl.setopt(pycurl.VERBOSE, 1)
curl.setopt(pycurl.DEBUGFUNCTION, _curl_debug)
curl.setopt(pycurl.MAXCONNECTS, max_simultaneous_connections or 5)
return curl
def _curl_setup_request(curl, request, buffer, headers):
curl.setopt(pycurl.URL, utf8(request.url))
# libcurl's magic "Expect: 100-continue" behavior causes delays
# with servers that don't support it (which include, among others,
# Google's OpenID endpoint). Additionally, this behavior has
# a bug in conjunction with the curl_multi_socket_action API
# (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976),
# which increases the delays. It's more trouble than it's worth,
# so just turn off the feature (yes, setting Expect: to an empty
# value is the official way to disable this)
if "Expect" not in request.headers:
request.headers["Expect"] = ""
# libcurl adds Pragma: no-cache by default; disable that too
if "Pragma" not in request.headers:
request.headers["Pragma"] = ""
# Request headers may be either a regular dict or HTTPHeaders object
if isinstance(request.headers, httputil.HTTPHeaders):
curl.setopt(pycurl.HTTPHEADER,
[utf8("%s: %s" % i) for i in request.headers.get_all()])
else:
curl.setopt(pycurl.HTTPHEADER,
[utf8("%s: %s" % i) for i in request.headers.items()])
if request.header_callback:
curl.setopt(pycurl.HEADERFUNCTION, request.header_callback)
else:
curl.setopt(pycurl.HEADERFUNCTION,
lambda line: _curl_header_callback(headers, line))
if request.streaming_callback:
curl.setopt(pycurl.WRITEFUNCTION, request.streaming_callback)
else:
curl.setopt(pycurl.WRITEFUNCTION, buffer.write)
curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
if request.user_agent:
curl.setopt(pycurl.USERAGENT, utf8(request.user_agent))
else:
curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
if request.network_interface:
curl.setopt(pycurl.INTERFACE, request.network_interface)
if request.use_gzip:
curl.setopt(pycurl.ENCODING, "gzip,deflate")
else:
curl.setopt(pycurl.ENCODING, "none")
if request.proxy_host and request.proxy_port:
curl.setopt(pycurl.PROXY, request.proxy_host)
curl.setopt(pycurl.PROXYPORT, request.proxy_port)
if request.proxy_username:
credentials = '%s:%s' % (request.proxy_username,
request.proxy_password)
curl.setopt(pycurl.PROXYUSERPWD, credentials)
else:
curl.setopt(pycurl.PROXY, '')
if request.validate_cert:
curl.setopt(pycurl.SSL_VERIFYPEER, 1)
curl.setopt(pycurl.SSL_VERIFYHOST, 2)
else:
curl.setopt(pycurl.SSL_VERIFYPEER, 0)
curl.setopt(pycurl.SSL_VERIFYHOST, 0)
if request.ca_certs is not None:
curl.setopt(pycurl.CAINFO, request.ca_certs)
else:
# There is no way to restore pycurl.CAINFO to its default value
# (Using unsetopt makes it reject all certificates).
# I don't see any way to read the default value from python so it
# can be restored later. We'll have to just leave CAINFO untouched
# if no ca_certs file was specified, and require that if any
# request uses a custom ca_certs file, they all must.
pass
if request.allow_ipv6 is False:
# Curl behaves reasonably when DNS resolution gives an ipv6 address
# that we can't reach, so allow ipv6 unless the user asks to disable.
# (but see version check in _process_queue above)
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
# Set the request method through curl's irritating interface which makes
# up names for almost every single method
curl_options = {
"GET": pycurl.HTTPGET,
"POST": pycurl.POST,
"PUT": pycurl.UPLOAD,
"HEAD": pycurl.NOBODY,
}
custom_methods = set(["DELETE"])
for o in list(curl_options.values()):
curl.setopt(o, False)
if request.method in curl_options:
curl.unsetopt(pycurl.CUSTOMREQUEST)
curl.setopt(curl_options[request.method], True)
elif request.allow_nonstandard_methods or request.method in custom_methods:
curl.setopt(pycurl.CUSTOMREQUEST, request.method)
else:
raise KeyError('unknown method ' + request.method)
# Handle curl's cryptic options for every individual HTTP method
if request.method in ("POST", "PUT"):
        request_buffer = io.BytesIO(utf8(request.body))  # utf8() returns bytes
curl.setopt(pycurl.READFUNCTION, request_buffer.read)
if request.method == "POST":
def ioctl(cmd):
if cmd == curl.IOCMD_RESTARTREAD:
request_buffer.seek(0)
curl.setopt(pycurl.IOCTLFUNCTION, ioctl)
curl.setopt(pycurl.POSTFIELDSIZE, len(request.body))
else:
curl.setopt(pycurl.INFILESIZE, len(request.body))
if request.auth_username is not None:
userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
curl.setopt(pycurl.USERPWD, utf8(userpwd))
logging.debug("%s %s (username: %r)", request.method, request.url,
request.auth_username)
else:
curl.unsetopt(pycurl.USERPWD)
logging.debug("%s %s", request.method, request.url)
if request.client_key is not None or request.client_cert is not None:
raise ValueError("Client certificate not supported with curl_httpclient")
if threading.activeCount() > 1:
# libcurl/pycurl is not thread-safe by default. When multiple threads
# are used, signals should be disabled. This has the side effect
# of disabling DNS timeouts in some environments (when libcurl is
# not linked against ares), so we don't do it when there is only one
# thread. Applications that use many short-lived threads may need
# to set NOSIGNAL manually in a prepare_curl_callback since
# there may not be any other threads running at the time we call
# threading.activeCount.
curl.setopt(pycurl.NOSIGNAL, 1)
if request.prepare_curl_callback is not None:
request.prepare_curl_callback(curl)
def _curl_header_callback(headers, header_line):
# header_line as returned by curl includes the end-of-line characters.
header_line = header_line.strip()
if header_line.startswith("HTTP/"):
headers.clear()
return
if not header_line:
return
headers.parse_line(header_line)
def _curl_debug(debug_type, debug_msg):
debug_types = ('I', '<', '>', '<', '>')
if debug_type == 0:
logging.debug('%s', debug_msg.strip())
elif debug_type in (1, 2):
for line in debug_msg.splitlines():
logging.debug('%s %s', debug_types[debug_type], line)
elif debug_type == 4:
logging.debug('%s %r', debug_types[debug_type], debug_msg)
if __name__ == "__main__":
AsyncHTTPClient.configure(CurlAsyncHTTPClient)
main()
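# --- illustrative usage sketch (not part of tornado) --------------------------
# A minimal callback-style fetch with the client defined above; the URL is a
# placeholder. This mirrors tornado 2.x's AsyncHTTPClient API.
def _example_fetch(url="http://example.com/"):
    io_loop = ioloop.IOLoop.instance()
    client = CurlAsyncHTTPClient(io_loop=io_loop)
    def _on_response(response):
        print(response.code)  # HTTPResponse built in _finish() above
        io_loop.stop()
    client.fetch(url, _on_response)
    io_loop.start()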
|
e1ven/Waymoot
|
libs/tornado-2.2/build/lib/tornado/curl_httpclient.py
|
Python
|
mit
| 18,106
|
#!/usr/bin/env python2
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 2 -*-
# Author: Fábio André Damas <skkeeper at gmail dot com>
from threading import Thread
from subprocess import Popen, PIPE
class PlaySound(Thread):
def __init__(self, filename, volume):
Thread.__init__(self)
self.filename = filename
self.volume = volume
def run(self):
cmd = 'play -v ' + self.volume + ' ' + self.filename
p = Popen(cmd, shell=True, stderr=PIPE, close_fds=True)
    # TODO: Test whether this limits the number of clicks
p.wait()
if p.returncode != 0:
      print '\033[1;31mWe found an error with SoX, did you install it?\033[1;m'
p.stderr.read()
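# --- illustrative usage sketch (not part of the original module) --------------
# PlaySound takes the volume as a string because it is spliced into the shell
# command above; 'click.wav' is a placeholder filename.
if __name__ == '__main__':
  PlaySound('click.wav', '0.5').start()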
|
skkeeper/linux-clicky
|
linux_clicky/play_sound.py
|
Python
|
mit
| 691
|
#!/usr/bin/env python
"""SciPy: Scientific Library for Python
SciPy (pronounced "Sigh Pie") is open-source software for mathematics,
science, and engineering. The SciPy library
depends on NumPy, which provides convenient and fast N-dimensional
array manipulation. The SciPy library is built to work with NumPy
arrays, and provides many user-friendly and efficient numerical
routines such as routines for numerical integration and optimization.
Together, they run on all popular operating systems, are quick to
install, and are free of charge. NumPy and SciPy are easy to use,
but powerful enough to be depended upon by some of the world's
leading scientists and engineers. If you need to manipulate
numbers on a computer and display or publish the results,
give SciPy a try!
"""
DOCLINES = __doc__.split("\n")
import os
import sys
import subprocess
if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 2):
raise RuntimeError("Python version 2.6, 2.7 or >= 3.2 required.")
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
CLASSIFIERS = """\
Development Status :: 4 - Beta
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
MAJOR = 0
MINOR = 14
MICRO = 0
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout = subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
# This is a bit hackish: we are setting a global variable so that the main
# scipy __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet. While ugly, it's
# a lot more robust than what was previously being used.
builtins.__SCIPY_SETUP__ = True
def get_version_info():
# Adding the git rev number needs to be done inside
# write_version_py(), otherwise the import of scipy.version messes
# up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('scipy/version.py'):
# must be a source distribution, use existing version file
# load it as a separate module to not load scipy/__init__.py
import imp
version = imp.load_source('scipy.version', 'scipy/version.py')
GIT_REVISION = version.git_revision
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev-' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
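# Illustrative example (not part of the original file): with the version
# numbers defined above, get_version_info() returns ('0.14.0', '<git sha>')
# when ISRELEASED is True; if ISRELEASED were False it would instead return
# something like ('0.14.0.dev-1a2b3c4', '<git sha>'), i.e. the short revision
# is appended to the version string.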
def write_version_py(filename='scipy/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM SCIPY SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION = get_version_info()
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version' : FULLVERSION,
'git_revision' : GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
try:
from sphinx.setup_command import BuildDoc
HAVE_SPHINX = True
except:
HAVE_SPHINX = False
if HAVE_SPHINX:
class ScipyBuildDoc(BuildDoc):
"""Run in-place build before Sphinx doc build"""
def run(self):
ret = subprocess.call([sys.executable, sys.argv[0], 'build_ext', '-i'])
if ret != 0:
raise RuntimeError("Building Scipy failed!")
BuildDoc.run(self)
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable,
os.path.join(cwd, 'tools', 'cythonize.py'),
'scipy'],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('scipy')
config.add_data_files(('scipy','*.txt'))
config.get_version('scipy/version.py')
return config
def setup_package():
# Rewrite the version file every time
write_version_py()
if HAVE_SPHINX:
cmdclass = {'build_sphinx': ScipyBuildDoc}
else:
cmdclass = {}
metadata = dict(
name = 'scipy',
maintainer = "SciPy Developers",
maintainer_email = "scipy-dev@scipy.org",
description = DOCLINES[0],
long_description = "\n".join(DOCLINES[2:]),
url = "http://www.scipy.org",
download_url = "http://sourceforge.net/projects/scipy/files/scipy/",
license = 'BSD',
cmdclass=cmdclass,
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='nose.collector',
)
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands', 'egg_info', '--version',
'clean')):
# For these actions, NumPy is not required.
#
        # They are required to succeed without NumPy, for example when
        # pip is used to install SciPy while NumPy is not yet present on
        # the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
FULLVERSION, GIT_REVISION = get_version_info()
metadata['version'] = FULLVERSION
else:
if len(sys.argv) >= 2 and sys.argv[1] == 'bdist_wheel':
# bdist_wheel needs setuptools
import setuptools
from numpy.distutils.core import setup
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == '__main__':
setup_package()
|
largelymfs/w2vtools
|
build/scipy/setup.py
|
Python
|
mit
| 7,514
|
from django.conf.urls import patterns, include, url
import views
urlpatterns = patterns('',
url(r'^$', views.TopicModelIndexView.as_view(), name='topics_models'),
url(r'^model/(?P<model_id>\d+)/$', views.TopicModelDetailView.as_view(), name='topics_model'),
url(r'^model/(?P<model_id>\d+)/topic/(?P<topic_id>\d+)/$', views.TopicDetailView.as_view(), name='topics_topic'),
url(r'^model/(?P<model_id>\d+)/topic/(?P<topic_id>\d+)/word/(?P<word_id>\d+)/$', views.TopicWordDetailView.as_view(),
name='topics_topic_word'),
)
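# Illustrative only (not part of the original file): with the names above, a
# view or template can build links via Django's reverse(), e.g.
#   reverse('topics_topic', kwargs={'model_id': 1, 'topic_id': 2})
# which resolves to 'model/1/topic/2/' relative to wherever this urlconf is
# included.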
|
hds-lab/textvisdrg-prototype
|
textvis/topics/urls.py
|
Python
|
mit
| 545
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Related to AboutOpenClasses in the Ruby Koans
#
from runner.koan import *
class AboutMonkeyPatching(Koan):
class Dog:
def bark(self):
return "WOOF"
def test_as_defined_dogs_do_bark(self):
fido = self.Dog()
self.assertEqual('WOOF', fido.bark())
# ------------------------------------------------------------------
# Add a new method to an existing class.
def test_after_patching_dogs_can_both_wag_and_bark(self):
def wag(self): return "HAPPY"
self.Dog.wag = wag
fido = self.Dog()
self.assertEqual('HAPPY', fido.wag())
self.assertEqual('WOOF', fido.bark())
# ------------------------------------------------------------------
def test_most_built_in_classes_cannot_be_monkey_patched(self):
try:
int.is_even = lambda self: (self % 2) == 0
except Exception as ex:
err_msg = ex.args[0]
self.assertRegex(err_msg, "can't set attributes of built-in/extension type 'int'")
# ------------------------------------------------------------------
class MyInt(int): pass
def test_subclasses_of_built_in_classes_can_be_be_monkey_patched(self):
self.MyInt.is_even = lambda self: (self % 2) == 0
self.assertEqual(False, self.MyInt(1).is_even())
self.assertEqual(True, self.MyInt(2).is_even())
|
gregkorte/Python-Koans
|
python3/koans/about_monkey_patching.py
|
Python
|
mit
| 1,426
|
import os, glob, shutil
from pathos import multiprocessing as mp
import pandas as pd
import numpy as np
base_path = '/Data/malindgren/cru_november_final/IEM/ar5'
output_base_path = '/Data/malindgren/cru_november_final/IEM/ar5'
models = [ 'IPSL-CM5A-LR', 'GISS-E2-R', 'MRI-CGCM3', 'CCSM4', 'GFDL-CM3' ]
# variables = ['rsds', 'vap' ]
for model in models:
variables = os.listdir( os.path.join( base_path, model ) )
_ = [ os.makedirs( os.path.join( base_path, model, variable ) ) for variable in variables if not os.path.exists( os.path.join( base_path, model, variable ) ) ]
for variable in variables:
print( ' '.join([model, variable]) )
output_path = os.path.join( output_base_path, model, variable, 'downscaled' )
cur_path = os.path.join( base_path, model, variable, 'downscaled' )
l = pd.Series( glob.glob( os.path.join( cur_path, '*.tif' ) ) )
grouper = [ os.path.basename(i).split( '_' )[ 5 ] for i in l ]
rcp_groups = l.groupby( grouper )
name_group = [ group for group in rcp_groups ]
names = [ i[0] for i in name_group ]
_ = [ os.makedirs( os.path.join( output_path, name ) ) for name in names if not os.path.exists( os.path.join( output_path, name ) ) ]
for count, name in enumerate( names ):
print count
group = name_group[ count ]
out_group = [ os.path.join( output_path, name, os.path.basename( i ) ) for i in group[1] ]
def run( x, y ):
import shutil
return shutil.move( x, y )
pool = mp.Pool( 15 )
out = pool.map( lambda x: run(x[0], x[1]), zip( group[1], out_group ) )
pool.close()
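# Illustrative note (not part of the original script): the grouping above keys
# on os.path.basename(fn).split('_')[5], i.e. it assumes downscaled GeoTIFF
# names whose sixth underscore-separated token is the scenario/RCP identifier
# (e.g. a hypothetical 'tas_mean_C_iem_GFDL-CM3_rcp60_01_2006.tif' would be
# grouped under 'rcp60').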
|
ua-snap/downscale
|
old/old_bin/sort_files_by_rcp.py
|
Python
|
mit
| 1,564
|
"""
Color definitions are used as per CSS3 specification:
http://www.w3.org/TR/css3-color/#svg-color
A few colors have multiple names referring to the same colors, eg. `grey` and `gray` or `aqua` and `cyan`.
In these cases the LAST color when sorted alphabetically takes precedence,
eg. Color((0, 255, 255)).as_named() == 'cyan' because "cyan" comes after "aqua".
"""
import math
import re
from colorsys import hls_to_rgb, rgb_to_hls
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union, cast
from .errors import ColorError
from .utils import Representation, almost_equal_floats
if TYPE_CHECKING:
from .typing import CallableGenerator, ReprArgs
ColorTuple = Union[Tuple[int, int, int], Tuple[int, int, int, float]]
ColorType = Union[ColorTuple, str]
HslColorTuple = Union[Tuple[float, float, float], Tuple[float, float, float, float]]
class RGBA:
"""
Internal use only as a representation of a color.
"""
__slots__ = 'r', 'g', 'b', 'alpha', '_tuple'
def __init__(self, r: float, g: float, b: float, alpha: Optional[float]):
self.r = r
self.g = g
self.b = b
self.alpha = alpha
self._tuple: Tuple[float, float, float, Optional[float]] = (r, g, b, alpha)
def __getitem__(self, item: Any) -> Any:
return self._tuple[item]
# these are not compiled here to avoid import slowdown, they'll be compiled the first time they're used, then cached
r_hex_short = r'\s*(?:#|0x)?([0-9a-f])([0-9a-f])([0-9a-f])([0-9a-f])?\s*'
r_hex_long = r'\s*(?:#|0x)?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})?\s*'
_r_255 = r'(\d{1,3}(?:\.\d+)?)'
_r_comma = r'\s*,\s*'
r_rgb = fr'\s*rgb\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}\)\s*'
_r_alpha = r'(\d(?:\.\d+)?|\.\d+|\d{1,2}%)'
r_rgba = fr'\s*rgba\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_alpha}\s*\)\s*'
_r_h = r'(-?\d+(?:\.\d+)?|-?\.\d+)(deg|rad|turn)?'
_r_sl = r'(\d{1,3}(?:\.\d+)?)%'
r_hsl = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}\s*\)\s*'
r_hsla = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}{_r_comma}{_r_alpha}\s*\)\s*'
# colors where the two hex characters are the same, if all colors match this the short version of hex colors can be used
repeat_colors = {int(c * 2, 16) for c in '0123456789abcdef'}
rads = 2 * math.pi
class Color(Representation):
__slots__ = '_original', '_rgba'
def __init__(self, value: ColorType) -> None:
self._rgba: RGBA
self._original: ColorType
if isinstance(value, (tuple, list)):
self._rgba = parse_tuple(value)
elif isinstance(value, str):
self._rgba = parse_str(value)
elif isinstance(value, Color):
self._rgba = value._rgba
value = value._original
else:
raise ColorError(reason='value must be a tuple, list or string')
# if we've got here value must be a valid color
self._original = value
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='color')
def original(self) -> ColorType:
"""
Original value passed to Color
"""
return self._original
def as_named(self, *, fallback: bool = False) -> str:
if self._rgba.alpha is None:
rgb = cast(Tuple[int, int, int], self.as_rgb_tuple())
try:
return COLORS_BY_VALUE[rgb]
except KeyError as e:
if fallback:
return self.as_hex()
else:
raise ValueError('no named color found, use fallback=True, as_hex() or as_rgb()') from e
else:
return self.as_hex()
def as_hex(self) -> str:
"""
        Hex string representing the color; it can be 3, 4, 6 or 8 characters depending on whether
        a "short" representation of the color is possible and whether there's an alpha channel.
"""
values = [float_to_255(c) for c in self._rgba[:3]]
if self._rgba.alpha is not None:
values.append(float_to_255(self._rgba.alpha))
as_hex = ''.join(f'{v:02x}' for v in values)
if all(c in repeat_colors for c in values):
as_hex = ''.join(as_hex[c] for c in range(0, len(as_hex), 2))
return '#' + as_hex
def as_rgb(self) -> str:
"""
Color as an rgb(<r>, <g>, <b>) or rgba(<r>, <g>, <b>, <a>) string.
"""
if self._rgba.alpha is None:
return f'rgb({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)})'
else:
return (
f'rgba({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)}, '
f'{round(self._alpha_float(), 2)})'
)
def as_rgb_tuple(self, *, alpha: Optional[bool] = None) -> ColorTuple:
"""
Color as an RGB or RGBA tuple; red, green and blue are in the range 0 to 255, alpha if included is
in the range 0 to 1.
:param alpha: whether to include the alpha channel, options are
None - (default) include alpha only if it's set (e.g. not None)
True - always include alpha,
False - always omit alpha,
"""
r, g, b = [float_to_255(c) for c in self._rgba[:3]]
if alpha is None:
if self._rgba.alpha is None:
return r, g, b
else:
return r, g, b, self._alpha_float()
elif alpha:
return r, g, b, self._alpha_float()
else:
# alpha is False
return r, g, b
def as_hsl(self) -> str:
"""
Color as an hsl(<h>, <s>, <l>) or hsl(<h>, <s>, <l>, <a>) string.
"""
if self._rgba.alpha is None:
h, s, li = self.as_hsl_tuple(alpha=False) # type: ignore
return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%})'
else:
h, s, li, a = self.as_hsl_tuple(alpha=True) # type: ignore
return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%}, {round(a, 2)})'
def as_hsl_tuple(self, *, alpha: Optional[bool] = None) -> HslColorTuple:
"""
Color as an HSL or HSLA tuple, e.g. hue, saturation, lightness and optionally alpha; all elements are in
the range 0 to 1.
NOTE: this is HSL as used in HTML and most other places, not HLS as used in python's colorsys.
:param alpha: whether to include the alpha channel, options are
None - (default) include alpha only if it's set (e.g. not None)
True - always include alpha,
False - always omit alpha,
"""
h, l, s = rgb_to_hls(self._rgba.r, self._rgba.g, self._rgba.b)
if alpha is None:
if self._rgba.alpha is None:
return h, s, l
else:
return h, s, l, self._alpha_float()
if alpha:
return h, s, l, self._alpha_float()
else:
# alpha is False
return h, s, l
def _alpha_float(self) -> float:
return 1 if self._rgba.alpha is None else self._rgba.alpha
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls
def __str__(self) -> str:
return self.as_named(fallback=True)
def __repr_args__(self) -> 'ReprArgs':
return [(None, self.as_named(fallback=True))] + [('rgb', self.as_rgb_tuple())] # type: ignore
def parse_tuple(value: Tuple[Any, ...]) -> RGBA:
"""
Parse a tuple or list as a color.
"""
if len(value) == 3:
r, g, b = [parse_color_value(v) for v in value]
return RGBA(r, g, b, None)
elif len(value) == 4:
r, g, b = [parse_color_value(v) for v in value[:3]]
return RGBA(r, g, b, parse_float_alpha(value[3]))
else:
raise ColorError(reason='tuples must have length 3 or 4')
def parse_str(value: str) -> RGBA:
"""
Parse a string to an RGBA tuple, trying the following formats (in this order):
* named color, see COLORS_BY_NAME below
* hex short eg. `<prefix>fff` (prefix can be `#`, `0x` or nothing)
* hex long eg. `<prefix>ffffff` (prefix can be `#`, `0x` or nothing)
    * `rgb(<r>, <g>, <b>)`
    * `rgba(<r>, <g>, <b>, <a>)`
    * `hsl(<h>, <s>, <l>)` and `hsl(<h>, <s>, <l>, <a>)`
    """
value_lower = value.lower()
try:
r, g, b = COLORS_BY_NAME[value_lower]
except KeyError:
pass
else:
return ints_to_rgba(r, g, b, None)
m = re.fullmatch(r_hex_short, value_lower)
if m:
*rgb, a = m.groups()
r, g, b = [int(v * 2, 16) for v in rgb]
if a:
alpha: Optional[float] = int(a * 2, 16) / 255
else:
alpha = None
return ints_to_rgba(r, g, b, alpha)
m = re.fullmatch(r_hex_long, value_lower)
if m:
*rgb, a = m.groups()
r, g, b = [int(v, 16) for v in rgb]
if a:
alpha = int(a, 16) / 255
else:
alpha = None
return ints_to_rgba(r, g, b, alpha)
m = re.fullmatch(r_rgb, value_lower)
if m:
return ints_to_rgba(*m.groups(), None) # type: ignore
m = re.fullmatch(r_rgba, value_lower)
if m:
return ints_to_rgba(*m.groups()) # type: ignore
m = re.fullmatch(r_hsl, value_lower)
if m:
h, h_units, s, l_ = m.groups()
return parse_hsl(h, h_units, s, l_)
m = re.fullmatch(r_hsla, value_lower)
if m:
h, h_units, s, l_, a = m.groups()
return parse_hsl(h, h_units, s, l_, parse_float_alpha(a))
raise ColorError(reason='string not recognised as a valid color')
def ints_to_rgba(r: Union[int, str], g: Union[int, str], b: Union[int, str], alpha: Optional[float]) -> RGBA:
return RGBA(parse_color_value(r), parse_color_value(g), parse_color_value(b), parse_float_alpha(alpha))
def parse_color_value(value: Union[int, str], max_val: int = 255) -> float:
"""
Parse a value checking it's a valid int in the range 0 to max_val and divide by max_val to give a number
in the range 0 to 1
"""
try:
color = float(value)
except ValueError:
raise ColorError(reason='color values must be a valid number')
if 0 <= color <= max_val:
return color / max_val
else:
raise ColorError(reason=f'color values must be in the range 0 to {max_val}')
def parse_float_alpha(value: Union[None, str, float, int]) -> Optional[float]:
"""
Parse a value checking it's a valid float in the range 0 to 1
"""
if value is None:
return None
try:
if isinstance(value, str) and value.endswith('%'):
alpha = float(value[:-1]) / 100
else:
alpha = float(value)
except ValueError:
raise ColorError(reason='alpha values must be a valid float')
if almost_equal_floats(alpha, 1):
return None
elif 0 <= alpha <= 1:
return alpha
else:
raise ColorError(reason='alpha values must be in the range 0 to 1')
def parse_hsl(h: str, h_units: str, sat: str, light: str, alpha: Optional[float] = None) -> RGBA:
"""
Parse raw hue, saturation, lightness and alpha values and convert to RGBA.
"""
s_value, l_value = parse_color_value(sat, 100), parse_color_value(light, 100)
h_value = float(h)
if h_units in {None, 'deg'}:
h_value = h_value % 360 / 360
elif h_units == 'rad':
h_value = h_value % rads / rads
else:
# turns
h_value = h_value % 1
r, g, b = hls_to_rgb(h_value, l_value, s_value)
return RGBA(r, g, b, alpha)
def float_to_255(c: float) -> int:
return int(round(c * 255))
COLORS_BY_NAME = {
'aliceblue': (240, 248, 255),
'antiquewhite': (250, 235, 215),
'aqua': (0, 255, 255),
'aquamarine': (127, 255, 212),
'azure': (240, 255, 255),
'beige': (245, 245, 220),
'bisque': (255, 228, 196),
'black': (0, 0, 0),
'blanchedalmond': (255, 235, 205),
'blue': (0, 0, 255),
'blueviolet': (138, 43, 226),
'brown': (165, 42, 42),
'burlywood': (222, 184, 135),
'cadetblue': (95, 158, 160),
'chartreuse': (127, 255, 0),
'chocolate': (210, 105, 30),
'coral': (255, 127, 80),
'cornflowerblue': (100, 149, 237),
'cornsilk': (255, 248, 220),
'crimson': (220, 20, 60),
'cyan': (0, 255, 255),
'darkblue': (0, 0, 139),
'darkcyan': (0, 139, 139),
'darkgoldenrod': (184, 134, 11),
'darkgray': (169, 169, 169),
'darkgreen': (0, 100, 0),
'darkgrey': (169, 169, 169),
'darkkhaki': (189, 183, 107),
'darkmagenta': (139, 0, 139),
'darkolivegreen': (85, 107, 47),
'darkorange': (255, 140, 0),
'darkorchid': (153, 50, 204),
'darkred': (139, 0, 0),
'darksalmon': (233, 150, 122),
'darkseagreen': (143, 188, 143),
'darkslateblue': (72, 61, 139),
'darkslategray': (47, 79, 79),
'darkslategrey': (47, 79, 79),
'darkturquoise': (0, 206, 209),
'darkviolet': (148, 0, 211),
'deeppink': (255, 20, 147),
'deepskyblue': (0, 191, 255),
'dimgray': (105, 105, 105),
'dimgrey': (105, 105, 105),
'dodgerblue': (30, 144, 255),
'firebrick': (178, 34, 34),
'floralwhite': (255, 250, 240),
'forestgreen': (34, 139, 34),
'fuchsia': (255, 0, 255),
'gainsboro': (220, 220, 220),
'ghostwhite': (248, 248, 255),
'gold': (255, 215, 0),
'goldenrod': (218, 165, 32),
'gray': (128, 128, 128),
'green': (0, 128, 0),
'greenyellow': (173, 255, 47),
'grey': (128, 128, 128),
'honeydew': (240, 255, 240),
'hotpink': (255, 105, 180),
'indianred': (205, 92, 92),
'indigo': (75, 0, 130),
'ivory': (255, 255, 240),
'khaki': (240, 230, 140),
'lavender': (230, 230, 250),
'lavenderblush': (255, 240, 245),
'lawngreen': (124, 252, 0),
'lemonchiffon': (255, 250, 205),
'lightblue': (173, 216, 230),
'lightcoral': (240, 128, 128),
'lightcyan': (224, 255, 255),
'lightgoldenrodyellow': (250, 250, 210),
'lightgray': (211, 211, 211),
'lightgreen': (144, 238, 144),
'lightgrey': (211, 211, 211),
'lightpink': (255, 182, 193),
'lightsalmon': (255, 160, 122),
'lightseagreen': (32, 178, 170),
'lightskyblue': (135, 206, 250),
'lightslategray': (119, 136, 153),
'lightslategrey': (119, 136, 153),
'lightsteelblue': (176, 196, 222),
'lightyellow': (255, 255, 224),
'lime': (0, 255, 0),
'limegreen': (50, 205, 50),
'linen': (250, 240, 230),
'magenta': (255, 0, 255),
'maroon': (128, 0, 0),
'mediumaquamarine': (102, 205, 170),
'mediumblue': (0, 0, 205),
'mediumorchid': (186, 85, 211),
'mediumpurple': (147, 112, 219),
'mediumseagreen': (60, 179, 113),
'mediumslateblue': (123, 104, 238),
'mediumspringgreen': (0, 250, 154),
'mediumturquoise': (72, 209, 204),
'mediumvioletred': (199, 21, 133),
'midnightblue': (25, 25, 112),
'mintcream': (245, 255, 250),
'mistyrose': (255, 228, 225),
'moccasin': (255, 228, 181),
'navajowhite': (255, 222, 173),
'navy': (0, 0, 128),
'oldlace': (253, 245, 230),
'olive': (128, 128, 0),
'olivedrab': (107, 142, 35),
'orange': (255, 165, 0),
'orangered': (255, 69, 0),
'orchid': (218, 112, 214),
'palegoldenrod': (238, 232, 170),
'palegreen': (152, 251, 152),
'paleturquoise': (175, 238, 238),
'palevioletred': (219, 112, 147),
'papayawhip': (255, 239, 213),
'peachpuff': (255, 218, 185),
'peru': (205, 133, 63),
'pink': (255, 192, 203),
'plum': (221, 160, 221),
'powderblue': (176, 224, 230),
'purple': (128, 0, 128),
'red': (255, 0, 0),
'rosybrown': (188, 143, 143),
'royalblue': (65, 105, 225),
'saddlebrown': (139, 69, 19),
'salmon': (250, 128, 114),
'sandybrown': (244, 164, 96),
'seagreen': (46, 139, 87),
'seashell': (255, 245, 238),
'sienna': (160, 82, 45),
'silver': (192, 192, 192),
'skyblue': (135, 206, 235),
'slateblue': (106, 90, 205),
'slategray': (112, 128, 144),
'slategrey': (112, 128, 144),
'snow': (255, 250, 250),
'springgreen': (0, 255, 127),
'steelblue': (70, 130, 180),
'tan': (210, 180, 140),
'teal': (0, 128, 128),
'thistle': (216, 191, 216),
'tomato': (255, 99, 71),
'turquoise': (64, 224, 208),
'violet': (238, 130, 238),
'wheat': (245, 222, 179),
'white': (255, 255, 255),
'whitesmoke': (245, 245, 245),
'yellow': (255, 255, 0),
'yellowgreen': (154, 205, 50),
}
COLORS_BY_VALUE = {v: k for k, v in COLORS_BY_NAME.items()}
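# --- illustrative usage sketch (not part of pydantic) --------------------------
# A few round-trips through the helpers above:
#   Color('red').as_rgb_tuple()            -> (255, 0, 0)
#   Color((0, 255, 255)).as_named()        -> 'cyan'   (last name alphabetically wins)
#   Color('hsl(180, 100%, 50%)').as_hex()  -> '#0ff'   (short form: every channel repeats)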
|
samuelcolvin/pydantic
|
pydantic/color.py
|
Python
|
mit
| 16,607
|
#!/usr/bin/python2
import os, glob
os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs' )
base_dir = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped'
output_base_dir = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/downscaled'
cru_base_dir = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts20/akcan'
for root, dirs, files in os.walk( base_dir ):
if files:
path, variable = os.path.split( root )
path, model = os.path.split( path )
# this gets rid of any .xml or .txt files that may be living amongst the NetCDF's
files = [ fn for fn in files if fn.endswith( '.nc' ) ]
for fn in files:
print 'running %s' % fn
# split out the sub_dirs to have both model_name and variable folder hierarchy
# from the prepped folder directory
output_dir = os.path.join( output_base_dir, model, variable )
if not os.path.exists( output_dir ):
os.makedirs( output_dir )
# anomalies calculation type and cru input path condition
if 'tas_' in os.path.basename( fn ):
anomalies_calc_type = 'absolute'
downscale_operation = 'add'
cru_path = os.path.join( cru_base_dir, 'tas' )
elif 'hur_' in os.path.basename( fn ):
anomalies_calc_type = 'proportional'
downscale_operation = 'mult'
cru_path = os.path.join( cru_base_dir, 'hur' )
plev = 1000
else:
NotImplementedError( "only 'hur' & 'tas' have been implemented here" )
			# determine whether we need to read the historical dataset alongside the
			# modelled data for the anomalies calculation
if 'historical' in fn:
# run with only the historical file
dates = os.path.basename( fn ).strip( '.nc' ).split( '_' )
dates = dates[ len( dates )-2 ], dates[ len( dates )-1 ]
begin_time, end_time = [ '-'.join([ i[:4], i[4:] ]) for i in dates ]
if 'tas_' in fn:
os.system( 'python hur_ar5_model_data_downscaling.py' + ' -hi ' + os.path.join( root, fn ) + ' -o ' + output_dir + ' -bt ' + begin_time + \
' -et ' + end_time + ' -cbt ' + '1961-01' + ' -cet ' + '1990-12' + \
' -cru ' + cru_path + ' -at ' + anomalies_calc_type + ' -m ' + 'mean' + ' -dso ' + downscale_operation )
elif 'hur_' in fn:
# run with only the historical file
os.system( 'python hur_ar5_model_data_downscaling.py' + ' -hi ' + os.path.join( root, fn ) + ' -o ' + output_dir + ' -bt ' + begin_time + \
' -et ' + end_time + ' -cbt ' + '1961-01' + ' -cet ' + '1990-12' + \
' -plev ' + str(plev) + ' -cru ' + cru_path + ' -at ' + anomalies_calc_type + ' -m ' + 'mean' + ' -dso ' + downscale_operation )
else:
NotImplementedError( "only 'hur' & 'tas' have been implemented here" )
else:
# grab the historical file from that particular folder
historical_fn = glob.glob( os.path.join( root, '*historical*.nc' ) )[0]
# run with both historical and modeled files for anomalies calc.
if 'tas_' in fn:
os.system( 'python hur_ar5_model_data_downscaling.py' + ' -mi ' + os.path.join( root, fn ) + ' -hi ' + historical_fn + ' -o ' + output_dir + \
' -bt ' + '2006-01' + ' -et ' + '2100-12' + ' -cbt ' + '1961-01' + ' -cet ' + '1990-12' + \
' -cru ' + cru_path + ' -at ' + anomalies_calc_type + ' -m ' + 'mean' + ' -dso ' + downscale_operation )
elif 'hur_' in fn:
os.system( 'python hur_ar5_model_data_downscaling.py' + ' -mi ' + os.path.join( root, fn ) + ' -hi ' + historical_fn + ' -o ' + output_dir + \
' -bt ' + '2006-01' + ' -et ' + '2100-12' + ' -cbt ' + '1961-01' + ' -cet ' + '1990-12' + ' -plev ' + str(plev) + \
' -cru ' + cru_path + ' -at ' + anomalies_calc_type + ' -m ' + 'mean' + ' -dso ' + downscale_operation )
else:
NotImplementedError( "only 'hur' & 'tas' have been implemented here" )
|
ua-snap/downscale
|
old/old_bin/downscaling_launcher.py
|
Python
|
mit
| 3,849
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-25 18:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserActivation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('activation_key', models.CharField(max_length=128)),
('key_expires', models.DateTimeField()),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
]
|
Astroxslurg/interlecture
|
interlecture/interauth/migrations/0001_initial.py
|
Python
|
mit
| 905
|
# Copyright (C) 2011 by Mark Visser <mjmvisser@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# This code is based on the AMD Display Library 3.0 SDK
ADL_TRUE = 1 # ADL_SDK_3.0/include/adl_defines.h:52
ADL_FALSE = 0 # ADL_SDK_3.0/include/adl_defines.h:55
ADL_MAX_CHAR = 4096 # ADL_SDK_3.0/include/adl_defines.h:59
ADL_MAX_PATH = 256 # ADL_SDK_3.0/include/adl_defines.h:62
ADL_MAX_ADAPTERS = 150 # ADL_SDK_3.0/include/adl_defines.h:65
ADL_MAX_DISPLAYS = 150 # ADL_SDK_3.0/include/adl_defines.h:68
ADL_MAX_DEVICENAME = 32 # ADL_SDK_3.0/include/adl_defines.h:71
ADL_ADAPTER_INDEX_ALL = -1 # ADL_SDK_3.0/include/adl_defines.h:74
ADL_MAIN_API_OPTION_NONE = 0 # ADL_SDK_3.0/include/adl_defines.h:77
ADL_DDC_OPTION_SWITCHDDC2 = 1 # ADL_SDK_3.0/include/adl_defines.h:90
ADL_DDC_OPTION_RESTORECOMMAND = 2 # ADL_SDK_3.0/include/adl_defines.h:93
ADL_DL_I2C_ACTIONREAD = 1 # ADL_SDK_3.0/include/adl_defines.h:104
ADL_DL_I2C_ACTIONWRITE = 2 # ADL_SDK_3.0/include/adl_defines.h:105
ADL_DL_I2C_ACTIONREAD_REPEATEDSTART = 3 # ADL_SDK_3.0/include/adl_defines.h:106
ADL_OK_WAIT = 4 # ADL_SDK_3.0/include/adl_defines.h:122
ADL_OK_RESTART = 3 # ADL_SDK_3.0/include/adl_defines.h:125
ADL_OK_MODE_CHANGE = 2 # ADL_SDK_3.0/include/adl_defines.h:128
ADL_OK_WARNING = 1 # ADL_SDK_3.0/include/adl_defines.h:131
ADL_OK = 0 # ADL_SDK_3.0/include/adl_defines.h:134
ADL_ERR = -1 # ADL_SDK_3.0/include/adl_defines.h:137
ADL_ERR_NOT_INIT = -2 # ADL_SDK_3.0/include/adl_defines.h:140
ADL_ERR_INVALID_PARAM = -3 # ADL_SDK_3.0/include/adl_defines.h:143
ADL_ERR_INVALID_PARAM_SIZE = -4 # ADL_SDK_3.0/include/adl_defines.h:146
ADL_ERR_INVALID_ADL_IDX = -5 # ADL_SDK_3.0/include/adl_defines.h:149
ADL_ERR_INVALID_CONTROLLER_IDX = -6 # ADL_SDK_3.0/include/adl_defines.h:152
ADL_ERR_INVALID_DIPLAY_IDX = -7 # ADL_SDK_3.0/include/adl_defines.h:155
ADL_ERR_NOT_SUPPORTED = -8 # ADL_SDK_3.0/include/adl_defines.h:158
ADL_ERR_NULL_POINTER = -9 # ADL_SDK_3.0/include/adl_defines.h:161
ADL_ERR_DISABLED_ADAPTER = -10 # ADL_SDK_3.0/include/adl_defines.h:164
ADL_ERR_INVALID_CALLBACK = -11 # ADL_SDK_3.0/include/adl_defines.h:167
ADL_ERR_RESOURCE_CONFLICT = -12 # ADL_SDK_3.0/include/adl_defines.h:170
ADL_DT_MONITOR = 0 # ADL_SDK_3.0/include/adl_defines.h:185
ADL_DT_TELEVISION = 1 # ADL_SDK_3.0/include/adl_defines.h:188
ADL_DT_LCD_PANEL = 2 # ADL_SDK_3.0/include/adl_defines.h:191
ADL_DT_DIGITAL_FLAT_PANEL = 3 # ADL_SDK_3.0/include/adl_defines.h:194
ADL_DT_COMPONENT_VIDEO = 4 # ADL_SDK_3.0/include/adl_defines.h:197
ADL_DT_PROJECTOR = 5 # ADL_SDK_3.0/include/adl_defines.h:200
ADL_DOT_UNKNOWN = 0 # ADL_SDK_3.0/include/adl_defines.h:210
ADL_DOT_COMPOSITE = 1 # ADL_SDK_3.0/include/adl_defines.h:213
ADL_DOT_SVIDEO = 2 # ADL_SDK_3.0/include/adl_defines.h:216
ADL_DOT_ANALOG = 3 # ADL_SDK_3.0/include/adl_defines.h:219
ADL_DOT_DIGITAL = 4 # ADL_SDK_3.0/include/adl_defines.h:222
ADL_DISPLAY_COLOR_BRIGHTNESS = 1 # ADL_SDK_3.0/include/adl_defines.h:232
ADL_DISPLAY_COLOR_CONTRAST = 2 # ADL_SDK_3.0/include/adl_defines.h:233
ADL_DISPLAY_COLOR_SATURATION = 4 # ADL_SDK_3.0/include/adl_defines.h:234
ADL_DISPLAY_COLOR_HUE = 8 # ADL_SDK_3.0/include/adl_defines.h:235
ADL_DISPLAY_COLOR_TEMPERATURE = 16 # ADL_SDK_3.0/include/adl_defines.h:236
ADL_DISPLAY_COLOR_TEMPERATURE_SOURCE_EDID = 32 # ADL_SDK_3.0/include/adl_defines.h:240
ADL_DISPLAY_COLOR_TEMPERATURE_SOURCE_USER = 64 # ADL_SDK_3.0/include/adl_defines.h:243
ADL_DISPLAY_ADJUST_OVERSCAN = 1 # ADL_SDK_3.0/include/adl_defines.h:253
ADL_DISPLAY_ADJUST_VERT_POS = 2 # ADL_SDK_3.0/include/adl_defines.h:254
ADL_DISPLAY_ADJUST_HOR_POS = 4 # ADL_SDK_3.0/include/adl_defines.h:255
ADL_DISPLAY_ADJUST_VERT_SIZE = 8 # ADL_SDK_3.0/include/adl_defines.h:256
ADL_DISPLAY_ADJUST_HOR_SIZE = 16 # ADL_SDK_3.0/include/adl_defines.h:257
ADL_DISPLAY_ADJUST_SIZEPOS = 30 # ADL_SDK_3.0/include/adl_defines.h:258
ADL_DISPLAY_CUSTOMMODES = 32 # ADL_SDK_3.0/include/adl_defines.h:259
ADL_DISPLAY_ADJUST_UNDERSCAN = 64 # ADL_SDK_3.0/include/adl_defines.h:260
ADL_DESKTOPCONFIG_UNKNOWN = 0 # ADL_SDK_3.0/include/adl_defines.h:271
ADL_DESKTOPCONFIG_SINGLE = 1 # ADL_SDK_3.0/include/adl_defines.h:272
ADL_DESKTOPCONFIG_CLONE = 4 # ADL_SDK_3.0/include/adl_defines.h:273
ADL_DESKTOPCONFIG_BIGDESK_H = 16 # ADL_SDK_3.0/include/adl_defines.h:274
ADL_DESKTOPCONFIG_BIGDESK_V = 32 # ADL_SDK_3.0/include/adl_defines.h:275
ADL_DESKTOPCONFIG_BIGDESK_HR = 64 # ADL_SDK_3.0/include/adl_defines.h:276
ADL_DESKTOPCONFIG_BIGDESK_VR = 128 # ADL_SDK_3.0/include/adl_defines.h:277
ADL_DESKTOPCONFIG_RANDR12 = 256 # ADL_SDK_3.0/include/adl_defines.h:278
ADL_MAX_DISPLAY_NAME = 256 # ADL_SDK_3.0/include/adl_defines.h:284
ADL_DISPLAYDDCINFOEX_FLAG_PROJECTORDEVICE = 1 # ADL_SDK_3.0/include/adl_defines.h:292
ADL_DISPLAYDDCINFOEX_FLAG_EDIDEXTENSION = 2 # ADL_SDK_3.0/include/adl_defines.h:293
ADL_DISPLAYDDCINFOEX_FLAG_DIGITALDEVICE = 4 # ADL_SDK_3.0/include/adl_defines.h:294
ADL_DISPLAYDDCINFOEX_FLAG_HDMIAUDIODEVICE = 8 # ADL_SDK_3.0/include/adl_defines.h:295
ADL_DISPLAYDDCINFOEX_FLAG_SUPPORTS_AI = 16 # ADL_SDK_3.0/include/adl_defines.h:296
ADL_DISPLAYDDCINFOEX_FLAG_SUPPORT_xvYCC601 = 32 # ADL_SDK_3.0/include/adl_defines.h:297
ADL_DISPLAYDDCINFOEX_FLAG_SUPPORT_xvYCC709 = 64 # ADL_SDK_3.0/include/adl_defines.h:298
ADL_DISPLAY_CONTYPE_UNKNOWN = 0 # ADL_SDK_3.0/include/adl_defines.h:308
ADL_DISPLAY_CONTYPE_VGA = 1 # ADL_SDK_3.0/include/adl_defines.h:309
ADL_DISPLAY_CONTYPE_DVI_D = 2 # ADL_SDK_3.0/include/adl_defines.h:310
ADL_DISPLAY_CONTYPE_DVI_I = 3 # ADL_SDK_3.0/include/adl_defines.h:311
ADL_DISPLAY_CONTYPE_ATICVDONGLE_NTSC = 4 # ADL_SDK_3.0/include/adl_defines.h:312
ADL_DISPLAY_CONTYPE_ATICVDONGLE_JPN = 5 # ADL_SDK_3.0/include/adl_defines.h:313
ADL_DISPLAY_CONTYPE_ATICVDONGLE_NONI2C_JPN = 6 # ADL_SDK_3.0/include/adl_defines.h:314
ADL_DISPLAY_CONTYPE_ATICVDONGLE_NONI2C_NTSC = 7 # ADL_SDK_3.0/include/adl_defines.h:315
ADL_DISPLAY_CONTYPE_HDMI_TYPE_A = 10 # ADL_SDK_3.0/include/adl_defines.h:316
ADL_DISPLAY_CONTYPE_HDMI_TYPE_B = 11 # ADL_SDK_3.0/include/adl_defines.h:317
ADL_DISPLAY_CONTYPE_SVIDEO = 12 # ADL_SDK_3.0/include/adl_defines.h:318
ADL_DISPLAY_CONTYPE_COMPOSITE = 13 # ADL_SDK_3.0/include/adl_defines.h:319
ADL_DISPLAY_CONTYPE_RCA_3COMPONENT = 14 # ADL_SDK_3.0/include/adl_defines.h:320
ADL_DISPLAY_CONTYPE_DISPLAYPORT = 15 # ADL_SDK_3.0/include/adl_defines.h:321
ADL_TV_STANDARDS = 1 # ADL_SDK_3.0/include/adl_defines.h:331
ADL_TV_SCART = 2 # ADL_SDK_3.0/include/adl_defines.h:332
ADL_STANDARD_NTSC_M = 1 # ADL_SDK_3.0/include/adl_defines.h:336
ADL_STANDARD_NTSC_JPN = 2 # ADL_SDK_3.0/include/adl_defines.h:337
ADL_STANDARD_NTSC_N = 4 # ADL_SDK_3.0/include/adl_defines.h:338
ADL_STANDARD_PAL_B = 8 # ADL_SDK_3.0/include/adl_defines.h:339
ADL_STANDARD_PAL_COMB_N = 16 # ADL_SDK_3.0/include/adl_defines.h:340
ADL_STANDARD_PAL_D = 32 # ADL_SDK_3.0/include/adl_defines.h:341
ADL_STANDARD_PAL_G = 64 # ADL_SDK_3.0/include/adl_defines.h:342
ADL_STANDARD_PAL_H = 128 # ADL_SDK_3.0/include/adl_defines.h:343
ADL_STANDARD_PAL_I = 256 # ADL_SDK_3.0/include/adl_defines.h:344
ADL_STANDARD_PAL_K = 512 # ADL_SDK_3.0/include/adl_defines.h:345
ADL_STANDARD_PAL_K1 = 1024 # ADL_SDK_3.0/include/adl_defines.h:346
ADL_STANDARD_PAL_L = 2048 # ADL_SDK_3.0/include/adl_defines.h:347
ADL_STANDARD_PAL_M = 4096 # ADL_SDK_3.0/include/adl_defines.h:348
ADL_STANDARD_PAL_N = 8192 # ADL_SDK_3.0/include/adl_defines.h:349
ADL_STANDARD_PAL_SECAM_D = 16384 # ADL_SDK_3.0/include/adl_defines.h:350
ADL_STANDARD_PAL_SECAM_K = 32768 # ADL_SDK_3.0/include/adl_defines.h:351
ADL_STANDARD_PAL_SECAM_K1 = 65536 # ADL_SDK_3.0/include/adl_defines.h:352
ADL_STANDARD_PAL_SECAM_L = 131072 # ADL_SDK_3.0/include/adl_defines.h:353
ADL_CUSTOMIZEDMODEFLAG_MODESUPPORTED = 1 # ADL_SDK_3.0/include/adl_defines.h:364
ADL_CUSTOMIZEDMODEFLAG_NOTDELETETABLE = 2 # ADL_SDK_3.0/include/adl_defines.h:365
ADL_CUSTOMIZEDMODEFLAG_INSERTBYDRIVER = 4 # ADL_SDK_3.0/include/adl_defines.h:366
ADL_CUSTOMIZEDMODEFLAG_INTERLACED = 8 # ADL_SDK_3.0/include/adl_defines.h:367
ADL_CUSTOMIZEDMODEFLAG_BASEMODE = 16 # ADL_SDK_3.0/include/adl_defines.h:368
ADL_DISPLAYDDCINFOEX_FLAG_PROJECTORDEVICE = 1 # ADL_SDK_3.0/include/adl_defines.h:378
ADL_DISPLAYDDCINFOEX_FLAG_EDIDEXTENSION = 2 # ADL_SDK_3.0/include/adl_defines.h:379
ADL_DISPLAYDDCINFOEX_FLAG_DIGITALDEVICE = 4 # ADL_SDK_3.0/include/adl_defines.h:380
ADL_DISPLAYDDCINFOEX_FLAG_HDMIAUDIODEVICE = 8 # ADL_SDK_3.0/include/adl_defines.h:381
ADL_DISPLAYDDCINFOEX_FLAG_SUPPORTS_AI = 16 # ADL_SDK_3.0/include/adl_defines.h:382
ADL_DISPLAYDDCINFOEX_FLAG_SUPPORT_xvYCC601 = 32 # ADL_SDK_3.0/include/adl_defines.h:383
ADL_DISPLAYDDCINFOEX_FLAG_SUPPORT_xvYCC709 = 64 # ADL_SDK_3.0/include/adl_defines.h:384
ADL_DISPLAY_CV_DONGLE_D1 = 1 # ADL_SDK_3.0/include/adl_defines.h:394
ADL_DISPLAY_CV_DONGLE_D2 = 2 # ADL_SDK_3.0/include/adl_defines.h:395
ADL_DISPLAY_CV_DONGLE_D3 = 4 # ADL_SDK_3.0/include/adl_defines.h:396
ADL_DISPLAY_CV_DONGLE_D4 = 8 # ADL_SDK_3.0/include/adl_defines.h:397
ADL_DISPLAY_CV_DONGLE_D5 = 16 # ADL_SDK_3.0/include/adl_defines.h:398
ADL_DISPLAY_CV_DONGLE_480I = 1 # ADL_SDK_3.0/include/adl_defines.h:403
ADL_DISPLAY_CV_DONGLE_480P = 2 # ADL_SDK_3.0/include/adl_defines.h:404
ADL_DISPLAY_CV_DONGLE_540P = 4 # ADL_SDK_3.0/include/adl_defines.h:405
ADL_DISPLAY_CV_DONGLE_720P = 8 # ADL_SDK_3.0/include/adl_defines.h:406
ADL_DISPLAY_CV_DONGLE_1080I = 16 # ADL_SDK_3.0/include/adl_defines.h:407
ADL_DISPLAY_CV_DONGLE_1080P = 32 # ADL_SDK_3.0/include/adl_defines.h:408
ADL_DISPLAY_CV_DONGLE_16_9 = 64 # ADL_SDK_3.0/include/adl_defines.h:409
ADL_DISPLAY_CV_DONGLE_720P50 = 128 # ADL_SDK_3.0/include/adl_defines.h:410
ADL_DISPLAY_CV_DONGLE_1080I25 = 256 # ADL_SDK_3.0/include/adl_defines.h:411
ADL_DISPLAY_CV_DONGLE_576I25 = 512 # ADL_SDK_3.0/include/adl_defines.h:412
ADL_DISPLAY_CV_DONGLE_576P50 = 1024 # ADL_SDK_3.0/include/adl_defines.h:413
ADL_DISPLAY_CV_DONGLE_1080P24 = 2048 # ADL_SDK_3.0/include/adl_defines.h:414
ADL_DISPLAY_CV_DONGLE_1080P25 = 4096 # ADL_SDK_3.0/include/adl_defines.h:415
ADL_DISPLAY_CV_DONGLE_1080P30 = 8192 # ADL_SDK_3.0/include/adl_defines.h:416
ADL_DISPLAY_CV_DONGLE_1080P50 = 16384 # ADL_SDK_3.0/include/adl_defines.h:417
ADL_DISPLAY_FORMAT_FORCE_720P = 1 # ADL_SDK_3.0/include/adl_defines.h:429
ADL_DISPLAY_FORMAT_FORCE_1080I = 2 # ADL_SDK_3.0/include/adl_defines.h:430
ADL_DISPLAY_FORMAT_FORCE_1080P = 4 # ADL_SDK_3.0/include/adl_defines.h:431
ADL_DISPLAY_FORMAT_FORCE_720P50 = 8 # ADL_SDK_3.0/include/adl_defines.h:432
ADL_DISPLAY_FORMAT_FORCE_1080I25 = 16 # ADL_SDK_3.0/include/adl_defines.h:433
ADL_DISPLAY_FORMAT_FORCE_576I25 = 32 # ADL_SDK_3.0/include/adl_defines.h:434
ADL_DISPLAY_FORMAT_FORCE_576P50 = 64 # ADL_SDK_3.0/include/adl_defines.h:435
ADL_DISPLAY_FORMAT_FORCE_1080P24 = 128 # ADL_SDK_3.0/include/adl_defines.h:436
ADL_DISPLAY_FORMAT_FORCE_1080P25 = 256 # ADL_SDK_3.0/include/adl_defines.h:437
ADL_DISPLAY_FORMAT_FORCE_1080P30 = 512 # ADL_SDK_3.0/include/adl_defines.h:438
ADL_DISPLAY_FORMAT_FORCE_1080P50 = 1024 # ADL_SDK_3.0/include/adl_defines.h:439
ADL_DISPLAY_FORMAT_CVDONGLEOVERIDE = 1 # ADL_SDK_3.0/include/adl_defines.h:444
ADL_DISPLAY_FORMAT_CVMODEUNDERSCAN = 2 # ADL_SDK_3.0/include/adl_defines.h:445
ADL_DISPLAY_FORMAT_FORCECONNECT_SUPPORTED = 4 # ADL_SDK_3.0/include/adl_defines.h:446
ADL_DISPLAY_FORMAT_RESTRICT_FORMAT_SELECTION = 8 # ADL_SDK_3.0/include/adl_defines.h:447
ADL_DISPLAY_FORMAT_SETASPECRATIO = 16 # ADL_SDK_3.0/include/adl_defines.h:448
ADL_DISPLAY_FORMAT_FORCEMODES = 32 # ADL_SDK_3.0/include/adl_defines.h:449
ADL_DISPLAY_FORMAT_LCDRTCCOEFF = 64 # ADL_SDK_3.0/include/adl_defines.h:450
ADL_PM_PARAM_DONT_CHANGE = 0 # ADL_SDK_3.0/include/adl_defines.h:456
ADL_BUSTYPE_PCI = 0 # ADL_SDK_3.0/include/adl_defines.h:462
ADL_BUSTYPE_AGP = 1 # ADL_SDK_3.0/include/adl_defines.h:463
ADL_BUSTYPE_PCIE = 2 # ADL_SDK_3.0/include/adl_defines.h:464
ADL_BUSTYPE_PCIE_GEN2 = 3 # ADL_SDK_3.0/include/adl_defines.h:465
ADL_STEREO_SUPPORTED = 4 # ADL_SDK_3.0/include/adl_defines.h:478
ADL_STEREO_BLUE_LINE = 8 # ADL_SDK_3.0/include/adl_defines.h:481
ADL_STEREO_OFF = 0 # ADL_SDK_3.0/include/adl_defines.h:484
ADL_STEREO_ACTIVE = 2 # ADL_SDK_3.0/include/adl_defines.h:487
ADL_STEREO_AUTO_HORIZONTAL = 1073741824 # ADL_SDK_3.0/include/adl_defines.h:490
ADL_STEREO_AUTO_VERTICAL = 2147483648 # ADL_SDK_3.0/include/adl_defines.h:493
ADL_STEREO_PASSIVE = 64 # ADL_SDK_3.0/include/adl_defines.h:496
ADL_STEREO_PASSIVE_HORIZ = 128 # ADL_SDK_3.0/include/adl_defines.h:499
ADL_STEREO_PASSIVE_VERT = 256 # ADL_SDK_3.0/include/adl_defines.h:502
ADL_WORKSTATION_LOADBALANCING_SUPPORTED = 1 # ADL_SDK_3.0/include/adl_defines.h:507
ADL_WORKSTATION_LOADBALANCING_AVAILABLE = 2 # ADL_SDK_3.0/include/adl_defines.h:510
ADL_WORKSTATION_LOADBALANCING_DISABLED = 0 # ADL_SDK_3.0/include/adl_defines.h:514
ADL_WORKSTATION_LOADBALANCING_ENABLED = 1 # ADL_SDK_3.0/include/adl_defines.h:517
ADL_CONTEXT_SPEED_UNFORCED = 0 # ADL_SDK_3.0/include/adl_defines.h:526
ADL_CONTEXT_SPEED_FORCEHIGH = 1 # ADL_SDK_3.0/include/adl_defines.h:527
ADL_CONTEXT_SPEED_FORCELOW = 2 # ADL_SDK_3.0/include/adl_defines.h:528
ADL_ADAPTER_SPEEDCAPS_SUPPORTED = 1 # ADL_SDK_3.0/include/adl_defines.h:530
ADL_GLSYNC_PORT_UNKNOWN = 0 # ADL_SDK_3.0/include/adl_defines.h:542
ADL_GLSYNC_PORT_BNC = 1 # ADL_SDK_3.0/include/adl_defines.h:545
ADL_GLSYNC_PORT_RJ45PORT1 = 2 # ADL_SDK_3.0/include/adl_defines.h:548
ADL_GLSYNC_PORT_RJ45PORT2 = 3 # ADL_SDK_3.0/include/adl_defines.h:551
ADL_GLSYNC_CONFIGMASK_NONE = 0 # ADL_SDK_3.0/include/adl_defines.h:558
ADL_GLSYNC_CONFIGMASK_SIGNALSOURCE = 1 # ADL_SDK_3.0/include/adl_defines.h:561
ADL_GLSYNC_CONFIGMASK_SYNCFIELD = 2 # ADL_SDK_3.0/include/adl_defines.h:564
ADL_GLSYNC_CONFIGMASK_SAMPLERATE = 4 # ADL_SDK_3.0/include/adl_defines.h:567
ADL_GLSYNC_CONFIGMASK_SYNCDELAY = 8 # ADL_SDK_3.0/include/adl_defines.h:570
ADL_GLSYNC_CONFIGMASK_TRIGGEREDGE = 16 # ADL_SDK_3.0/include/adl_defines.h:573
ADL_GLSYNC_CONFIGMASK_SCANRATECOEFF = 32 # ADL_SDK_3.0/include/adl_defines.h:576
ADL_GLSYNC_CONFIGMASK_FRAMELOCKCNTL = 64 # ADL_SDK_3.0/include/adl_defines.h:579
ADL_GLSYNC_FRAMELOCKCNTL_NONE = 0 # ADL_SDK_3.0/include/adl_defines.h:587
ADL_GLSYNC_FRAMELOCKCNTL_ENABLE = 1 # ADL_SDK_3.0/include/adl_defines.h:590
ADL_GLSYNC_FRAMELOCKCNTL_DISABLE = 2 # ADL_SDK_3.0/include/adl_defines.h:592
ADL_GLSYNC_FRAMELOCKCNTL_SWAP_COUNTER_RESET = 4 # ADL_SDK_3.0/include/adl_defines.h:593
ADL_GLSYNC_FRAMELOCKCNTL_SWAP_COUNTER_ACK = 8 # ADL_SDK_3.0/include/adl_defines.h:594
ADL_GLSYNC_FRAMELOCKCNTL_STATE_ENABLE = 1 # ADL_SDK_3.0/include/adl_defines.h:596
ADL_GLSYNC_COUNTER_SWAP = 1 # ADL_SDK_3.0/include/adl_defines.h:600
ADL_GLSYNC_SIGNALSOURCE_UNDEFINED = 256 # ADL_SDK_3.0/include/adl_defines.h:607
ADL_GLSYNC_SIGNALSOURCE_FREERUN = 257 # ADL_SDK_3.0/include/adl_defines.h:610
ADL_GLSYNC_SIGNALSOURCE_BNCPORT = 258 # ADL_SDK_3.0/include/adl_defines.h:613
ADL_GLSYNC_SIGNALSOURCE_RJ45PORT1 = 259 # ADL_SDK_3.0/include/adl_defines.h:616
ADL_GLSYNC_SIGNALSOURCE_RJ45PORT2 = 260 # ADL_SDK_3.0/include/adl_defines.h:619
ADL_GLSYNC_SIGNALTYPE_UNDEFINED = 0 # ADL_SDK_3.0/include/adl_defines.h:627
ADL_GLSYNC_SIGNALTYPE_480I = 1 # ADL_SDK_3.0/include/adl_defines.h:630
ADL_GLSYNC_SIGNALTYPE_576I = 2 # ADL_SDK_3.0/include/adl_defines.h:633
ADL_GLSYNC_SIGNALTYPE_480P = 3 # ADL_SDK_3.0/include/adl_defines.h:636
ADL_GLSYNC_SIGNALTYPE_576P = 4 # ADL_SDK_3.0/include/adl_defines.h:639
ADL_GLSYNC_SIGNALTYPE_720P = 5 # ADL_SDK_3.0/include/adl_defines.h:642
ADL_GLSYNC_SIGNALTYPE_1080P = 6 # ADL_SDK_3.0/include/adl_defines.h:645
ADL_GLSYNC_SIGNALTYPE_1080I = 7 # ADL_SDK_3.0/include/adl_defines.h:648
ADL_GLSYNC_SIGNALTYPE_SDI = 8 # ADL_SDK_3.0/include/adl_defines.h:651
ADL_GLSYNC_SIGNALTYPE_TTL = 9 # ADL_SDK_3.0/include/adl_defines.h:654
ADL_GLSYNC_SIGNALTYPE_ANALOG = 10 # ADL_SDK_3.0/include/adl_defines.h:657
ADL_GLSYNC_SYNCFIELD_UNDEFINED = 0 # ADL_SDK_3.0/include/adl_defines.h:664
ADL_GLSYNC_SYNCFIELD_BOTH = 1 # ADL_SDK_3.0/include/adl_defines.h:667
ADL_GLSYNC_SYNCFIELD_1 = 2 # ADL_SDK_3.0/include/adl_defines.h:670
ADL_GLSYNC_TRIGGEREDGE_UNDEFINED = 0 # ADL_SDK_3.0/include/adl_defines.h:678
ADL_GLSYNC_TRIGGEREDGE_RISING = 1 # ADL_SDK_3.0/include/adl_defines.h:681
ADL_GLSYNC_TRIGGEREDGE_FALLING = 2 # ADL_SDK_3.0/include/adl_defines.h:684
ADL_GLSYNC_TRIGGEREDGE_BOTH = 3 # ADL_SDK_3.0/include/adl_defines.h:687
ADL_GLSYNC_SCANRATECOEFF_UNDEFINED = 0 # ADL_SDK_3.0/include/adl_defines.h:695
ADL_GLSYNC_SCANRATECOEFF_x5 = 1 # ADL_SDK_3.0/include/adl_defines.h:698
ADL_GLSYNC_SCANRATECOEFF_x4 = 2 # ADL_SDK_3.0/include/adl_defines.h:701
ADL_GLSYNC_SCANRATECOEFF_x3 = 3 # ADL_SDK_3.0/include/adl_defines.h:704
ADL_GLSYNC_SCANRATECOEFF_x5_DIV_2 = 4 # ADL_SDK_3.0/include/adl_defines.h:707
ADL_GLSYNC_SCANRATECOEFF_x2 = 5 # ADL_SDK_3.0/include/adl_defines.h:710
ADL_GLSYNC_SCANRATECOEFF_x3_DIV_2 = 6 # ADL_SDK_3.0/include/adl_defines.h:713
ADL_GLSYNC_SCANRATECOEFF_x5_DIV_4 = 7 # ADL_SDK_3.0/include/adl_defines.h:716
ADL_GLSYNC_SCANRATECOEFF_x1 = 8 # ADL_SDK_3.0/include/adl_defines.h:719
ADL_GLSYNC_SCANRATECOEFF_x4_DIV_5 = 9 # ADL_SDK_3.0/include/adl_defines.h:722
ADL_GLSYNC_SCANRATECOEFF_x2_DIV_3 = 10 # ADL_SDK_3.0/include/adl_defines.h:725
ADL_GLSYNC_SCANRATECOEFF_x1_DIV_2 = 11 # ADL_SDK_3.0/include/adl_defines.h:728
ADL_GLSYNC_SCANRATECOEFF_x2_DIV_5 = 12 # ADL_SDK_3.0/include/adl_defines.h:731
ADL_GLSYNC_SCANRATECOEFF_x1_DIV_3 = 13 # ADL_SDK_3.0/include/adl_defines.h:734
ADL_GLSYNC_SCANRATECOEFF_x1_DIV_4 = 14 # ADL_SDK_3.0/include/adl_defines.h:737
ADL_GLSYNC_SCANRATECOEFF_x1_DIV_5 = 15 # ADL_SDK_3.0/include/adl_defines.h:740
ADL_GLSYNC_PORTSTATE_UNDEFINED = 0 # ADL_SDK_3.0/include/adl_defines.h:748
ADL_GLSYNC_PORTSTATE_NOCABLE = 1 # ADL_SDK_3.0/include/adl_defines.h:751
ADL_GLSYNC_PORTSTATE_IDLE = 2 # ADL_SDK_3.0/include/adl_defines.h:754
ADL_GLSYNC_PORTSTATE_INPUT = 3 # ADL_SDK_3.0/include/adl_defines.h:757
ADL_GLSYNC_PORTSTATE_OUTPUT = 4 # ADL_SDK_3.0/include/adl_defines.h:760
ADL_GLSYNC_LEDTYPE_BNC = 0 # ADL_SDK_3.0/include/adl_defines.h:768
ADL_GLSYNC_LEDTYPE_RJ45_LEFT = 0 # ADL_SDK_3.0/include/adl_defines.h:771
ADL_GLSYNC_LEDTYPE_RJ45_RIGHT = 1 # ADL_SDK_3.0/include/adl_defines.h:774
ADL_GLSYNC_LEDCOLOR_UNDEFINED = 0 # ADL_SDK_3.0/include/adl_defines.h:782
ADL_GLSYNC_LEDCOLOR_NOLIGHT = 1 # ADL_SDK_3.0/include/adl_defines.h:785
ADL_GLSYNC_LEDCOLOR_YELLOW = 2 # ADL_SDK_3.0/include/adl_defines.h:788
ADL_GLSYNC_LEDCOLOR_RED = 3 # ADL_SDK_3.0/include/adl_defines.h:791
ADL_GLSYNC_LEDCOLOR_GREEN = 4 # ADL_SDK_3.0/include/adl_defines.h:794
ADL_GLSYNC_LEDCOLOR_FLASH_GREEN = 5 # ADL_SDK_3.0/include/adl_defines.h:797
ADL_GLSYNC_PORTCNTL_NONE = 0 # ADL_SDK_3.0/include/adl_defines.h:805
ADL_GLSYNC_PORTCNTL_OUTPUT = 1 # ADL_SDK_3.0/include/adl_defines.h:808
ADL_GLSYNC_MODECNTL_NONE = 0 # ADL_SDK_3.0/include/adl_defines.h:816
ADL_GLSYNC_MODECNTL_GENLOCK = 1 # ADL_SDK_3.0/include/adl_defines.h:819
ADL_GLSYNC_MODECNTL_TIMINGSERVER = 2 # ADL_SDK_3.0/include/adl_defines.h:822
ADL_GLSYNC_MODECNTL_STATUS_NONE = 0 # ADL_SDK_3.0/include/adl_defines.h:828
ADL_GLSYNC_MODECNTL_STATUS_GENLOCK = 1 # ADL_SDK_3.0/include/adl_defines.h:831
ADL_GLSYNC_MODECNTL_STATUS_SETMODE_REQUIRED = 2 # ADL_SDK_3.0/include/adl_defines.h:834
ADL_GLSYNC_MODECNTL_STATUS_GENLOCK_ALLOWED = 4 # ADL_SDK_3.0/include/adl_defines.h:837
ADL_MAX_GLSYNC_PORTS = 8 # ADL_SDK_3.0/include/adl_defines.h:839
ADL_MAX_GLSYNC_PORT_LEDS = 8 # ADL_SDK_3.0/include/adl_defines.h:840
ADL_XFIREX_STATE_NOINTERCONNECT = 1 # ADL_SDK_3.0/include/adl_defines.h:849
ADL_XFIREX_STATE_DOWNGRADEPIPES = 2 # ADL_SDK_3.0/include/adl_defines.h:850
ADL_XFIREX_STATE_DOWNGRADEMEM = 4 # ADL_SDK_3.0/include/adl_defines.h:851
ADL_XFIREX_STATE_REVERSERECOMMENDED = 8 # ADL_SDK_3.0/include/adl_defines.h:852
ADL_XFIREX_STATE_3DACTIVE = 16 # ADL_SDK_3.0/include/adl_defines.h:853
ADL_XFIREX_STATE_MASTERONSLAVE = 32 # ADL_SDK_3.0/include/adl_defines.h:854
ADL_XFIREX_STATE_NODISPLAYCONNECT = 64 # ADL_SDK_3.0/include/adl_defines.h:855
ADL_XFIREX_STATE_NOPRIMARYVIEW = 128 # ADL_SDK_3.0/include/adl_defines.h:856
ADL_XFIREX_STATE_DOWNGRADEVISMEM = 256 # ADL_SDK_3.0/include/adl_defines.h:857
ADL_XFIREX_STATE_LESSTHAN8LANE_MASTER = 512 # ADL_SDK_3.0/include/adl_defines.h:858
ADL_XFIREX_STATE_LESSTHAN8LANE_SLAVE = 1024 # ADL_SDK_3.0/include/adl_defines.h:859
ADL_XFIREX_STATE_PEERTOPEERFAILED = 2048 # ADL_SDK_3.0/include/adl_defines.h:860
ADL_XFIREX_STATE_MEMISDOWNGRADED = 65536 # ADL_SDK_3.0/include/adl_defines.h:861
ADL_XFIREX_STATE_PIPESDOWNGRADED = 131072 # ADL_SDK_3.0/include/adl_defines.h:862
ADL_XFIREX_STATE_XFIREXACTIVE = 262144 # ADL_SDK_3.0/include/adl_defines.h:863
ADL_XFIREX_STATE_VISMEMISDOWNGRADED = 524288 # ADL_SDK_3.0/include/adl_defines.h:864
ADL_XFIREX_STATE_INVALIDINTERCONNECTION = 1048576 # ADL_SDK_3.0/include/adl_defines.h:865
ADL_XFIREX_STATE_NONP2PMODE = 2097152 # ADL_SDK_3.0/include/adl_defines.h:866
ADL_XFIREX_STATE_DOWNGRADEMEMBANKS = 4194304 # ADL_SDK_3.0/include/adl_defines.h:867
ADL_XFIREX_STATE_MEMBANKSDOWNGRADED = 8388608 # ADL_SDK_3.0/include/adl_defines.h:868
ADL_XFIREX_STATE_DUALDISPLAYSALLOWED = 16777216 # ADL_SDK_3.0/include/adl_defines.h:869
ADL_XFIREX_STATE_P2P_APERTURE_MAPPING = 33554432 # ADL_SDK_3.0/include/adl_defines.h:870
ADL_XFIREX_STATE_P2PFLUSH_REQUIRED = 33554432 # ADL_SDK_3.0/include/adl_defines.h:871
ADL_XFIREX_STATE_XSP_CONNECTED = 67108864 # ADL_SDK_3.0/include/adl_defines.h:872
ADL_XFIREX_STATE_ENABLE_CF_REBOOT_REQUIRED = 134217728 # ADL_SDK_3.0/include/adl_defines.h:873
ADL_XFIREX_STATE_DISABLE_CF_REBOOT_REQUIRED = 268435456 # ADL_SDK_3.0/include/adl_defines.h:874
ADL_XFIREX_STATE_DRV_HANDLE_DOWNGRADE_KEY = 536870912 # ADL_SDK_3.0/include/adl_defines.h:875
ADL_XFIREX_STATE_CF_RECONFIG_REQUIRED = 1073741824 # ADL_SDK_3.0/include/adl_defines.h:876
ADL_XFIREX_STATE_ERRORGETTINGSTATUS = 2147483648 # ADL_SDK_3.0/include/adl_defines.h:877
ADL_DISPLAY_PIXELFORMAT_UNKNOWN = 0 # ADL_SDK_3.0/include/adl_defines.h:897
ADL_DISPLAY_PIXELFORMAT_RGB = 1 # ADL_SDK_3.0/include/adl_defines.h:898
ADL_DISPLAY_PIXELFORMAT_YCRCB444 = 2 # ADL_SDK_3.0/include/adl_defines.h:899
ADL_DISPLAY_PIXELFORMAT_YCRCB422 = 4 # ADL_SDK_3.0/include/adl_defines.h:901
ADL_DISPLAY_PIXELFORMAT_RGB_LIMITED_RANGE = 8 # ADL_SDK_3.0/include/adl_defines.h:903
ADL_DISPLAY_PIXELFORMAT_RGB_FULL_RANGE = 1 # ADL_SDK_3.0/include/adl_defines.h:904
ADL_DL_DISPLAYCONFIG_CONTYPE_UNKNOWN = 0 # ADL_SDK_3.0/include/adl_defines.h:915
ADL_DL_DISPLAYCONFIG_CONTYPE_CV_NONI2C_JP = 1 # ADL_SDK_3.0/include/adl_defines.h:916
ADL_DL_DISPLAYCONFIG_CONTYPE_CV_JPN = 2 # ADL_SDK_3.0/include/adl_defines.h:917
ADL_DL_DISPLAYCONFIG_CONTYPE_CV_NA = 3 # ADL_SDK_3.0/include/adl_defines.h:918
ADL_DL_DISPLAYCONFIG_CONTYPE_CV_NONI2C_NA = 4 # ADL_SDK_3.0/include/adl_defines.h:919
ADL_DL_DISPLAYCONFIG_CONTYPE_VGA = 5 # ADL_SDK_3.0/include/adl_defines.h:920
ADL_DL_DISPLAYCONFIG_CONTYPE_DVI_D = 6 # ADL_SDK_3.0/include/adl_defines.h:921
ADL_DL_DISPLAYCONFIG_CONTYPE_DVI_I = 7 # ADL_SDK_3.0/include/adl_defines.h:922
ADL_DL_DISPLAYCONFIG_CONTYPE_HDMI_TYPE_A = 8 # ADL_SDK_3.0/include/adl_defines.h:923
ADL_DL_DISPLAYCONFIG_CONTYPE_HDMI_TYPE_B = 9 # ADL_SDK_3.0/include/adl_defines.h:924
ADL_DL_DISPLAYCONFIG_CONTYPE_DISPLAYPORT = 10 # ADL_SDK_3.0/include/adl_defines.h:925
ADL_DISPLAY_DISPLAYINFO_DISPLAYCONNECTED = 1 # ADL_SDK_3.0/include/adl_defines.h:944
ADL_DISPLAY_DISPLAYINFO_DISPLAYMAPPED = 2 # ADL_SDK_3.0/include/adl_defines.h:945
ADL_DISPLAY_DISPLAYINFO_NONLOCAL = 4 # ADL_SDK_3.0/include/adl_defines.h:946
ADL_DISPLAY_DISPLAYINFO_FORCIBLESUPPORTED = 8 # ADL_SDK_3.0/include/adl_defines.h:947
ADL_DISPLAY_DISPLAYINFO_GENLOCKSUPPORTED = 16 # ADL_SDK_3.0/include/adl_defines.h:948
ADL_DISPLAY_DISPLAYINFO_MULTIVPU_SUPPORTED = 32 # ADL_SDK_3.0/include/adl_defines.h:949
ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_SINGLE = 256 # ADL_SDK_3.0/include/adl_defines.h:951
ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_CLONE = 512 # ADL_SDK_3.0/include/adl_defines.h:952
ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_2VSTRETCH = 1024 # ADL_SDK_3.0/include/adl_defines.h:956
ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_2HSTRETCH = 2048 # ADL_SDK_3.0/include/adl_defines.h:957
ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_EXTENDED = 4096 # ADL_SDK_3.0/include/adl_defines.h:958
ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_NSTRETCH1GPU = 65536 # ADL_SDK_3.0/include/adl_defines.h:962
ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_NSTRETCHNGPU = 131072 # ADL_SDK_3.0/include/adl_defines.h:963
ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_RESERVED2 = 262144 # ADL_SDK_3.0/include/adl_defines.h:964
ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_RESERVED3 = 524288 # ADL_SDK_3.0/include/adl_defines.h:965
ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_NOTACTIVE = 1 # ADL_SDK_3.0/include/adl_defines.h:985
ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_SINGLE = 2 # ADL_SDK_3.0/include/adl_defines.h:986
ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_CLONE = 4 # ADL_SDK_3.0/include/adl_defines.h:987
ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_NSTRETCH1GPU = 8 # ADL_SDK_3.0/include/adl_defines.h:988
ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_NSTRETCHNGPU = 16 # ADL_SDK_3.0/include/adl_defines.h:989
ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_2VSTRETCH = 32 # ADL_SDK_3.0/include/adl_defines.h:993
ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_2HSTRETCH = 64 # ADL_SDK_3.0/include/adl_defines.h:994
ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_EXTENDED = 128 # ADL_SDK_3.0/include/adl_defines.h:995
ADL_ADAPTER_DISPLAYCAP_PREFERDISPLAY_SUPPORTED = 256 # ADL_SDK_3.0/include/adl_defines.h:997
ADL_ADAPTER_DISPLAYCAP_BEZEL_SUPPORTED = 512 # ADL_SDK_3.0/include/adl_defines.h:998
ADL_DISPLAY_DISPLAYMAP_MANNER_RESERVED = 1 # ADL_SDK_3.0/include/adl_defines.h:1011
ADL_DISPLAY_DISPLAYMAP_MANNER_NOTACTIVE = 2 # ADL_SDK_3.0/include/adl_defines.h:1012
ADL_DISPLAY_DISPLAYMAP_MANNER_SINGLE = 4 # ADL_SDK_3.0/include/adl_defines.h:1013
ADL_DISPLAY_DISPLAYMAP_MANNER_CLONE = 8 # ADL_SDK_3.0/include/adl_defines.h:1014
ADL_DISPLAY_DISPLAYMAP_MANNER_RESERVED1 = 16 # ADL_SDK_3.0/include/adl_defines.h:1015
ADL_DISPLAY_DISPLAYMAP_MANNER_HSTRETCH = 32 # ADL_SDK_3.0/include/adl_defines.h:1017
ADL_DISPLAY_DISPLAYMAP_MANNER_VSTRETCH = 64 # ADL_SDK_3.0/include/adl_defines.h:1018
ADL_DISPLAY_DISPLAYMAP_MANNER_VLD = 128 # ADL_SDK_3.0/include/adl_defines.h:1019
ADL_DISPLAY_DISPLAYMAP_OPTION_GPUINFO = 1 # ADL_SDK_3.0/include/adl_defines.h:1034
ADL_DISPLAY_DISPLAYTARGET_PREFERRED = 1 # ADL_SDK_3.0/include/adl_defines.h:1046
ADL_DISPLAY_POSSIBLEMAPRESULT_VALID = 1 # ADL_SDK_3.0/include/adl_defines.h:1058
ADL_DISPLAY_POSSIBLEMAPRESULT_BEZELSUPPORTED = 2 # ADL_SDK_3.0/include/adl_defines.h:1059
ADL_DISPLAY_MODE_COLOURFORMAT_565 = 1 # ADL_SDK_3.0/include/adl_defines.h:1076
ADL_DISPLAY_MODE_COLOURFORMAT_8888 = 2 # ADL_SDK_3.0/include/adl_defines.h:1077
ADL_DISPLAY_MODE_ORIENTATION_SUPPORTED_000 = 4 # ADL_SDK_3.0/include/adl_defines.h:1078
ADL_DISPLAY_MODE_ORIENTATION_SUPPORTED_090 = 8 # ADL_SDK_3.0/include/adl_defines.h:1079
ADL_DISPLAY_MODE_ORIENTATION_SUPPORTED_180 = 16 # ADL_SDK_3.0/include/adl_defines.h:1080
ADL_DISPLAY_MODE_ORIENTATION_SUPPORTED_270 = 32 # ADL_SDK_3.0/include/adl_defines.h:1081
ADL_DISPLAY_MODE_REFRESHRATE_ROUNDED = 64 # ADL_SDK_3.0/include/adl_defines.h:1082
ADL_DISPLAY_MODE_REFRESHRATE_ONLY = 128 # ADL_SDK_3.0/include/adl_defines.h:1083
ADL_DISPLAY_MODE_PROGRESSIVE_FLAG = 0 # ADL_SDK_3.0/include/adl_defines.h:1085
ADL_DISPLAY_MODE_INTERLACED_FLAG = 2 # ADL_SDK_3.0/include/adl_defines.h:1086
ADL_OSMODEINFOXPOS_DEFAULT = -640 # ADL_SDK_3.0/include/adl_defines.h:1100
ADL_OSMODEINFOYPOS_DEFAULT = 0 # ADL_SDK_3.0/include/adl_defines.h:1101
ADL_OSMODEINFOXRES_DEFAULT = 640 # ADL_SDK_3.0/include/adl_defines.h:1102
ADL_OSMODEINFOYRES_DEFAULT = 480 # ADL_SDK_3.0/include/adl_defines.h:1103
ADL_OSMODEINFOXRES_DEFAULT800 = 800 # ADL_SDK_3.0/include/adl_defines.h:1104
ADL_OSMODEINFOYRES_DEFAULT600 = 600 # ADL_SDK_3.0/include/adl_defines.h:1105
ADL_OSMODEINFOREFRESHRATE_DEFAULT = 60 # ADL_SDK_3.0/include/adl_defines.h:1106
ADL_OSMODEINFOCOLOURDEPTH_DEFAULT = 8 # ADL_SDK_3.0/include/adl_defines.h:1107
ADL_OSMODEINFOCOLOURDEPTH_DEFAULT16 = 16 # ADL_SDK_3.0/include/adl_defines.h:1108
ADL_OSMODEINFOCOLOURDEPTH_DEFAULT24 = 24 # ADL_SDK_3.0/include/adl_defines.h:1109
ADL_OSMODEINFOCOLOURDEPTH_DEFAULT32 = 32 # ADL_SDK_3.0/include/adl_defines.h:1110
ADL_OSMODEINFOORIENTATION_DEFAULT = 0 # ADL_SDK_3.0/include/adl_defines.h:1111
ADL_OSMODEFLAG_DEFAULT = 0 # ADL_SDK_3.0/include/adl_defines.h:1113
ADL_I2C_MAJOR_API_REV = 1 # ADL_SDK_3.0/include/adl_defines.h:1193
ADL_I2C_MINOR_DEFAULT_API_REV = 0 # ADL_SDK_3.0/include/adl_defines.h:1194
ADL_I2C_MINOR_OEM_API_REV = 1 # ADL_SDK_3.0/include/adl_defines.h:1195
ADL_DL_I2C_LINE_OEM = 1 # ADL_SDK_3.0/include/adl_defines.h:1199
ADL_DL_I2C_LINE_OD_CONTROL = 2 # ADL_SDK_3.0/include/adl_defines.h:1200
ADL_DL_I2C_LINE_OEM2 = 3 # ADL_SDK_3.0/include/adl_defines.h:1201
ADL_DL_I2C_MAXDATASIZE = 64 # ADL_SDK_3.0/include/adl_defines.h:1205
ADL_DL_I2C_MAXWRITEDATASIZE = 12 # ADL_SDK_3.0/include/adl_defines.h:1206
ADL_DL_I2C_MAXADDRESSLENGTH = 6 # ADL_SDK_3.0/include/adl_defines.h:1207
ADL_DL_I2C_MAXOFFSETLENGTH = 4 # ADL_SDK_3.0/include/adl_defines.h:1208
ADL_DL_DISPLAYPROPERTY_TYPE_UNKNOWN = 0 # ADL_SDK_3.0/include/adl_defines.h:1213
ADL_DL_DISPLAYPROPERTY_TYPE_EXPANSIONMODE = 1 # ADL_SDK_3.0/include/adl_defines.h:1214
ADL_DL_DISPLAYPROPERTY_TYPE_USEUNDERSCANSCALING = 2 # ADL_SDK_3.0/include/adl_defines.h:1215
ADL_DL_DISPLAYPROPERTY_EXPANSIONMODE_CENTER = 0 # ADL_SDK_3.0/include/adl_defines.h:1219
ADL_DL_DISPLAYPROPERTY_EXPANSIONMODE_FULLSCREEN = 1 # ADL_SDK_3.0/include/adl_defines.h:1220
ADL_DL_DISPLAYPROPERTY_EXPANSIONMODE_ASPECTRATIO = 2 # ADL_SDK_3.0/include/adl_defines.h:1221
ADL_DL_DISPLAY_DITHER_UNKNOWN = 0 # ADL_SDK_3.0/include/adl_defines.h:1225
ADL_DL_DISPLAY_DITHER_DISABLED = 1 # ADL_SDK_3.0/include/adl_defines.h:1226
ADL_DL_DISPLAY_DITHER_ENABLED = 2 # ADL_SDK_3.0/include/adl_defines.h:1227
ADL_MAX_EDIDDATA_SIZE = 256 # ADL_SDK_3.0/include/adl_defines.h:1231
ADL_MAX_EDID_EXTENSION_BLOCKS = 3 # ADL_SDK_3.0/include/adl_defines.h:1233
ADL_DL_CONTROLLER_OVERLAY_ALPHA = 0 # ADL_SDK_3.0/include/adl_defines.h:1235
ADL_DL_CONTROLLER_OVERLAY_ALPHAPERPIX = 1 # ADL_SDK_3.0/include/adl_defines.h:1236
ADL_DL_DISPLAY_DATA_PACKET__INFO_PACKET_RESET = 0 # ADL_SDK_3.0/include/adl_defines.h:1238
ADL_DL_DISPLAY_DATA_PACKET__INFO_PACKET_SET = 1 # ADL_SDK_3.0/include/adl_defines.h:1239
ADL_DL_DISPLAY_DATA_PACKET__TYPE__AVI = 1 # ADL_SDK_3.0/include/adl_defines.h:1245
ADL_DL_DISPLAY_DATA_PACKET__TYPE__RESERVED = 2 # ADL_SDK_3.0/include/adl_defines.h:1246
ADL_DL_DISPLAY_DATA_PACKET__TYPE__VENDORINFO = 4 # ADL_SDK_3.0/include/adl_defines.h:1247
ADL_GAMUT_MATRIX_SD = 1 # ADL_SDK_3.0/include/adl_defines.h:1253
ADL_GAMUT_MATRIX_HD = 2 # ADL_SDK_3.0/include/adl_defines.h:1255
ADL_DL_CLOCKINFO_FLAG_FULLSCREEN3DONLY = 1 # ADL_SDK_3.0/include/adl_defines.h:1264
ADL_DL_CLOCKINFO_FLAG_ALWAYSFULLSCREEN3D = 2 # ADL_SDK_3.0/include/adl_defines.h:1265
ADL_DL_CLOCKINFO_FLAG_VPURECOVERYREDUCED = 4 # ADL_SDK_3.0/include/adl_defines.h:1266
ADL_DL_CLOCKINFO_FLAG_THERMALPROTECTION = 8 # ADL_SDK_3.0/include/adl_defines.h:1267
ADL_DL_POWERXPRESS_GPU_INTEGRATED = 1 # ADL_SDK_3.0/include/adl_defines.h:1275
ADL_DL_POWERXPRESS_GPU_DISCRETE = 2 # ADL_SDK_3.0/include/adl_defines.h:1276
ADL_DL_POWERXPRESS_SWITCH_RESULT_STARTED = 1 # ADL_SDK_3.0/include/adl_defines.h:1282
ADL_DL_POWERXPRESS_SWITCH_RESULT_DECLINED = 2 # ADL_SDK_3.0/include/adl_defines.h:1284
ADL_DL_POWERXPRESS_SWITCH_RESULT_ALREADY = 3 # ADL_SDK_3.0/include/adl_defines.h:1286
ADL_DL_POWERXPRESS_VERSION_MAJOR = 2 # ADL_SDK_3.0/include/adl_defines.h:1293
ADL_DL_POWERXPRESS_VERSION_MINOR = 0 # ADL_SDK_3.0/include/adl_defines.h:1295
ADL_DL_POWERXPRESS_VERSION = 131072 # ADL_SDK_3.0/include/adl_defines.h:1297
ADL_DL_THERMAL_DOMAIN_OTHER = 0 # ADL_SDK_3.0/include/adl_defines.h:1301
ADL_DL_THERMAL_DOMAIN_GPU = 1 # ADL_SDK_3.0/include/adl_defines.h:1302
ADL_DL_THERMAL_FLAG_INTERRUPT = 1 # ADL_SDK_3.0/include/adl_defines.h:1306
ADL_DL_THERMAL_FLAG_FANCONTROL = 2 # ADL_SDK_3.0/include/adl_defines.h:1307
ADL_DL_FANCTRL_SUPPORTS_PERCENT_READ = 1 # ADL_SDK_3.0/include/adl_defines.h:1315
ADL_DL_FANCTRL_SUPPORTS_PERCENT_WRITE = 2 # ADL_SDK_3.0/include/adl_defines.h:1316
ADL_DL_FANCTRL_SUPPORTS_RPM_READ = 4 # ADL_SDK_3.0/include/adl_defines.h:1317
ADL_DL_FANCTRL_SUPPORTS_RPM_WRITE = 8 # ADL_SDK_3.0/include/adl_defines.h:1318
ADL_DL_FANCTRL_SPEED_TYPE_PERCENT = 1 # ADL_SDK_3.0/include/adl_defines.h:1324
ADL_DL_FANCTRL_SPEED_TYPE_RPM = 2 # ADL_SDK_3.0/include/adl_defines.h:1325
ADL_DL_FANCTRL_FLAG_USER_DEFINED_SPEED = 1 # ADL_SDK_3.0/include/adl_defines.h:1329
ADL_DL_MAX_MVPU_ADAPTERS = 4 # ADL_SDK_3.0/include/adl_defines.h:1333
MVPU_ADAPTER_0 = 1 # ADL_SDK_3.0/include/adl_defines.h:1334
MVPU_ADAPTER_1 = 2 # ADL_SDK_3.0/include/adl_defines.h:1335
MVPU_ADAPTER_2 = 4 # ADL_SDK_3.0/include/adl_defines.h:1336
MVPU_ADAPTER_3 = 8 # ADL_SDK_3.0/include/adl_defines.h:1337
ADL_DL_MAX_REGISTRY_PATH = 256 # ADL_SDK_3.0/include/adl_defines.h:1338
ADL_DL_MVPU_STATUS_OFF = 0 # ADL_SDK_3.0/include/adl_defines.h:1342
ADL_DL_MVPU_STATUS_ON = 1 # ADL_SDK_3.0/include/adl_defines.h:1343
ADL_ASIC_UNDEFINED = 0 # ADL_SDK_3.0/include/adl_defines.h:1353
ADL_ASIC_DISCRETE = 1 # ADL_SDK_3.0/include/adl_defines.h:1354
ADL_ASIC_INTEGRATED = 2 # ADL_SDK_3.0/include/adl_defines.h:1355
ADL_ASIC_FIREGL = 4 # ADL_SDK_3.0/include/adl_defines.h:1356
ADL_ASIC_FIREMV = 8 # ADL_SDK_3.0/include/adl_defines.h:1357
ADL_ASIC_XGP = 16 # ADL_SDK_3.0/include/adl_defines.h:1358
ADL_ASIC_FUSION = 32 # ADL_SDK_3.0/include/adl_defines.h:1359
ADL_DL_TIMINGFLAG_DOUBLE_SCAN = 1 # ADL_SDK_3.0/include/adl_defines.h:1369
ADL_DL_TIMINGFLAG_INTERLACED = 2 # ADL_SDK_3.0/include/adl_defines.h:1370
ADL_DL_TIMINGFLAG_H_SYNC_POLARITY = 4 # ADL_SDK_3.0/include/adl_defines.h:1371
ADL_DL_TIMINGFLAG_V_SYNC_POLARITY = 8 # ADL_SDK_3.0/include/adl_defines.h:1372
ADL_DL_MODETIMING_STANDARD_CVT = 1 # ADL_SDK_3.0/include/adl_defines.h:1382
ADL_DL_MODETIMING_STANDARD_GTF = 2 # ADL_SDK_3.0/include/adl_defines.h:1384
ADL_DL_MODETIMING_STANDARD_DMT = 4 # ADL_SDK_3.0/include/adl_defines.h:1386
ADL_DL_MODETIMING_STANDARD_CUSTOM = 8 # ADL_SDK_3.0/include/adl_defines.h:1388
ADL_DL_MODETIMING_STANDARD_DRIVER_DEFAULT = 16 # ADL_SDK_3.0/include/adl_defines.h:1390
ADL_DL_MODETIMING_STANDARD_CVT_RB = 32
ADL_XSERVERINFO_XINERAMAACTIVE = 1 # ADL_SDK_3.0/include/adl_defines.h:1406
ADL_XSERVERINFO_RANDR12SUPPORTED = 2 # ADL_SDK_3.0/include/adl_defines.h:1412
ADL_CONTROLLERVECTOR_0 = 1 # ADL_SDK_3.0/include/adl_defines.h:1422
ADL_CONTROLLERVECTOR_1 = 2 # ADL_SDK_3.0/include/adl_defines.h:1424
ADL_DISPLAY_SLSGRID_ORIENTATION_000 = 1 # ADL_SDK_3.0/include/adl_defines.h:1427
ADL_DISPLAY_SLSGRID_ORIENTATION_090 = 2 # ADL_SDK_3.0/include/adl_defines.h:1428
ADL_DISPLAY_SLSGRID_ORIENTATION_180 = 4 # ADL_SDK_3.0/include/adl_defines.h:1429
ADL_DISPLAY_SLSGRID_ORIENTATION_270 = 8 # ADL_SDK_3.0/include/adl_defines.h:1430
ADL_DISPLAY_SLSGRID_CAP_OPTION_RELATIVETO_LANDSCAPE = 1 # ADL_SDK_3.0/include/adl_defines.h:1431
ADL_DISPLAY_SLSGRID_CAP_OPTION_RELATIVETO_CURRENTANGLE = 2 # ADL_SDK_3.0/include/adl_defines.h:1432
ADL_DISPLAY_SLSGRID_PORTAIT_MODE = 4 # ADL_SDK_3.0/include/adl_defines.h:1433
ADL_DISPLAY_SLSMAPCONFIG_GET_OPTION_RELATIVETO_LANDSCAPE = 1 # ADL_SDK_3.0/include/adl_defines.h:1436
ADL_DISPLAY_SLSMAPCONFIG_GET_OPTION_RELATIVETO_CURRENTANGLE = 2 # ADL_SDK_3.0/include/adl_defines.h:1437
ADL_DISPLAY_SLSMAPCONFIG_CREATE_OPTION_RELATIVETO_LANDSCAPE = 1 # ADL_SDK_3.0/include/adl_defines.h:1439
ADL_DISPLAY_SLSMAPCONFIG_CREATE_OPTION_RELATIVETO_CURRENTANGLE = 2 # ADL_SDK_3.0/include/adl_defines.h:1440
ADL_DISPLAY_SLSMAPCONFIG_REARRANGE_OPTION_RELATIVETO_LANDSCAPE = 1 # ADL_SDK_3.0/include/adl_defines.h:1442
ADL_DISPLAY_SLSMAPCONFIG_REARRANGE_OPTION_RELATIVETO_CURRENTANGLE = 2 # ADL_SDK_3.0/include/adl_defines.h:1443
ADL_DISPLAY_SLSGRID_RELATIVETO_LANDSCAPE = 16 # ADL_SDK_3.0/include/adl_defines.h:1446
ADL_DISPLAY_SLSGRID_RELATIVETO_CURRENTANGLE = 32 # ADL_SDK_3.0/include/adl_defines.h:1447
ADL_DISPLAY_SLSMAP_BEZELMODE = 16 # ADL_SDK_3.0/include/adl_defines.h:1452
ADL_DISPLAY_SLSMAP_DISPLAYARRANGED = 2 # ADL_SDK_3.0/include/adl_defines.h:1455
ADL_DISPLAY_SLSMAP_CURRENTCONFIG = 4 # ADL_SDK_3.0/include/adl_defines.h:1458
ADL_DISPLAY_SLSMAPINDEXLIST_OPTION_ACTIVE = 1 # ADL_SDK_3.0/include/adl_defines.h:1462
ADL_DISPLAY_BEZELOFFSET_STEPBYSTEPSET = 4 # ADL_SDK_3.0/include/adl_defines.h:1466
ADL_DISPLAY_BEZELOFFSET_COMMIT = 8 # ADL_SDK_3.0/include/adl_defines.h:1467
# http://forums.amd.com/forum/messageview.cfm?catid=347&threadid=144777
ADL_DL_I2C_LINE_OEM=0x00000001
ADL_DL_I2C_LINE_OD_CONTROL=0x00000002
ADL_DL_I2C_LINE_OEM2=0x00000003
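# --- Illustrative sketch, not part of the original autogenerated bindings ---
# Most ADL_* values above are bit flags (1, 2, 4, 8, ...), so a caller would
# typically combine and test them with bitwise operators, e.g.:
#
#     info_mask = (ADL_DISPLAY_DISPLAYINFO_DISPLAYCONNECTED |
#                  ADL_DISPLAY_DISPLAYINFO_DISPLAYMAPPED)
#     is_mapped = bool(info_mask & ADL_DISPLAY_DISPLAYINFO_DISPLAYMAPPED)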
|
mjmvisser/adl3
|
adl3/adl_defines.py
|
Python
|
mit
| 39,284
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_space_rebel_tier3_ezkiel.iff"
result.attribute_template_id = 9
result.stfName("npc_name","ishi_tib_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/mobile/shared_space_rebel_tier3_ezkiel.py
|
Python
|
mit
| 452
|
import ConfigParser
import StringIO
from test_support import TestFailed, verify
def basic(src):
print "Testing basic accessors..."
cf = ConfigParser.ConfigParser()
sio = StringIO.StringIO(src)
cf.readfp(sio)
L = cf.sections()
L.sort()
verify(L == [r'Commented Bar',
r'Foo Bar',
r'Internationalized Stuff',
r'Section\with$weird%characters[' '\t',
r'Spacey Bar',
],
"unexpected list of section names")
# The use of spaces in the section names serves as a regression test for
# SourceForge bug #115357.
# http://sourceforge.net/bugs/?func=detailbug&group_id=5470&bug_id=115357
verify(cf.get('Foo Bar', 'foo', raw=1) == 'bar')
verify(cf.get('Spacey Bar', 'foo', raw=1) == 'bar')
verify(cf.get('Commented Bar', 'foo', raw=1) == 'bar')
verify('__name__' not in cf.options("Foo Bar"),
'__name__ "option" should not be exposed by the API!')
# Make sure the right things happen for remove_option();
# added to include check for SourceForge bug #123324:
    verify(cf.remove_option('Foo Bar', 'foo'),
           "remove_option() failed to report existence of option")
verify(not cf.has_option('Foo Bar', 'foo'),
"remove_option() failed to remove option")
    verify(not cf.remove_option('Foo Bar', 'foo'),
           "remove_option() failed to report non-existence of option"
           " that was removed")
try:
cf.remove_option('No Such Section', 'foo')
except ConfigParser.NoSectionError:
pass
else:
raise TestFailed(
"remove_option() failed to report non-existance of option"
" that never existed")
def case_sensitivity():
print "Testing case sensitivity..."
cf = ConfigParser.ConfigParser()
cf.add_section("A")
cf.add_section("a")
L = cf.sections()
L.sort()
verify(L == ["A", "a"])
cf.set("a", "B", "value")
verify(cf.options("a") == ["b"])
verify(cf.get("a", "b", raw=1) == "value",
"could not locate option, expecting case-insensitive option names")
verify(cf.has_option("a", "b"))
cf.set("A", "A-B", "A-B value")
for opt in ("a-b", "A-b", "a-B", "A-B"):
verify(cf.has_option("A", opt),
"has_option() returned false for option which should exist")
verify(cf.options("A") == ["a-b"])
verify(cf.options("a") == ["b"])
cf.remove_option("a", "B")
verify(cf.options("a") == [])
# SF bug #432369:
cf = ConfigParser.ConfigParser()
sio = StringIO.StringIO("[MySection]\nOption: first line\n\tsecond line\n")
cf.readfp(sio)
verify(cf.options("MySection") == ["option"])
verify(cf.get("MySection", "Option") == "first line\nsecond line")
def boolean(src):
print "Testing interpretation of boolean Values..."
cf = ConfigParser.ConfigParser()
sio = StringIO.StringIO(src)
cf.readfp(sio)
for x in range(1, 5):
verify(cf.getboolean('BOOLTEST', 't%d' % (x)) == 1)
for x in range(1, 5):
verify(cf.getboolean('BOOLTEST', 'f%d' % (x)) == 0)
for x in range(1, 5):
try:
cf.getboolean('BOOLTEST', 'e%d' % (x))
except ValueError:
pass
else:
raise TestFailed(
"getboolean() failed to report a non boolean value")
def interpolation(src):
print "Testing value interpolation..."
cf = ConfigParser.ConfigParser({"getname": "%(__name__)s"})
sio = StringIO.StringIO(src)
cf.readfp(sio)
verify(cf.get("Foo", "getname") == "Foo")
verify(cf.get("Foo", "bar") == "something with interpolation (1 step)")
verify(cf.get("Foo", "bar9")
== "something with lots of interpolation (9 steps)")
verify(cf.get("Foo", "bar10")
== "something with lots of interpolation (10 steps)")
expect_get_error(cf, ConfigParser.InterpolationDepthError, "Foo", "bar11")
def parse_errors():
print "Testing parse errors..."
expect_parse_error(ConfigParser.ParsingError,
"""[Foo]\n extra-spaces: splat\n""")
expect_parse_error(ConfigParser.ParsingError,
"""[Foo]\n extra-spaces= splat\n""")
expect_parse_error(ConfigParser.ParsingError,
"""[Foo]\noption-without-value\n""")
expect_parse_error(ConfigParser.ParsingError,
"""[Foo]\n:value-without-option-name\n""")
expect_parse_error(ConfigParser.ParsingError,
"""[Foo]\n=value-without-option-name\n""")
expect_parse_error(ConfigParser.MissingSectionHeaderError,
"""No Section!\n""")
def query_errors():
print "Testing query interface..."
cf = ConfigParser.ConfigParser()
verify(cf.sections() == [],
"new ConfigParser should have no defined sections")
verify(not cf.has_section("Foo"),
"new ConfigParser should have no acknowledged sections")
try:
cf.options("Foo")
except ConfigParser.NoSectionError, e:
pass
else:
raise TestFailed(
"Failed to catch expected NoSectionError from options()")
try:
cf.set("foo", "bar", "value")
except ConfigParser.NoSectionError, e:
pass
else:
raise TestFailed("Failed to catch expected NoSectionError from set()")
expect_get_error(cf, ConfigParser.NoSectionError, "foo", "bar")
cf.add_section("foo")
expect_get_error(cf, ConfigParser.NoOptionError, "foo", "bar")
def weird_errors():
print "Testing miscellaneous error conditions..."
cf = ConfigParser.ConfigParser()
cf.add_section("Foo")
try:
cf.add_section("Foo")
except ConfigParser.DuplicateSectionError, e:
pass
else:
raise TestFailed("Failed to catch expected DuplicateSectionError")
def expect_get_error(cf, exctype, section, option, raw=0):
try:
cf.get(section, option, raw=raw)
except exctype, e:
pass
else:
raise TestFailed("Failed to catch expected " + exctype.__name__)
def expect_parse_error(exctype, src):
cf = ConfigParser.ConfigParser()
sio = StringIO.StringIO(src)
try:
cf.readfp(sio)
except exctype, e:
pass
else:
raise TestFailed("Failed to catch expected " + exctype.__name__)
basic(r"""
[Foo Bar]
foo=bar
[Spacey Bar]
foo = bar
[Commented Bar]
foo: bar ; comment
[Section\with$weird%characters[""" '\t' r"""]
[Internationalized Stuff]
foo[bg]: Bulgarian
foo=Default
foo[en]=English
foo[de]=Deutsch
""")
case_sensitivity()
boolean(r"""
[BOOLTEST]
T1=1
T2=TRUE
T3=True
T4=oN
T5=yes
F1=0
F2=FALSE
F3=False
F4=oFF
F5=nO
E1=2
E2=foo
E3=-1
E4=0.1
E5=FALSE AND MORE
""")
interpolation(r"""
[Foo]
bar=something %(with1)s interpolation (1 step)
bar9=something %(with9)s lots of interpolation (9 steps)
bar10=something %(with10)s lots of interpolation (10 steps)
bar11=something %(with11)s lots of interpolation (11 steps)
with11=%(with10)s
with10=%(with9)s
with9=%(with8)s
with8=%(with7)s
with7=%(with6)s
with6=%(with5)s
with5=%(with4)s
with4=%(with3)s
with3=%(with2)s
with2=%(with1)s
with1=with
[Mutual Recursion]
foo=%(bar)s
bar=%(foo)s
""")
parse_errors()
query_errors()
weird_errors()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.2/Lib/test/test_cfgparser.py
|
Python
|
mit
| 7,274
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/space/chassis/shared_hutt_medium_s02.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/draft_schematic/space/chassis/shared_hutt_medium_s02.py
|
Python
|
mit
| 458
|
#The new version is in polishNotation2.py. Use that version instead of using this version.
#To do:
#Find out how to split a string using matches of a regular expression as the separator.
#Test everything in polyglotCodeGenerator.py
#Use re.match(expr, stringToSplit).groups() to split a string with its parameters:
#http://stackoverflow.com/questions/18903923/how-to-split-a-string-in-python-without-redundant-output
from pyparsing import OneOrMore, nestedExpr
import re
def splitParameterString(theString):
toFilter = re.compile("(<<(?:[^\s]+)>>)").split(theString)
return filter(lambda a: a != '', toFilter)
def getRegexFromString(theString):
theSplitString = splitParameterString(theString)
for x in range(0, len(theSplitString)):
if theSplitString[x].startswith("<<") and theSplitString[x].endswith(">>"):
theSplitString[x] = "([^\s]+)"
return re.compile("".join(theSplitString))
def splitStatement(theRegex, stringToSplit):
return re.match(theRegex, stringToSplit).groups()
def getThingToCheckAgainstRegex(theArray):
theCounter = 0
toReturn = ""
for idx, current in enumerate(theArray):
if(idx != 0):
toReturn += " "
if (type(current) != str or (type(current) == str) and (("'" in current) or ('"' in current))):
theCounter += 1
toReturn += "<<" + str(theCounter) + ">>"
else:
toReturn += current
return toReturn
stringToTest = "(replace(?: each| every|)) <<foo>> (in|inside(?: of)|within) <<bar>> (with) <<baz>>"
theRegex = getRegexFromString(stringToTest)
print(splitParameterString(stringToTest))
print(splitStatement(theRegex, "replace (a) in b with c"))
print(splitStatement(theRegex, "replace a within b with c"))
print(splitStatement(theRegex, "replace a inside of b with c"))
print(splitStatement(theRegex, "replace every a in b with c"))
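# Illustrative note (not from the original author): based on the pattern built
# above, the first splitStatement call should print a tuple along the lines of
#     ('replace', '(a)', 'in', 'b', 'with', 'c')
# i.e. re.match(...).groups() returns the literal words and the <<...>>
# parameters as separate capture groups.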
#I'm still working on crossLanguageParser.py, but I'm trying to see if I can get this new syntax to work.
#This is supposed to be a re-write of crossLanguageParser.py, using Polish notation.
#evaluateMacro is the main function here.
#print(getThingToCheckAgainstRegex(["the", "type", "of", ["foo", "goo"], "is", "'bar'"]))
def isParameter(theString):
    if theString.startswith("<<") and theString.endswith(">>"):
        return True
    return False
arrayOfOutputs = [
[["<<type>> [ <<dimensions>> ] <<name>> = <<initialValue>>", "<<type>> <<name>> [ <<dimensions>> ] = <<initialValue>>"], "initializeVar('<<name>>', '<<type>>', <<initialValue>>, <<dimensions>>)", "final"],
[["<<type>> <<name>> = <<initialValue>>"], "(<<type>> [ None ] <<name>> = <<initialValue>>)"],
#def initializeVar(variableName, variableType, initialValue, arrayDimensions):
[["def <<isStatic>> <<returnType>> <<functionName>> <<parameterNames>> <<parameterTypes>> <<body>>"], "getFunction('<<functionName>>', '<<isStatic>>', <<parameterNames>>, <<parameterTypes>>, '<<returnType>>', <<body>>)", "final"],
[["return <<toReturn>>",], "Return(<<toReturn>>)", "final"],
[["while <<condition>> <<action>>"], "whileLoop([<<action>>], <<condition>>)", "final"],
[["switch <<condition>> <<action>>",], "switch(<<condition>>, [<<action>>])", "final"],
[["case <<condition>> <<action>>"], "case(<<condition>>, [<<action>>])", "final"],
[["else <<action>>", "else { <<action>> }"], "Else([<<action>>])", "final"],
[["if <<condition>> then <<output>>", "<<output>> unless <<condition>> is false", "if <<condition>> { <<output>> }", "<<output>> if <<condition>>", "<<output>> if and only if <<condition>>", "if <<condition>> <<output>>"], "If(<<condition>>, [<<output>>])", "final"],
[["elif <<condition>> <<action>>", "else if <<condition>> then <<action>>"], "Elif(<<condition>>, [<<action>>])", "final"],
[["<<param1>> ; <<param2>>", "<<param1>> , <<param2>>"], "<<param1>>,<<param2>>", "final"],
[["<<param1>> ;", "<<param1>> ,"], "<<param1>>,", "final"],
[["module <<body>>"], "module([<<body>>])", "final"],
[["main <<body>>"], "main([<<body>>])", "final"],
[["<<value1>> == <<value2>>", "<<value1>> is <<value2>>", "<<value1>> equals <<value2>>", "<<value1>> is equal to <<value2>>"], "equals(<<value1>>, <<value2>>, 'int')", "final"],
[["<<item>> is in <<array>>", "<<array>> contains <<item>>"], "(<<item>> in <<array>>)", "final"],
#If it ends in "final", then the output string is directly returned.
[["not <<x>>", "! <<x>>"], "Not(<<x>>)", "final"],
[["replace each <<contained>> in <<container>> with <<replacement>>", "replace every <<contained>> in <<container>> with <<replacement>>"], "replace each <<contained>> in <<container>> with <<replacement>>", "final"],
#If there are only 2 items in the array (no third flag), then the output is translated into another macro (a worked example follows this array)
[["unless <<condition>> <<action>>", "<<action>> unless <<condition>>"], "(if (not <<condition>>) then <<action>>)"],
[["while <<condition>> <<action>>", "<<action>> while <<condition>>", "do <<action>> while <<condition>> is true", "<<action>> until <<condition>> becomes false"], "while(<<condition>>){<<action>>}", "final"],
#"eval" means the output string will be directly evaluated.
[["<<thing1>> means <<thing2>>"], "addToArray(<<thing1>>, <<thing2>>)", "eval"],
[["<<functionName>> { <<parameterList>> }"], "callFunction('<<functionName>>', None, [<<parameterList>>])", "final"],
[["<<param1>> + <<param2>>", "<<param1>> plus <<param2>>"], "add([<<param1>>, <<param2>>])", "final"],
[["<<param1>> - <<param2>>"], "subtract(<<param1>>, <<param2>>)", "final"],
[["<<param1>> * <<param2>>"], "multiply(<<param1>>, <<param2>>)", "final"],
[["<<param1>> / <<param2>>", "<<param1>> divided by <<param2>>"], "divide(<<param1>>, <<param2>>)", "final"],
[["<<param1>> % <<param2>>"], "Mod([<<param1>>, <<param2>>])", "final"],
[["<<param1>> or <<param2>>", "<<param1>> || <<param2>>"], "Or(<<param1>>, <<param2>>)", "final"],
[["<<param1>> > <<param2>>", "<<param1>> is greater than <<param2>>"], "greaterThan(<<param1>>, <<param2>>)", "final"],
[["<<param1>> < <<param2>>", "<<param1>> is less than <<param2>>>>"], "lessThan(<<param1>>, <<param2>>)", "final"],
[["<<param1>> <= <<param2>>"], "lessThanOrEqualTo(<<param1>>, <<param2>>)", "final"],
[["<<param1>> >= <<param2>>"], "greaterThanOrEqualTo(<<param1>>, <<param2>>)", "final"],
[["<<param1>> and <<param2>>", "<<param1>> && <<param2>>" "<<param1>> & <<param2>>"], "And(<<param1>>, <<param2>>)", "final"],
[["class <<className>> { <<body>> }",], "getClass(<<className>>, <<body>>)", "final"],
#def getClass(className, body):
[["<<param>> ++"], "(<<param>> += 1)"],
[["<<param>> --"], "(<<param>> -= 1)"],
[["seriesOfStatements <<param>>", "series of statements <<param>>"], "seriesOfStatements([<<param>>])", "final"],
[["<<param1>> += <<param2>>"], "(<<param1>> = (<<param1>> + <<param2>>))"],
[["<<param1>> -= <<param2>>"], "(<<param1>> = (<<param1>> - <<param2>>))"],
[["<<param1>> *= <<param2>>"], "(<<param1>> = (<<param1>> * <<param2>>))"],
[["<<param1>> ^= <<param2>>"], "(<<param1>> = (<<param1>> ^ <<param2>>))"],
[["<<param1>> = <<param2>>"], "setVar(<<param2>>, <<param1>>)", "final"],
#def setVar(valueToGet, valueToChange):
[["for <<initializer>> <<condition>> <<increment>> <<action>>", "for <<initializer>> ; <<condition>> ; <<increment>> { <<action>> }"], "forLoop(<<action>>, <<initializer>>, <<condition>>, <<increment>>)", "final"],
#def forLoop(body, initializer, condition, increment):
[["for <<variable>> from <<start>> to <<end>> <<action>>"], "(for [ (<<variable>> = <<start>>) ; (<<variable>> < <<end>>) ; (<<variable>> ++) ] { <<action>> })"],
[["<<param1>> ^ <<param2>>", "<<param1>> to the power of <<param2>>", "param1 ** param2"], "<<param1>>^<<param2>>", "final"],
[["[ <<param>> ]"], "[<<param>>]", "final"],
[["<<className>> . <<methodName>> { <<methodParameters>> }"], "<<className>>.<<methodName>>(<<methodParameters>>)", "final"]
]
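# Illustrative worked example of the entry format documented in the comments
# above (added for clarity, not from the original author): with the entry
#     [["not <<x>>", "! <<x>>"], "Not(<<x>>)", "final"]
# the parsed form of "(not foo)", i.e. ["not", "foo"], matches the first
# pattern, "<<x>>" is substituted with "foo", and because the entry carries the
# "final" flag, evaluateMacro should return the string "Not(foo)" directly
# instead of re-parsing it as another macro.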
def addToArray(thing1, thing2):
global arrayOfOutputs
thing2 = ("(" + thing2 + ")")
thing2 = list(OneOrMore(nestedExpr()).parseString(thing2)[0])
thing1 = thing1.split(" ")
arrayOfOutputs += [[thing1, thing2]]
for idx1, current1 in enumerate(arrayOfOutputs):
currentStringOutput = current1[1]
for idx2, current2 in enumerate(current1[0]):
current1[0][idx2] = current1[0][idx2].split(" ")
if(len(current1) == 2):
current1[1] = OneOrMore(nestedExpr()).parseString(currentStringOutput)[0]
#print(arrayOfOutputs)
def compareStringLists(listWithParameters, listDefiningThePattern):
if(len(listWithParameters) != len(listDefiningThePattern)):
return False
for idx, current in enumerate(listWithParameters):
if(not isParameter(listDefiningThePattern[idx])):
if(not (listWithParameters[idx] == listDefiningThePattern[idx])):
return False
return True
def replaceInMultiDimensionalArray(theArray, toReplace, newReplace):
for idx, current in enumerate(theArray):
if current == toReplace:
theArray[idx] = newReplace
if type(current) != str:
theArray[idx] = replaceInMultiDimensionalArray(current, toReplace, newReplace)
return theArray
#print(replaceInMultiDimensionalArray(['hello', 'dude', ['lol', 'lol', 'hello', ['woo', 'hello', 'woo']]], 'hello', 'hoohoo'))
#print(getRegexStringFromArray(["Hi", ["lol", 1, 2, "what is this"]]))
def putInsideArray(theArray, startingIndex, endingIndex):
theSubArray = theArray[startingIndex:(endingIndex+1)]
theArray[startingIndex:endingIndex] = []
theArray[startingIndex] = theSubArray
return theArray
#print(putInsideArray([1,3,3,4,5], 1, 3))
#requires putInsideArray and compareStringLists
#This surrounds the last match of the pattern with parentheses.
def putPatternInsideArray(theArray, thePattern):
for current in reversed(range(0,len(theArray))):
theTestArray = theArray[current:(current+len(thePattern))]
if(compareStringLists(theTestArray, thePattern) == True):
return putInsideArray(theArray, current, current+len(thePattern)-1)
#print(putPatternInsideArray(["hello", "hello", ["woop", "woop"]], ["hello", "<<derp>>"]))
#print(range(0, 5))
arrayToCheck = ["lol", "wut", "hello", "world", "lol"]
#print(putPatternInsideArray(arrayToCheck, ["hello", "world"]))
#print(putPatternInsideArray(arrayToCheck, ["wut", "<<testingThing>>", "lol"]))
def putFirstPatternInsideArray(theArray, thePatterns):
firstLength = len(theArray)
for current in thePatterns:
putPatternInsideArray(theArray, current)
if(len(theArray) != firstLength):
return theArray
def putEveryPatternInsideArray(theArray):
arrayOfPatterns = []
for current in arrayOfOutputs:
arrayOfPatterns += current[0]
arrayOfPatterns.sort(key=len)
arrayOfPatterns = arrayOfPatterns[::-1]
#print(arrayOfPatterns)
while True:
oldArrayLength = len(theArray)
putFirstPatternInsideArray(theArray, arrayOfPatterns)
if(len(theArray) == oldArrayLength):
break;
if(len(theArray) == 1):
return theArray[0]
else:
return theArray
putEveryPatternInsideArray(["hi", "lol"])
def evaluateMacro(theList):
theList = list(theList)
theList = putEveryPatternInsideArray(theList)
#print(theList)
if (len(theList) == 1):
return evaluateMacro(theList[0])
#print regexString
for idx, currentOutputArray in enumerate(arrayOfOutputs):
currentOutputString = currentOutputArray[1]
currentSplitStringList = currentOutputArray[0]
for idx1, currentSplitStringList1 in enumerate(currentSplitStringList):
if(compareStringLists(theList, currentSplitStringList1)):
currentSplitStringList = currentSplitStringList[idx1]
toReturn = currentOutputString
if((len(currentOutputArray) == 3)):
for idx2, currentParameter in enumerate(theList):
if type(currentParameter) != str:
theList[idx2] = evaluateMacro(currentParameter)
if isParameter(currentSplitStringList[idx2]):
toReturn = toReturn.replace(currentSplitStringList[idx2], theList[idx2])
if currentOutputArray[2] == "final":
return toReturn
elif currentOutputArray[2] == "eval":
exec toReturn;
return ""
else:
for idx2, currentParameter in enumerate(theList):
if isParameter(currentSplitStringList[idx2]):
toReturn = replaceInMultiDimensionalArray(toReturn, currentSplitStringList[idx2], currentParameter)
#print(toReturn)
return evaluateMacro(toReturn)
raise Exception(str(theList) + " does not match any pattern.")
#evaluateMacro(OneOrMore(nestedExpr()).parseString("('gorp <<toPrint>>' means 'print <<toPrint>>')")[0])
#evaluateMacro(OneOrMore(nestedExpr()).parseString("('<<action>> unless <<condition>>' means 'if (not <<condition>>) then <<action>>')")[0])
#print("The output is " + evaluateMacro(OneOrMore(nestedExpr()).parseString("(gorp 1)")[0]))
#print(arrayOfOutputs[len(arrayOfOutputs) - 1])
#print(arrayOfOutputs[4])
def printOutput(theInput):
print("The output of "+theInput+" is " + evaluateMacro(OneOrMore(nestedExpr()).parseString(theInput)[0]))
def getStringOutput(theInput):
return evaluateMacro(OneOrMore(nestedExpr()).parseString("(" + theInput + ")")[0])
#printOutput("(x is in arr1)")
#printOutput("(arr1 contains 'a string')")
#printOutput("(if (x equals true) then (print { goodbye }) (else print { hello }))")
#printOutput(
'''
(
(if (x == 3)
(print { x + 1 })
(else if (x == 4) then
(print { x ^ 5 })
(else
print { x + 3 + 4 }
)
)
)
)
'''#)
#printOutput("(foo { ([ 3 , 4 , 5 , 6 , 5 , 4 ]) })")
#printOutput("(print { sum { product { lol * derp } } })")
#printOutput(
'''
((print { x }) if and only if
(x is true)
(else
print { hello }
)
)
'''
#)
#printOutput("([ 1 , ([ 2 , 7 , 9 ]), 3 ])")
#printOutput("(function paramNames paramTypes (return 3))")
#printOutput("(class HelloWorld { print { hello } })")
#printOutput("(foo plus bar plus baz)")
#printOutput("(foo < bar < baz)")
#printOutput("(foo is greater than bar)")
#printOutput("(foo >= bar >= baz)")
#printOutput("(foo <= bar <= baz)")
#printOutput("(foo ++)")
#printOutput("(foo --)")
#printOutput("(foo to the power of 3)")
#printOutput("([ 1 , ([ 1 , 2 , 3 , ([ lol , derp , hee , hoo ]) ]) , 1 ])")
#printOutput(
'''
(for (i = 0) ; (i < 10) ; (i ++) {
(print { i })
})
'''
#)
#printOutput(
'''
(for i from 1 to 10 {
(print { i } ; print { (i + 1) } ; print { (i to the power of 2) } ;)
})
'''
#)
#printOutput("(lol . derp { hi })")
#printOutput(
'''(
main
print { hello } ; print { derp } ; print { i + 1 + 3 } ;
)'''#)
#semicolon(semicolon(print(hello)), semicolon(print(derp)), print(add(i, add(1, 3))))
|
jarble/EngScript
|
libraries/polishNotation.py
|
Python
|
mit
| 14,320
|
from __future__ import unicode_literals, division, absolute_import
import copy
import datetime
from math import ceil
from flask import jsonify
from flask import request
from flask_restplus import inputs
from sqlalchemy.orm.exc import NoResultFound
from flexget.api import api, APIResource, ApiClient
from flexget.event import fire_event
from flexget.plugin import PluginError
from flexget.plugins.filter import series
series_api = api.namespace('series', description='Flexget Series operations')
default_error_schema = {
'type': 'object',
'properties': {
'status': {'type': 'string'},
'message': {'type': 'string'}
}
}
default_error_schema = api.schema('default_error_schema', default_error_schema)
empty_response = api.schema('empty', {'type': 'object'})
begin_object = {
'type': 'object',
'properties': {
'episode_id': {'type': 'integer'},
'episode_identifier': {'type': 'string'}
}
}
release_object = {
'type': 'object',
'properties': {
'release_id': {'type': 'integer'},
'release_title': {'type': 'string'},
'release_downloaded': {'type': 'string'},
'release_quality': {'type': 'string'},
'release_proper_count': {'type': 'integer'},
'release_first_seen': {'type': 'string', 'format': 'date-time'},
'release_episode_id': {'type': 'integer'}
}
}
release_schema = {
'type': 'object',
'properties': {
'show': {'type': 'string'},
'show_id': {'type': 'integer'},
'episode_id': {'type': 'integer'},
'release': release_object
}
}
release_schema = api.schema('release_schema', release_schema)
release_list_schema = {
'type': 'object',
'properties': {
'releases': {
'type': 'array',
'items': release_object
},
'number_of_releases': {'type': 'integer'},
'episode_id': {'type': 'integer'},
'show_id': {'type': 'integer'}
}
}
release_list_schema = api.schema('release_list_schema', release_list_schema)
latest_object = {
'type': 'object',
'properties': {
'episode_id': {'type': 'integer'},
'episode_identifier': {'type': 'string'},
'episode_age': {'type': 'string'},
'number_of_episodes_behind': {'type': 'integer'},
'downloaded_releases': {
'type': 'array',
'items': release_object
}
}
}
episode_object = {
'type': 'object',
'properties': {
"episode_first_seen": {'type': 'string', 'format': 'date-time'},
"episode_id": {'type': 'string'},
"episode_identified_by": {'type': 'string'},
"episode_identifier": {'type': 'string'},
"episode_premiere_type": {'type': 'string'},
"episode_number": {'type': 'string'},
"episode_season": {'type': 'string'},
"episode_series_id": {'type': 'string'},
"episode_number_of_releases": {'type': 'integer'}
}
}
show_object = {
'type': 'object',
'properties': {
'show_id': {'type': 'integer'},
'show_name': {'type': 'string'},
'alternate_names': {'type': 'array', 'items': {'type': 'string'}},
'begin_episode': begin_object,
'latest_downloaded_episode': latest_object,
'in_tasks': {'type': 'array', 'items': {'type': 'string'}}
}
}
series_list_schema = {
'type': 'object',
'properties': {
'shows': {
'type': 'array',
'items': show_object
},
'total_number_of_shows': {'type': 'integer'},
'page_size': {'type': 'integer'},
'total_number_of_pages': {'type': 'integer'},
'page': {'type': 'integer'}
}
}
series_list_schema = api.schema('list_series', series_list_schema)
episode_list_schema = {
'type': 'object',
'properties': {
'episodes': {
'type': 'array',
'items': episode_object
},
'number_of_episodes': {'type': 'integer'},
'total_number_of_episodes': {'type': 'integer'},
'page': {'type': 'integer'},
'total_number_of_pages': {'type': 'integer'},
'show_id': {'type': 'integer'},
'show': {'type': 'string'}
}
}
episode_list_schema = api.schema('episode_list', episode_list_schema)
episode_schema = {
'type': 'object',
'properties': {
'episode': episode_object,
'show_id': {'type': 'integer'},
'show': {'type': 'string'}
}
}
episode_schema = api.schema('episode_item', episode_schema)
series_edit_object = {
'type': 'object',
'properties': {
'episode_identifier': {'type': 'string'},
'alternate_names': {'type': 'array', 'items': {'type': 'string'}}
},
'anyOf': [
{'required': ['episode_identifier']},
{'required': ['alternate_names']}
],
    'additionalProperties': False
}
series_edit_schema = api.schema('series_edit_schema', series_edit_object)
series_input_object = copy.deepcopy(series_edit_object)
series_input_object['properties']['series_name'] = {'type': 'string'}
del series_input_object['anyOf']
series_input_object['required'] = ['series_name']
series_input_schema = api.schema('series_input_schema', series_input_object)
release_object = {
'type': 'object',
'properties': {
'quality': {'type': 'string'},
'title': {'type': 'string'},
'proper_count': {'type': 'integer'},
'downloaded': {'type': 'boolean'}
}
}
episode_object = {
'type': 'object',
'properties': {
'identifier': {'type': 'string'},
'identifier_type': {'type': 'string'},
'download_age': {'type': 'string'},
'releases': {
'type': 'array',
'items': release_object}
}
}
show_details_schema = {
'type': 'object',
'properties': {
'episodes': {
'type': 'array',
'items': episode_object
},
'show': show_object
}
}
shows_schema = {
'type': 'object',
'properties': {
'shows': {
'type': 'array',
'items': show_object
},
'number_of_shows': {'type': 'integer'}
}
}
def get_release_details(release):
release_item = {
'release_id': release.id,
'release_title': release.title,
'release_downloaded': release.downloaded,
'release_quality': release.quality.name,
'release_proper_count': release.proper_count,
'release_first_seen': release.first_seen,
'release_episode_id': release.episode_id,
}
return release_item
def get_episode_details(episode):
episode_item = {
'episode_id': episode.id,
'episode_identifier': episode.identifier,
'episode_season': episode.season,
'episode_identified_by': episode.identified_by,
'episode_number': episode.number,
'episode_series_id': episode.series_id,
'episode_first_seen': episode.first_seen,
'episode_premiere_type': episode.is_premiere,
'episode_number_of_releases': len(episode.releases)
}
return episode_item
def get_series_details(show):
latest_ep = series.get_latest_release(show)
begin_ep = show.begin
if begin_ep:
begin_ep_id = begin_ep.id
begin_ep_identifier = begin_ep.identifier
else:
begin_ep_id = begin_ep_identifier = None
begin = {
'episode_id': begin_ep_id,
'episode_identifier': begin_ep_identifier
}
if latest_ep:
latest_ep_id = latest_ep.id
latest_ep_identifier = latest_ep.identifier
latest_ep_age = latest_ep.age
new_eps_after_latest_ep = series.new_eps_after(latest_ep)
release = get_release_details(
sorted(latest_ep.downloaded_releases,
key=lambda release: release.first_seen if release.downloaded else None, reverse=True)[0])
else:
latest_ep_id = latest_ep_identifier = latest_ep_age = new_eps_after_latest_ep = release = None
latest = {
'episode_id': latest_ep_id,
'episode_identifier': latest_ep_identifier,
'episode_age': latest_ep_age,
'number_of_episodes_behind': new_eps_after_latest_ep,
'last_downloaded_release': release
}
show_item = {
'show_id': show.id,
'show_name': show.name,
'alternate_names': [n.alt_name for n in show.alternate_names],
'begin_episode': begin,
'latest_downloaded_episode': latest,
'in_tasks': [_show.name for _show in show.in_tasks]
}
return show_item
show_details_schema = api.schema('show_details', show_details_schema)
shows_schema = api.schema('list_of_shows', shows_schema)
series_list_parser = api.parser()
series_list_parser.add_argument('in_config', choices=('configured', 'unconfigured', 'all'), default='configured',
help="Filter list if shows are currently in configuration.")
series_list_parser.add_argument('premieres', type=inputs.boolean, default=False,
help="Filter by downloaded premieres only.")
series_list_parser.add_argument('status', choices=('new', 'stale'), help="Filter by status")
series_list_parser.add_argument('days', type=int,
help="Filter status by number of days.")
series_list_parser.add_argument('page', type=int, default=1, help='Page number. Default is 1')
series_list_parser.add_argument('page_size', type=int, default=10, help='Shows per page. Max is 100.')
series_list_parser.add_argument('sort_by', choices=('show_name', 'episodes_behind_latest', 'last_download_date'),
default='show_name',
help="Sort response by attribute.")
series_list_parser.add_argument('order', choices=('desc', 'asc'), default='desc', help="Sorting order.")
series_list_parser.add_argument('lookup', choices=('tvdb', 'tvmaze'), action='append',
help="Get lookup result for every show by sending another request to lookup API")
ep_identifier_doc = "'episode_identifier' should be one of SxxExx, integer or date formatted such as 2012-12-12"
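# Hypothetical request bodies illustrating the accepted 'episode_identifier' formats
# described above (SxxExx, an integer, or a date); field names follow series_input_schema,
# the values themselves are made up:
#
#   {"series_name": "My Show", "episode_identifier": "S02E05"}
#   {"series_name": "My Show", "episode_identifier": 42}
#   {"series_name": "My Show", "episode_identifier": "2012-12-12"}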
@series_api.route('/')
class SeriesListAPI(APIResource):
@api.response(404, 'Page does not exist', default_error_schema)
@api.response(200, 'Series list retrieved successfully', series_list_schema)
@api.doc(parser=series_list_parser, description="Get a list of Flexget's shows in DB")
def get(self, session=None):
""" List existing shows """
args = series_list_parser.parse_args()
page = args['page']
page_size = args['page_size']
lookup = args.get('lookup')
# Handle max size limit
if page_size > 100:
page_size = 100
sort_by = args['sort_by']
order = args['order']
# In case the default 'desc' order was received
if order == 'desc':
order = True
else:
order = False
start = page_size * (page - 1)
stop = start + page_size
kwargs = {
'configured': args.get('in_config'),
'premieres': args.get('premieres'),
'status': args.get('status'),
'days': args.get('days'),
'start': start,
'stop': stop,
'session': session
}
num_of_shows = series.get_series_summary(count=True, **kwargs)
raw_series_list = series.get_series_summary(**kwargs)
converted_series_list = [get_series_details(show) for show in raw_series_list]
sorted_show_list = []
if sort_by == 'show_name':
sorted_show_list = sorted(converted_series_list, key=lambda show: show['show_name'], reverse=order)
elif sort_by == 'episodes_behind_latest':
sorted_show_list = sorted(converted_series_list,
key=lambda show: show['latest_downloaded_episode']['number_of_episodes_behind'],
reverse=order)
elif sort_by == 'last_download_date':
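            # Shows without any downloaded release fall back to the Unix epoch below,
            # so they group together at one end of the date-sorted list.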
sorted_show_list = sorted(converted_series_list,
key=lambda show: show['latest_downloaded_episode']['last_downloaded_release'][
'release_first_seen'] if show['latest_downloaded_episode'][
'last_downloaded_release'] else datetime.datetime(1970, 1, 1),
reverse=order)
pages = int(ceil(num_of_shows / float(page_size)))
if page > pages and pages != 0:
            return {'status': 'error',
                    'message': 'page %s does not exist' % page
                    }, 404
number_of_shows = min(page_size, num_of_shows)
response = {
'shows': sorted_show_list,
'page_size': number_of_shows,
'total_number_of_shows': num_of_shows,
'page': page,
'total_number_of_pages': pages
}
if lookup:
api_client = ApiClient()
for endpoint in lookup:
base_url = '/%s/series/' % endpoint
                for pos, show in enumerate(response['shows']):
                    response['shows'][pos].setdefault('lookup', {})
                    url = base_url + show['show_name'] + '/'
                    result = api_client.get_endpoint(url)
                    response['shows'][pos]['lookup'].update({endpoint: result})
return jsonify(response)
@api.response(200, 'Adding series and setting first accepted episode to ep_id', show_details_schema)
@api.response(500, 'Show already exists', default_error_schema)
@api.response(501, 'Episode Identifier format is incorrect', default_error_schema)
    @api.response(502, 'Alternate name already exists for a different show', default_error_schema)
@api.validate(series_input_schema, description=ep_identifier_doc)
def post(self, session):
""" Create a new show and set its first accepted episode and/or alternate names """
data = request.json
series_name = data.get('series_name')
normalized_name = series.normalize_series_name(series_name)
matches = series.shows_by_exact_name(normalized_name, session=session)
if matches:
return {'status': 'error',
                    'message': 'Show `%s` already exists in DB' % series_name
}, 500
show = series.Series()
show.name = series_name
session.add(show)
ep_id = data.get('episode_identifier')
alt_names = data.get('alternate_names')
if ep_id:
try:
series.set_series_begin(show, ep_id)
except ValueError as e:
return {'status': 'error',
'message': e.args[0]
}, 501
if alt_names:
try:
series.set_alt_names(alt_names, show, session)
except PluginError as e:
return {'status': 'error',
'message': e.value
}, 502
return jsonify(get_series_details(show))
@series_api.route('/search/<string:name>')
@api.doc(description='Searches for a show in the DB via its name. Returns a list of matching shows.')
class SeriesGetShowsAPI(APIResource):
@api.response(200, 'Show list retrieved successfully', shows_schema)
@api.doc(params={'name': 'Name of the show(s) to search'})
def get(self, name, session):
""" List of shows matching lookup name """
name = series.normalize_series_name(name)
matches = series.shows_by_name(name, session=session)
shows = []
for match in matches:
shows.append(get_series_details(match))
return jsonify({
'shows': shows,
'number_of_shows': len(shows)
})
@series_api.route('/<int:show_id>')
@api.doc(params={'show_id': 'ID of the show'})
class SeriesShowAPI(APIResource):
@api.response(404, 'Show ID not found', default_error_schema)
@api.response(200, 'Show information retrieved successfully', show_details_schema)
@api.doc(description='Get a specific show using its ID')
def get(self, show_id, session):
""" Get show details by ID """
try:
show = series.show_by_id(show_id, session=session)
except NoResultFound:
return {'status': 'error',
'message': 'Show with ID %s not found' % show_id
}, 404
show = get_series_details(show)
return jsonify(show)
@api.response(200, 'Removed series from DB', empty_response)
@api.response(404, 'Show ID not found', default_error_schema)
@api.doc(description='Delete a specific show using its ID')
def delete(self, show_id, session):
""" Remove series from DB """
try:
show = series.show_by_id(show_id, session=session)
except NoResultFound:
return {'status': 'error',
'message': 'Show with ID %s not found' % show_id
}, 404
name = show.name
try:
series.forget_series(name)
except ValueError as e:
return {'status': 'error',
'message': e.args[0]
}, 404
return {}
@api.response(200, 'Episodes for series will be accepted starting with ep_id', show_details_schema)
@api.response(404, 'Show ID not found', default_error_schema)
@api.response(501, 'Episode Identifier format is incorrect', default_error_schema)
    @api.response(502, 'Alternate name already exists for a different show', default_error_schema)
@api.validate(series_edit_schema, description=ep_identifier_doc)
@api.doc(description='Set a begin episode or alternate names using a show ID. Note that alternate names override '
'the existing names (if name does not belong to a different show).')
def put(self, show_id, session):
""" Set the initial episode of an existing show """
try:
show = series.show_by_id(show_id, session=session)
except NoResultFound:
return {'status': 'error',
'message': 'Show with ID %s not found' % show_id
}, 404
data = request.json
ep_id = data.get('episode_identifier')
alt_names = data.get('alternate_names')
if ep_id:
try:
series.set_series_begin(show, ep_id)
except ValueError as e:
return {'status': 'error',
'message': e.args[0]
}, 501
if alt_names:
try:
series.set_alt_names(alt_names, show, session)
except PluginError as e:
return {'status': 'error',
'message': e.value
}, 502
return jsonify(get_series_details(show))
episode_parser = api.parser()
episode_parser.add_argument('page', type=int, default=1, help='Page number. Default is 1')
episode_parser.add_argument('page_size', type=int, default=10, help='Episodes per page. Max is 100.')
episode_parser.add_argument('order', choices=('desc', 'asc'), default='desc', help="Sorting order.")
@api.response(404, 'Show ID not found', default_error_schema)
@series_api.route('/<int:show_id>/episodes')
@api.doc(params={'show_id': 'ID of the show'})
class SeriesEpisodesAPI(APIResource):
@api.response(200, 'Episodes retrieved successfully for show', episode_list_schema)
    @api.response(405, 'Page does not exist', model=default_error_schema)
@api.doc(description='Get all show episodes via its ID', parser=episode_parser)
def get(self, show_id, session):
""" Get episodes by show ID """
args = episode_parser.parse_args()
page = args['page']
page_size = args['page_size']
# Handle max size limit
if page_size > 100:
page_size = 100
order = args['order']
# In case the default 'desc' order was received
if order == 'desc':
order = True
else:
order = False
start = page_size * (page - 1)
stop = start + page_size
kwargs = {
'start': start,
'stop': stop,
'descending': order,
'session': session
}
try:
show = series.show_by_id(show_id, session=session)
except NoResultFound:
return {'status': 'error',
'message': 'Show with ID %s not found' % show_id
}, 404
count = series.show_episodes(show, count=True, session=session)
episodes = [get_episode_details(episode) for episode in series.show_episodes(show, **kwargs)]
pages = int(ceil(count / float(page_size)))
if page > pages and pages != 0:
return {'status': 'error',
                    'message': 'page %s does not exist' % page
                    }, 405
return jsonify({'show': show.name,
'show_id': show_id,
'number_of_episodes': len(episodes),
'episodes': episodes,
'total_number_of_episodes': count,
'page': page,
'total_number_of_pages': pages})
@api.response(500, 'Error when trying to forget episode', default_error_schema)
@api.response(200, 'Successfully forgotten all episodes from show', empty_response)
@api.doc(description='Delete all show episodes via its ID. Deleting an episode will mark it as wanted again')
def delete(self, show_id, session):
""" Forgets all episodes of a show"""
try:
show = series.show_by_id(show_id, session=session)
except NoResultFound:
return {'status': 'error',
'message': 'Show with ID %s not found' % show_id
}, 404
for episode in show.episodes:
try:
series.forget_episodes_by_id(show.id, episode.id)
except ValueError as e:
return {'status': 'error',
'message': e.args[0]
}, 500
return {}
delete_parser = api.parser()
delete_parser.add_argument('delete_seen', type=inputs.boolean, default=False,
help="Enabling this will delete all the related releases from seen entries list as well, "
"enabling to re-download them")
@api.response(404, 'Show ID not found', default_error_schema)
@api.response(414, 'Episode ID not found', default_error_schema)
@api.response(400, 'Episode with ep_ids does not belong to show with show_id', default_error_schema)
@series_api.route('/<int:show_id>/episodes/<int:ep_id>')
@api.doc(params={'show_id': 'ID of the show', 'ep_id': 'Episode ID'})
class SeriesEpisodeAPI(APIResource):
@api.response(200, 'Episode retrieved successfully for show', episode_schema)
@api.doc(description='Get a specific episode via its ID and show ID')
def get(self, show_id, ep_id, session):
""" Get episode by show ID and episode ID"""
try:
show = series.show_by_id(show_id, session=session)
except NoResultFound:
return {'status': 'error',
'message': 'Show with ID %s not found' % show_id
}, 404
try:
episode = series.episode_by_id(ep_id, session)
except NoResultFound:
return {'status': 'error',
'message': 'Episode with ID %s not found' % ep_id
}, 414
if not series.episode_in_show(show_id, ep_id):
return {'status': 'error',
'message': 'Episode with id %s does not belong to show %s' % (ep_id, show_id)}, 400
return jsonify({
'show': show.name,
'show_id': show_id,
'episode': get_episode_details(episode)
})
@api.response(200, 'Episode successfully forgotten for show', empty_response)
@api.doc(description='Delete a specific episode via its ID and show ID. Deleting an episode will mark it as '
'wanted again',
parser=delete_parser)
def delete(self, show_id, ep_id, session):
""" Forgets episode by show ID and episode ID """
try:
show = series.show_by_id(show_id, session=session)
except NoResultFound:
return {'status': 'error',
'message': 'Show with ID %s not found' % show_id
}, 404
try:
episode = series.episode_by_id(ep_id, session)
except NoResultFound:
return {'status': 'error',
'message': 'Episode with ID %s not found' % ep_id
}, 414
if not series.episode_in_show(show_id, ep_id):
return {'status': 'error',
'message': 'Episode with id %s does not belong to show %s' % (ep_id, show_id)}, 400
args = delete_parser.parse_args()
if args.get('delete_seen'):
for release in episode.releases:
fire_event('forget', release.title)
series.forget_episodes_by_id(show_id, ep_id)
return {}
release_list_parser = api.parser()
release_list_parser.add_argument('downloaded', type=inputs.boolean, help='Filter between release status')
release_delete_parser = release_list_parser.copy()
release_delete_parser.add_argument('delete_seen', type=inputs.boolean, default=False,
help="Enabling this will delete all the related releases from seen entries list as well, "
"enabling to re-download them")
@api.response(404, 'Show ID not found', default_error_schema)
@api.response(414, 'Episode ID not found', default_error_schema)
@api.response(400, 'Episode with ep_ids does not belong to show with show_id', default_error_schema)
@series_api.route('/<int:show_id>/episodes/<int:ep_id>/releases')
@api.doc(params={'show_id': 'ID of the show', 'ep_id': 'Episode ID'},
description='Releases are any seen entries that match the episode. ')
class SeriesReleasesAPI(APIResource):
@api.response(200, 'Releases retrieved successfully for episode', release_list_schema)
@api.doc(description='Get all matching releases for a specific episode of a specific show.',
parser=release_list_parser)
def get(self, show_id, ep_id, session):
""" Get all episodes releases by show ID and episode ID """
try:
show = series.show_by_id(show_id, session=session)
except NoResultFound:
return {'status': 'error',
'message': 'Show with ID %s not found' % show_id
}, 404
try:
episode = series.episode_by_id(ep_id, session)
except NoResultFound:
return {'status': 'error',
'message': 'Episode with ID %s not found' % ep_id
}, 414
if not series.episode_in_show(show_id, ep_id):
return {'status': 'error',
'message': 'Episode with id %s does not belong to show %s' % (ep_id, show_id)}, 400
args = release_list_parser.parse_args()
        # inputs.boolean already yields a bool, or None when the filter was not supplied
        downloaded = args.get('downloaded')
        release_items = []
        for release in episode.releases:
            if (downloaded and release.downloaded) or (downloaded is False and not release.downloaded) or downloaded is None:
                release_items.append(get_release_details(release))
return jsonify({
'releases': release_items,
'number_of_releases': len(release_items),
'episode_id': ep_id,
'show_id': show_id
})
@api.response(200, 'Successfully deleted all releases for episode', empty_response)
@api.doc(description='Delete all releases for a specific episode of a specific show.',
parser=release_delete_parser)
def delete(self, show_id, ep_id, session):
""" Deletes all episodes releases by show ID and episode ID """
try:
show = series.show_by_id(show_id, session=session)
except NoResultFound:
return {'status': 'error',
'message': 'Show with ID %s not found' % show_id
}, 404
try:
episode = series.episode_by_id(ep_id, session)
except NoResultFound:
return {'status': 'error',
'message': 'Episode with ID %s not found' % ep_id
}, 414
if not series.episode_in_show(show_id, ep_id):
return {'status': 'error',
'message': 'Episode with id %s does not belong to show %s' % (ep_id, show_id)}, 400
args = release_delete_parser.parse_args()
        downloaded = args.get('downloaded')
        release_items = []
        for release in episode.releases:
            if (downloaded and release.downloaded) or (downloaded is False and not release.downloaded) or downloaded is None:
                release_items.append(release)
if args.get('delete_seen'):
fire_event('forget', release.title)
for release in release_items:
series.delete_release_by_id(release.id)
return {}
@api.response(200, 'Successfully reset all downloaded releases for episode', empty_response)
@api.doc(description='Resets all of the downloaded releases of an episode, clearing the quality to be downloaded '
                         'again.')
def put(self, show_id, ep_id, session):
""" Marks all downloaded releases as not downloaded """
try:
show = series.show_by_id(show_id, session=session)
except NoResultFound:
return {'status': 'error',
'message': 'Show with ID %s not found' % show_id
}, 404
try:
episode = series.episode_by_id(ep_id, session)
except NoResultFound:
return {'status': 'error',
'message': 'Episode with ID %s not found' % ep_id
}, 414
if not series.episode_in_show(show_id, ep_id):
return {'status': 'error',
'message': 'Episode with id %s does not belong to show %s' % (ep_id, show_id)}, 400
for release in episode.releases:
if release.downloaded:
release.downloaded = False
return {}
@api.response(404, 'Show ID not found', default_error_schema)
@api.response(414, 'Episode ID not found', default_error_schema)
@api.response(424, 'Release ID not found', default_error_schema)
@api.response(400, 'Episode with ep_id does not belong to show with show_id', default_error_schema)
@api.response(410, 'Release with rel_id does not belong to episode with ep_id', default_error_schema)
@series_api.route('/<int:show_id>/episodes/<int:ep_id>/releases/<int:rel_id>/')
@api.doc(params={'show_id': 'ID of the show', 'ep_id': 'Episode ID', 'rel_id': 'Release ID'})
class SeriesReleaseAPI(APIResource):
@api.response(200, 'Release retrieved successfully for episode', release_schema)
@api.doc(description='Get a specific downloaded release for a specific episode of a specific show')
def get(self, show_id, ep_id, rel_id, session):
''' Get episode release by show ID, episode ID and release ID '''
try:
show = series.show_by_id(show_id, session=session)
except NoResultFound:
return {'status': 'error',
'message': 'Show with ID %s not found' % show_id
}, 404
try:
episode = series.episode_by_id(ep_id, session)
except NoResultFound:
return {'status': 'error',
'message': 'Episode with ID %s not found' % ep_id
}, 414
try:
release = series.release_by_id(rel_id, session)
except NoResultFound:
return {'status': 'error',
'message': 'Release with ID %s not found' % rel_id
}, 424
if not series.episode_in_show(show_id, ep_id):
return {'status': 'error',
'message': 'Episode with id %s does not belong to show %s' % (ep_id, show_id)}, 400
if not series.release_in_episode(ep_id, rel_id):
return {'status': 'error',
'message': 'Release id %s does not belong to episode %s' % (rel_id, ep_id)}, 410
return jsonify({
'show': show.name,
'show_id': show_id,
'episode_id': ep_id,
'release': get_release_details(release)
})
@api.response(200, 'Release successfully deleted', empty_response)
    @api.doc(description='Delete a specific release for a specific episode of a specific show.',
parser=delete_parser)
def delete(self, show_id, ep_id, rel_id, session):
''' Delete episode release by show ID, episode ID and release ID '''
try:
show = series.show_by_id(show_id, session=session)
except NoResultFound:
return {'status': 'error',
'message': 'Show with ID %s not found' % show_id
}, 404
try:
episode = series.episode_by_id(ep_id, session)
except NoResultFound:
return {'status': 'error',
'message': 'Episode with ID %s not found' % ep_id
}, 414
try:
release = series.release_by_id(rel_id, session)
except NoResultFound:
return {'status': 'error',
'message': 'Release with ID %s not found' % rel_id
}, 424
if not series.episode_in_show(show_id, ep_id):
return {'status': 'error',
'message': 'Episode with id %s does not belong to show %s' % (ep_id, show_id)}, 400
if not series.release_in_episode(ep_id, rel_id):
return {'status': 'error',
'message': 'Release with id %s does not belong to episode %s' % (rel_id, ep_id)}, 410
args = delete_parser.parse_args()
if args.get('delete_seen'):
fire_event('forget', release.title)
series.delete_release_by_id(rel_id)
return {}
@api.response(200, 'Successfully reset downloaded release status', empty_response)
@api.response(500, 'Release is not marked as downloaded', default_error_schema)
@api.doc(description='Resets the downloaded release status, clearing the quality to be downloaded again')
def put(self, show_id, ep_id, rel_id, session):
""" Resets a downloaded release status """
try:
show = series.show_by_id(show_id, session=session)
except NoResultFound:
return {'status': 'error',
'message': 'Show with ID %s not found' % show_id
}, 404
try:
episode = series.episode_by_id(ep_id, session)
except NoResultFound:
return {'status': 'error',
'message': 'Episode with ID %s not found' % ep_id
}, 414
try:
release = series.release_by_id(rel_id, session)
except NoResultFound:
return {'status': 'error',
'message': 'Release with ID %s not found' % rel_id
}, 424
if not series.episode_in_show(show_id, ep_id):
return {'status': 'error',
'message': 'Episode with id %s does not belong to show %s' % (ep_id, show_id)}, 400
if not series.release_in_episode(ep_id, rel_id):
return {'status': 'error',
'message': 'Release with id %s does not belong to episode %s' % (rel_id, ep_id)}, 410
if not release.downloaded:
return {'status': 'error',
'message': 'Release with id %s is not set as downloaded' % rel_id}, 500
release.downloaded = False
return {}
|
antivirtel/Flexget
|
flexget/plugins/api/series.py
|
Python
|
mit
| 36,378
|
"""A few convenience functions to setup the Ising model in a TF.
TFIM stands for Ising model in a transverse field, i.e.:
.. math::
H=\sum_{i}\left[S^{z}_{i}S^{z}_{i+1} + h S^{x}_{i}\right)\right]
"""
class TranverseFieldIsingModel(object):
"""Implements a few convenience functions for the TFIM.
Does exactly that.
"""
def __init__(self, H = 0):
super(TranverseFieldIsingModel, self).__init__()
self.H = H
def set_hamiltonian(self, system):
"""Sets a system Hamiltonian to the TFIM Hamiltonian.
        Does exactly this. If the system Hamiltonian has some other terms in
        it, they are not touched. So be sure to use this function only on
        newly created `System` objects.
Parameters
----------
system : a System.
            The System you want to set the Hamiltonian for.
"""
system.clear_hamiltonian()
if 'bh' in system.left_block.operators.keys():
system.add_to_hamiltonian(left_block_op='bh')
if 'bh' in system.right_block.operators.keys():
system.add_to_hamiltonian(right_block_op='bh')
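        # The four operator labels below are assumed to follow dmrg101's
        # (left block, left site, right site, right block) convention: first the
        # -S^z S^z coupling on each bond of the superblock, then the transverse
        # field term h*S^x on every block/site.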
system.add_to_hamiltonian('id', 'id', 's_z', 's_z', -1.)
system.add_to_hamiltonian('id', 's_z', 's_z', 'id', -1.)
system.add_to_hamiltonian('s_z', 's_z', 'id', 'id', -1.)
system.add_to_hamiltonian('id', 'id', 'id', 's_x', self.H)
system.add_to_hamiltonian('id', 'id', 's_x', 'id', self.H)
system.add_to_hamiltonian('id', 's_x', 'id', 'id', self.H)
system.add_to_hamiltonian('s_x', 'id', 'id', 'id', self.H)
def set_block_hamiltonian(self, tmp_matrix_for_bh, system):
"""Sets the block Hamiltonian to be what you need for TFIM.
Parameters
----------
tmp_matrix_for_bh : a numpy array of ndim = 2.
An auxiliary matrix to keep track of the result.
system : a System.
The System you want to set the Hamiltonian for.
"""
# If you have a block hamiltonian in your block, add it
if 'bh' in system.growing_block.operators.keys():
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_z', 's_z', -1.)
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 's_x', self.H)
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_x', 'id', self.H)
def set_operators_to_update(self, system):
"""Sets the operators to update to be what you need to TFIM.
Parameters
----------
system : a System.
The System you want to set the Hamiltonian for.
Notes
-----
        The block Hamiltonian, although it needs to be updated, is treated
        separately by the functions in the `System` class.
"""
system.add_to_operators_to_update('s_z', site_op='s_z')
system.add_to_operators_to_update('s_x', site_op='s_x')
|
chanul13/hubbard_dimer
|
dmrg101/utils/models/tfi_model.py
|
Python
|
mit
| 2,957
|
from __future__ import print_function
import getpass
import logging
import os
import os.path as op
from ssbio.protein.sequence.utils import fasta as fasta
log = logging.getLogger(__name__)
class ITASSERPrep():
"""Prepare a protein sequence for an I-TASSER homology modeling run.
The main utilities of this class are to:
* Allow for the input of a protein sequence string and paths to I-TASSER to create execution scripts
* Automate large-scale homology modeling efforts by creating Slurm or TORQUE job scheduling scripts
Args:
ident: Identifier for your sequence. Will be used as the global ID (folder name, sequence name)
seq_str: Sequence in string format
root_dir: Local directory where I-TASSER folder will be created
itasser_path: Path to I-TASSER folder, i.e. '~/software/I-TASSER4.4'
itlib_path: Path to ITLIB folder, i.e. '~/software/ITLIB'
execute_dir: Optional path to execution directory - use this if you are copying the homology models to
another location such as a supercomputer for running
light: If simulations should be limited to 5 runs
runtype: How you will be running I-TASSER - local, slurm, or torque
print_exec: If the execution script should be printed out
java_home: Path to Java executable
binding_site_pred: If binding site predictions should be run
ec_pred: If EC number predictions should be run
go_pred: If GO term predictions should be run
additional_options: Any other additional I-TASSER options, appended to the command
job_scheduler_header: Any job scheduling options, prepended as a header to the file
"""
def __init__(self, ident, seq_str, root_dir, itasser_path, itlib_path,
execute_dir=None, light=True, runtype='local', print_exec=False, java_home=None,
binding_site_pred=False, ec_pred=False, go_pred=False, additional_options=None,
job_scheduler_header=None):
if runtype.lower() not in ['local', 'torque', 'slurm']:
raise ValueError('Invalid runtype, must be "local", "torque", "slurm"')
self.id = ident
self.seq_str = seq_str
if not self.seq_str:
raise ValueError('{}: no sequence input'.format(self.id))
if len(self.seq_str) < 10 or len(self.seq_str) > 1500:
log.warning('{}: I-TASSER modeling will not run as sequence length ({}) is not in the range [10, 1500]'.format(self.id, len(self.seq_str)))
self.root_dir = root_dir
if not op.exists(root_dir):
os.makedirs(root_dir)
if not execute_dir:
# If no execute_dir is given, use the same dir as the created folder
self.execute_dir = self.prep_folder(seq_str)
elif execute_dir:
orig_data_dir = self.prep_folder(seq_str)
self.execute_dir = op.join(execute_dir, op.basename(orig_data_dir))
self.print_exec = print_exec
self.runtype = runtype
if light:
light = 'true'
else:
light = 'false'
self.light = light
self.model_exists = op.exists(op.join(self.execute_dir, 'model1.pdb'))
if not additional_options:
additional_options = ''
else:
additional_options += ' '
if binding_site_pred:
additional_options += '-LBS true '
if ec_pred:
additional_options += '-EC true '
if go_pred:
additional_options += '-GO true '
self.additional_options = additional_options
if not java_home:
self.java_home = '${JAVA_HOME}'
else:
self.java_home = java_home
if not job_scheduler_header:
self.job_scheduler_header = ''
else:
self.job_scheduler_header = job_scheduler_header
if runtype == 'local' or runtype == 'torque':
self.prep_script_local(itasser_loc=itasser_path,
itlib_loc=itlib_path)
if runtype == 'slurm':
self.prep_script_slurm(itasser_loc=itasser_path,
itlib_loc=itlib_path)
def prep_folder(self, seq):
"""Take in a sequence string and prepares the folder for the I-TASSER run."""
itasser_dir = op.join(self.root_dir, self.id)
if not op.exists(itasser_dir):
os.makedirs(itasser_dir)
tmp = {self.id: seq}
fasta.write_fasta_file_from_dict(indict=tmp,
outname='seq',
outext='.fasta',
outdir=itasser_dir)
return itasser_dir
def prep_script_local(self, itasser_loc, itlib_loc):
script_file = '{}.sh'.format(self.id)
outfile = os.path.join(self.root_dir, script_file)
itasser = {'executable': op.join(itasser_loc, 'I-TASSERmod/runI-TASSER.pl'),
'pkgdir': itasser_loc,
'libdir': itlib_loc,
'seqname': self.id,
'datadir': self.execute_dir,
'java_home': self.java_home,
'additional_options': self.additional_options,
'light': self.light}
script = open(outfile, 'w')
script.write('#!/bin/bash -l\n')
if self.runtype == 'torque':
script.write('{}'.format(self.job_scheduler_header))
script.write(("{i[executable]} "
"-pkgdir {i[pkgdir]} "
"-libdir {i[libdir]} "
"-seqname {i[seqname]} "
"-datadir {i[datadir]} "
"-java_home {i[java_home]} "
"{i[additional_options]}"
"-light {i[light]}\n\n").format(i=itasser))
script.close()
os.chmod(outfile, 0o755)
        if self.print_exec and self.runtype == 'local':
print('nohup ./{} > {}.out &'.format(op.basename(outfile), os.path.join(self.root_dir, self.id)),
end='\n\n')
if self.print_exec and self.runtype == 'torque':
            print('qsub {}'.format(op.basename(outfile)), end='; ')
return outfile
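    # Roughly, the generated script looks like the following single command, wrapped here
    # for readability; the paths and the optional scheduler header are whatever was passed
    # in (this sketch is not from the original source):
    #
    #   #!/bin/bash -l
    #   /path/to/I-TASSER4.4/I-TASSERmod/runI-TASSER.pl -pkgdir /path/to/I-TASSER4.4
    #       -libdir /path/to/ITLIB -seqname <id> -datadir <execute_dir>
    #       -java_home ${JAVA_HOME} -light true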
def prep_script_slurm(self, itasser_loc, itlib_loc):
script_file = '{}.slm'.format(self.id)
outfile = os.path.join(self.root_dir, script_file)
itasser = {'executable': op.join(itasser_loc, 'I-TASSERmod/runI-TASSER.pl'),
'pkgdir': itasser_loc,
'libdir': itlib_loc,
'seqname': self.id,
'datadir': self.execute_dir,
'java_home': self.java_home,
'light': self.light,
'additional_options': self.additional_options}
slurm = open(outfile, 'w')
slurm.write('#!/bin/bash -l\n')
slurm.write('{}'.format(self.job_scheduler_header))
slurm.write(('{i[executable]} '
'-pkgdir {i[pkgdir]} '
'-libdir {i[libdir]} '
'-seqname {i[seqname]} '
'-datadir {i[datadir]} '
'-java_home {i[java_home]} '
'{i[additional_options]}'
'-light {i[light]}\n\n').format(i=itasser))
slurm.close()
os.chmod(outfile, 0o755)
if self.print_exec:
print('sbatch {}'.format(op.basename(outfile)), end='; ')
return outfile
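# A hedged usage sketch, mirroring the commented examples further below; the identifier,
# sequence and paths are placeholders, not part of the original file:
#
#     prep = ITASSERPrep('P0AB71', 'MSKIFDFVKPGVITGDDVQKVFQVAK', root_dir='/tmp/itasser',
#                        itasser_path='~/software/I-TASSER4.4',
#                        itlib_path='~/software/ITLIB',
#                        runtype='slurm', print_exec=True)
#
# which writes seq.fasta into root_dir/<ident> and a P0AB71.slm submission script.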
if __name__ == '__main__':
pass
# TODO: make this an executable script to
# 1) ask for global I-TASSER locations
# 2) ask for working directory
# 3) take in multiple inputs and prepare them for I-TASSER runs
# a) input types
# i) a single FASTA file with single or multiple sequences
# ii) multiple FASTA files contained in the working directory
# iii) a dataframe with IDs and sequences
# iv) a sequence string and an ID (and optional additional identifiers)
# b) types of runs
# i) NERSC slurm (sbatch) inputs
# ii) local torque (qsub) inputs
# iii) simple executable background scripts
# 4) Output executable scripts or submit things to the queue
# root = '/home/nathan/projects/GEM-PRO/cyano/'
# files = glob.glob(os.path.join(root,'*.faa'))
# for f in files:
# identifier = os.path.splitext(os.path.basename(f))[0]
# ip = ITASSERPrep(id=identifier, root_dir='/home/nathan/projects/GEM-PRO/cyano')
#
# sequence = sl.seq_loader(f, is_file=True)
# execute_dir = ip.prep_folder(sequence)
# ip.prep_script_local(itasser_loc='/home/nathan/software/I-TASSER4.4',
# itlib_loc='/home/nathan/software/ITLIB',
# datadir=execute_dir)
# ip = ITASSERPrep(id='W5EP13', root_dir='/home/nathan/projects/GEM-PRO/cyano/')
#
# sequence = sl.seq_loader('/home/nathan/Downloads/W5EP13.faa', is_file=True)
# execute_dir = ip.prep_folder(sequence)
# ip.prep_script_local(itasser_loc='/home/nathan/software/I-TASSER4.4',
# itlib_loc='/home/nathan/software/ITLIB',
# datadir=execute_dir)
## below is old run_all script in python
# import os
# import shutil
# import subprocess
#
# thedir = '.'
# folders = [name for name in os.listdir(
# thedir) if os.path.isdir(os.path.join(thedir, name))]
# folders = sorted(folders, reverse=True)
# for_ssb3 = folders[:len(folders) / 2]
#
# for fo in for_ssb3:
# coach = open('%s_coach.sh' % fo, 'w')
#
# coach.write('#!/bin/bash\n')
# coach.write('#PBS -l walltime=05:20:00\n')
# coach.write('#PBS -q regular\n')
# coach.write('#PBS -N %s\n' % fo)
# coach.write('perl ~/software/I-TASSER4.4/I-TASSERmod/runCOACH.pl -pkgdir /home/nathan/software/I-TASSER4.4 -libdir /home/nathan/software/ITLIB -protname %s -model model1.pdb -datadir /home/nathan/projects/GEM-PRO/yome/all_test/%s -GO true\n\n' % (fo, fo))
#
# coach.close()
#
# # subprocess.call('qsub %s_coach.sh;' % (fo), shell=True)
# print('qsub %s_coach.sh;' % (fo)),
|
SBRG/ssbio
|
ssbio/protein/structure/homology/itasser/itasserprep.py
|
Python
|
mit
| 10,329
|
import pytest
from CyberArkPAS import Client, add_user_command, get_users_command, \
update_user_command, add_safe_command, update_safe_command, get_list_safes_command, get_safe_by_name_command, \
add_safe_member_command, update_safe_member_command, list_safe_members_command, add_account_command, \
update_account_command, get_list_accounts_command, get_list_account_activity_command, fetch_incidents, \
get_account_details_command
from test_data.context import ADD_USER_CONTEXT, GET_USERS_CONTEXT, \
UPDATE_USER_CONTEXT, UPDATE_SAFE_CONTEXT, GET_LIST_SAFES_CONTEXT, GET_SAFE_BY_NAME_CONTEXT, ADD_SAFE_CONTEXT, \
ADD_SAFE_MEMBER_CONTEXT, UPDATE_SAFE_MEMBER_CONTEXT, LIST_SAFE_MEMBER_CONTEXT, ADD_ACCOUNT_CONTEXT, \
UPDATE_ACCOUNT_CONTEXT, GET_LIST_ACCOUNT_CONTEXT, GET_LIST_ACCOUNT_ACTIVITIES_CONTEXT, INCIDENTS, INCIDENTS_AFTER_FETCH, \
INCIDENTS_LIMITED_BY_MAX_SIZE, INCIDENTS_FILTERED_BY_SCORE, GET_ACCOUNT_CONTEXT
from test_data.http_resonses import ADD_USER_RAW_RESPONSE, \
UPDATE_USER_RAW_RESPONSE, GET_USERS_RAW_RESPONSE, ADD_SAFE_RAW_RESPONSE, UPDATE_SAFE_RAW_RESPONSE, \
GET_LIST_SAFES_RAW_RESPONSE, GET_SAFE_BY_NAME_RAW_RESPONSE, ADD_SAFE_MEMBER_RAW_RESPONSE, \
UPDATE_SAFE_MEMBER_RAW_RESPONSE, LIST_SAFE_MEMBER_RAW_RESPONSE, ADD_ACCOUNT_RAW_RESPONSE, \
UPDATE_ACCOUNT_RAW_RESPONSE, GET_LIST_ACCOUNT_RAW_RESPONSE, GET_LIST_ACCOUNT_ACTIVITIES_RAW_RESPONSE, \
GET_SECURITY_EVENTS_RAW_RESPONSE, GET_SECURITY_EVENTS_WITH_UNNECESSARY_INCIDENT_RAW_RESPONSE, \
GET_SECURITY_EVENTS_WITH_15_INCIDENT_RAW_RESPONSE, GET_ACCOUNT_RAW_RESPONSE
ADD_USER_ARGS = {
"change_password_on_the_next_logon": "true",
"description": "new user for test",
"email": "usertest@test.com",
"enable_user": "true",
"first_name": "user",
"last_name": "test",
"password": "12345Aa",
"password_never_expires": "false",
"profession": "testing integrations",
"username": "TestUser"
}
UPDATE_USER_ARGS = {
"change_password_on_the_next_logon": "true",
"description": "updated description",
"email": "update@test.com",
"enable_user": "true",
"first_name": "test1",
"last_name": "updated-name",
"password_never_expires": "false",
"profession": "test1",
"user_id": "123",
"username": "TestUser1"
}
GET_USER_ARGS = {
"filter": "filteroption",
"search": "searchoption"
}
ADD_SAFE_ARGS = {
"description": "safe for tests",
"number_of_days_retention": "100",
"safe_name": "TestSafe"
}
UPDATE_SAFE_ARGS = {
"description": "UpdatedSafe",
"number_of_days_retention": "150",
"safe_name": "TestSafe",
"safe_new_name": "UpdatedName"
}
GET_SAFE_BY_NAME_ARGS = {
"safe_name": "TestSafe"
}
ADD_SAFE_MEMBER_ARGS = {
"member_name": "TestUser",
"requests_authorization_level": "0",
"safe_name": "TestSafe"
}
UPDATE_SAFE_MEMBER_ARGS = {
"member_name": "TestUser",
"permissions": "UseAccounts",
"requests_authorization_level": "0",
"safe_name": "TestSafe"
}
LIST_SAFE_MEMBER_ARGS = {
"safe_name": "TestSafe"
}
ADD_ACCOUNT_ARGS = {
"account_name": "TestAccount1",
"address": "/",
"automatic_management_enabled": "true",
"password": "12345Aa",
"platform_id": "WinServerLocal",
"safe_name": "TestSafe",
"secret_type": "password",
"username": "TestUser"
}
UPDATE_ACCOUNT_ARGS = {
"account_id": "77_4",
"account_name": "NewName"
}
GET_ACCOUNT_ARGS = {
"account_id": "11_1",
}
GET_LIST_ACCOUNT_ARGS = {
"limit": "2",
"offset": "0"
}
GET_LIST_ACCOUNT_ACTIVITIES_ARGS = {
"account_id": "77_4"
}
@pytest.mark.parametrize('command, args, http_response, context', [
(add_user_command, ADD_USER_ARGS, ADD_USER_RAW_RESPONSE, ADD_USER_CONTEXT),
(update_user_command, UPDATE_USER_ARGS, UPDATE_USER_RAW_RESPONSE, UPDATE_USER_CONTEXT),
(get_users_command, {}, GET_USERS_RAW_RESPONSE, GET_USERS_CONTEXT),
(add_safe_command, ADD_SAFE_ARGS, ADD_SAFE_RAW_RESPONSE, ADD_SAFE_CONTEXT),
(update_safe_command, UPDATE_SAFE_ARGS, UPDATE_SAFE_RAW_RESPONSE, UPDATE_SAFE_CONTEXT),
(get_list_safes_command, {}, GET_LIST_SAFES_RAW_RESPONSE, GET_LIST_SAFES_CONTEXT),
(get_safe_by_name_command, GET_SAFE_BY_NAME_ARGS, GET_SAFE_BY_NAME_RAW_RESPONSE, GET_SAFE_BY_NAME_CONTEXT),
(add_safe_member_command, ADD_SAFE_MEMBER_ARGS, ADD_SAFE_MEMBER_RAW_RESPONSE, ADD_SAFE_MEMBER_CONTEXT),
(update_safe_member_command, UPDATE_SAFE_MEMBER_ARGS, UPDATE_SAFE_MEMBER_RAW_RESPONSE, UPDATE_SAFE_MEMBER_CONTEXT),
(list_safe_members_command, LIST_SAFE_MEMBER_ARGS, LIST_SAFE_MEMBER_RAW_RESPONSE, LIST_SAFE_MEMBER_CONTEXT),
(add_account_command, ADD_ACCOUNT_ARGS, ADD_ACCOUNT_RAW_RESPONSE, ADD_ACCOUNT_CONTEXT),
(update_account_command, UPDATE_ACCOUNT_ARGS, UPDATE_ACCOUNT_RAW_RESPONSE, UPDATE_ACCOUNT_CONTEXT),
(get_account_details_command, GET_ACCOUNT_ARGS, GET_ACCOUNT_RAW_RESPONSE, GET_ACCOUNT_CONTEXT),
(get_list_accounts_command, GET_LIST_ACCOUNT_ARGS, GET_LIST_ACCOUNT_RAW_RESPONSE, GET_LIST_ACCOUNT_CONTEXT),
(get_list_account_activity_command, GET_LIST_ACCOUNT_ACTIVITIES_ARGS, GET_LIST_ACCOUNT_ACTIVITIES_RAW_RESPONSE,
GET_LIST_ACCOUNT_ACTIVITIES_CONTEXT),
])
def test_cyberark_pas_commands(command, args, http_response, context, mocker):
"""Unit test
Given
- demisto args
- raw response of the http request
When
- mock the http request result
Then
- create the context
- validate the expected_result and the created context
"""
mocker.patch.object(Client, '_generate_token')
client = Client(server_url="https://api.cyberark.com/", username="user1", password="12345", use_ssl=False,
proxy=False, max_fetch=50)
mocker.patch.object(Client, '_http_request', return_value=http_response)
outputs = command(client, **args)
results = outputs.to_context()
assert results.get("EntryContext") == context
def test_fetch_incidents(mocker):
"""Unit test
Given
- raw response of the http request
When
- mock the http request result as 5 results that are sorted from the newest to the oldest
Then
- as defined in the demisto params - show only 2, those should be the oldest 2 available
- validate the incidents values
"""
mocker.patch.object(Client, '_generate_token')
client = Client(server_url="https://api.cyberark.com/", username="user1", password="12345", use_ssl=False,
proxy=False, max_fetch=50)
mocker.patch.object(Client, '_http_request', return_value=GET_SECURITY_EVENTS_RAW_RESPONSE)
_, incidents = fetch_incidents(client, {}, "3 days", "0", "2")
assert incidents == INCIDENTS
def test_fetch_incidents_with_an_incident_that_was_shown_before(mocker):
"""Unit test
Given
- demisto params
- raw response of the http request
When
- mock the http request result while one of the incidents was shown in the previous run
Then
- validate the incidents values, make sure the event that was shown before is not in
the incidents again
"""
mocker.patch.object(Client, '_generate_token')
client = Client(server_url="https://api.cyberark.com/", username="user1", password="12345", use_ssl=False,
proxy=False, max_fetch=50)
mocker.patch.object(Client, '_http_request', return_value=GET_SECURITY_EVENTS_WITH_UNNECESSARY_INCIDENT_RAW_RESPONSE)
# the last run dict is the same we would have got if we run the prev test before
last_run = {'time': 1594573600000, 'last_event_ids': '["5f0b3064e4b0ba4baf5c1113", "5f0b4320e4b0ba4baf5c2b05"]'}
_, incidents = fetch_incidents(client, last_run, "3 days", "0", "1")
assert incidents == INCIDENTS_AFTER_FETCH
def test_fetch_incidents_with_more_incidents_than_max_size(mocker):
"""Unit test
Given
- demisto params
- raw response of the http request
When
- mock the http request result while the result is 15 incidents and we only wish to see 5
Then
    - validate the incidents values, make sure that there are only 5 incidents and that they
      are the oldest
"""
mocker.patch.object(Client, '_generate_token')
client = Client(server_url="https://api.cyberark.com/", username="user1", password="12345", use_ssl=False,
proxy=False, max_fetch=5)
mocker.patch.object(Client, '_http_request', return_value=GET_SECURITY_EVENTS_WITH_15_INCIDENT_RAW_RESPONSE)
_, incidents = fetch_incidents(client, {}, "3 days", "0", max_fetch="5")
assert len(incidents) == 5
assert incidents == INCIDENTS_LIMITED_BY_MAX_SIZE
def test_fetch_incidents_with_specific_score(mocker):
"""Unit test
Given
- demisto params
- raw response of the http request
When
    - mock the http request result while the result is 15 incidents and only those with a score of at least 50 are wanted
    Then
    - validate the incidents values, make sure that only the 3 incidents with a high enough score
      are returned
"""
mocker.patch.object(Client, '_generate_token')
client = Client(server_url="https://api.cyberark.com/", username="user1", password="12345", use_ssl=False,
proxy=False, max_fetch=10)
mocker.patch.object(Client, '_http_request', return_value=GET_SECURITY_EVENTS_WITH_15_INCIDENT_RAW_RESPONSE)
_, incidents = fetch_incidents(client, {}, "3 days", score="50", max_fetch="10")
assert len(incidents) == 3
assert incidents == INCIDENTS_FILTERED_BY_SCORE
|
VirusTotal/content
|
Packs/CyberArkPAS/Integrations/CyberArkPAS/CyberArkPAS_test.py
|
Python
|
mit
| 9,611
|
r"""
Examples
--------
::
* - *
/ \ / \
* - * - *
/ \ / \ / \
* - * - * - *
\ / \ / \ /
* - * - *
\ / \ /
* - *
>>> from landlab.graph import TriGraph
>>> graph = TriGraph((5, 2), node_layout="hex", sort=True)
>>> graph.number_of_nodes
14
>>> graph.x_of_node
array([ 1. , 2. ,
0.5, 1.5, 2.5,
0. , 1. , 2. , 3. ,
0.5, 1.5, 2.5,
1. , 2. ])
>>> graph.number_of_links
29
>>> graph.number_of_patches
16
::
* - * - * - *
\ / \ / \ / \
* - * - * - *
/ \ / \ / \ /
* - * - * - *
>>> from landlab.graph import TriGraph
>>> graph = TriGraph((3, 4), orientation="horizontal", node_layout="rect", sort=True)
>>> graph.number_of_nodes
12
>>> graph.x_of_node.reshape((3, 4))
array([[ 0. , 1. , 2. , 3. ],
[ 0.5, 1.5, 2.5, 3.5],
[ 0. , 1. , 2. , 3. ]])
>>> graph.number_of_links
23
>>> graph.number_of_patches
12
"""
from functools import lru_cache
import numpy as np
from ...core.utils import as_id_array
from ...utils.decorators import cache_result_in_object, make_return_array_immutable
from ..graph import Graph
from ..voronoi.voronoi import DelaunayGraph
class HorizontalRectTriGraphCython:
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
from .ext.hex import fill_xy_of_node_rect_horizontal
x_of_node = np.empty(shape[0] * shape[1], dtype=float)
y_of_node = np.empty(shape[0] * shape[1], dtype=float)
fill_xy_of_node_rect_horizontal(shape, x_of_node, y_of_node)
x_of_node[:] *= spacing
y_of_node[:] *= spacing * np.sin(np.pi / 3.0)
x_of_node[:] += xy_of_lower_left[0]
y_of_node[:] += xy_of_lower_left[1]
return x_of_node, y_of_node
class VerticalRectTriGraphCython:
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
from .ext.hex import fill_xy_of_node_rect_vertical
n_rows, n_cols = shape
x_spacing = np.sin(np.pi / 3.0) * spacing
y_spacing = spacing
x_of_node = np.empty(n_rows * n_cols, dtype=float)
y_of_node = np.empty(n_rows * n_cols, dtype=float)
fill_xy_of_node_rect_vertical(shape, x_of_node, y_of_node)
x_of_node *= x_spacing
y_of_node *= y_spacing
x_of_node += xy_of_lower_left[0]
y_of_node += xy_of_lower_left[1]
return x_of_node, y_of_node
class HorizontalHexTriGraphCython:
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
from .ext.hex import fill_xy_of_node_hex_horizontal
n_rows, n_cols = shape
n_nodes = n_rows * n_cols + (n_rows // 2) ** 2
x_of_node = np.empty(n_nodes, dtype=float)
y_of_node = np.empty(n_nodes, dtype=float)
fill_xy_of_node_hex_horizontal(shape, x_of_node, y_of_node)
x_of_node[:] *= spacing
y_of_node[:] *= spacing * np.sin(np.pi / 3.0)
x_of_node[:] += xy_of_lower_left[0]
y_of_node[:] += xy_of_lower_left[1]
return x_of_node, y_of_node
class VerticalHexTriGraphCython:
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
from .ext.hex import fill_xy_of_node_hex_vertical
n_rows, n_cols = shape
n_nodes = n_cols * n_rows + (n_cols // 2) ** 2
x_of_node = np.empty(n_nodes, dtype=float)
y_of_node = np.empty(n_nodes, dtype=float)
fill_xy_of_node_hex_vertical(shape, x_of_node, y_of_node)
x_of_node[:] *= spacing * np.sin(np.pi / 3.0)
y_of_node[:] *= spacing
x_of_node[:] += xy_of_lower_left[0]
y_of_node[:] += xy_of_lower_left[1]
return x_of_node, y_of_node
class HorizontalRectTriGraph:
@staticmethod
def number_of_nodes(shape):
n_rows, n_cols = shape
return n_rows * n_cols
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
n_rows, n_cols = shape
x_spacing, y_spacing = spacing, spacing * np.sin(np.pi / 3.0)
x_of_node, y_of_node = np.meshgrid(
np.arange(n_cols) * x_spacing + xy_of_lower_left[0],
np.arange(n_rows) * y_spacing + xy_of_lower_left[1],
)
x_of_node[1::2] += spacing * 0.5
return x_of_node.reshape(-1), y_of_node.reshape(-1)
@staticmethod
def corner_nodes(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import HorizontalRectTriGraph
>>> HorizontalRectTriGraph.corner_nodes((3, 4))
(11, 8, 0, 3)
>>> HorizontalRectTriGraph.corner_nodes((3, 2))
(5, 4, 0, 1)
>>> HorizontalRectTriGraph.corner_nodes((7, 1))
(6, 6, 0, 0)
>>> HorizontalRectTriGraph.corner_nodes((1, 3))
(2, 0, 0, 2)
"""
n_rows, n_cols = shape
return (n_rows * n_cols - 1, n_cols * (n_rows - 1), 0, n_cols - 1)
@staticmethod
def number_of_perimeter_nodes(shape):
if 1 in shape:
return np.prod(shape)
return 2 * shape[0] + 2 * (shape[1] - 2)
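    # Worked example: a (3, 4) rectangular layout has 2 * 3 + 2 * (4 - 2) == 10
    # perimeter nodes, i.e. all 12 nodes except the 2 interior ones.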
@staticmethod
def perimeter_nodes(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import HorizontalRectTriGraph
>>> HorizontalRectTriGraph.perimeter_nodes((3, 2))
array([1, 3, 5, 4, 2, 0])
"""
return np.concatenate(HorizontalRectTriGraph.nodes_at_edge(shape))
@staticmethod
def nodes_at_edge(shape):
n_rows, n_cols = shape
if n_rows == n_cols == 1:
return (np.array([0]),) + (np.array([], dtype=int),) * 3
(
northeast,
northwest,
southwest,
southeast,
) = HorizontalRectTriGraph.corner_nodes(shape)
if n_rows > 1:
south = np.arange(southwest, southeast)
else:
south = np.array([southwest], dtype=int)
if n_cols > 1:
west = np.arange(northwest, southwest, -n_cols)
else:
west = np.array([northwest], dtype=int)
return (
np.arange(southeast, northeast, n_cols),
np.arange(northeast, northwest, -1),
west,
south,
)
class VerticalRectTriGraph:
@staticmethod
def number_of_nodes(shape):
n_rows, n_cols = shape
return n_rows * n_cols
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
n_rows, n_cols = shape
x_spacing, y_spacing = spacing * np.sin(np.pi / 3.0), spacing
x_of_node = np.empty((n_rows, n_cols), dtype=float)
y_of_node = np.empty((n_rows, n_cols), dtype=float)
x_of_node[:, (n_cols + 1) // 2 :] = (
np.arange(n_cols // 2) * x_spacing * 2.0 + x_spacing + xy_of_lower_left[0]
)
x_of_node[:, : (n_cols + 1) // 2] = (
np.arange((n_cols + 1) // 2) * x_spacing * 2.0 + xy_of_lower_left[0]
)
y_of_node[:, : (n_cols + 1) // 2] = (
np.arange(n_rows) * y_spacing + xy_of_lower_left[1]
).reshape((n_rows, 1))
y_of_node[:, (n_cols + 1) // 2 :] = (
np.arange(n_rows) * y_spacing + xy_of_lower_left[1] + y_spacing * 0.5
).reshape((n_rows, 1))
return x_of_node.reshape(-1), y_of_node.reshape(-1)
@staticmethod
def corner_nodes(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import VerticalRectTriGraph
>>> VerticalRectTriGraph.corner_nodes((4, 3))
(10, 9, 0, 1)
>>> VerticalRectTriGraph.corner_nodes((4, 4))
(15, 12, 0, 3)
>>> VerticalRectTriGraph.corner_nodes((3, 2))
(5, 4, 0, 1)
>>> VerticalRectTriGraph.corner_nodes((7, 1))
(6, 6, 0, 0)
>>> VerticalRectTriGraph.corner_nodes((1, 3))
(1, 0, 0, 1)
>>> VerticalRectTriGraph.corner_nodes((2, 3))
(4, 3, 0, 1)
"""
n_rows, n_cols = shape
if n_cols % 2 == 0:
return (n_rows * n_cols - 1, n_cols * (n_rows - 1), 0, n_cols - 1)
else:
return (
n_rows * n_cols - 1 - n_cols // 2,
n_cols * (n_rows - 1),
0,
n_cols // 2,
)
@staticmethod
def number_of_perimeter_nodes(shape):
if 1 in shape:
return np.prod(shape)
return 2 * shape[1] + 2 * (shape[0] - 2)
@staticmethod
def perimeter_nodes(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import VerticalRectTriGraph
>>> VerticalRectTriGraph.perimeter_nodes((3, 2))
array([1, 3, 5, 4, 2, 0])
>>> VerticalRectTriGraph.perimeter_nodes((2, 3))
array([1, 4, 5, 3, 0, 2])
>>> VerticalRectTriGraph.perimeter_nodes((2, 4))
array([3, 7, 5, 6, 4, 0, 2, 1])
"""
return np.concatenate(VerticalRectTriGraph.nodes_at_edge(shape))
@staticmethod
def nodes_at_edge(shape):
n_rows, n_cols = shape
if n_rows == n_cols == 1:
return (np.array([0]),) + (np.array([], dtype=int),) * 3
n_nodes = n_rows * n_cols
(
northeast,
northwest,
southwest,
southeast,
) = VerticalRectTriGraph.corner_nodes(shape)
if n_cols == 1:
southwest = northwest - n_cols
north = np.empty(n_cols - 1, dtype=int)
north[::2] = n_nodes - n_cols // 2 + np.arange(n_cols // 2)
north[1::2] = northwest + np.arange(1, n_cols - n_cols // 2)
if n_rows > 1:
south = np.empty(n_cols - 1, dtype=int)
south[::2] = np.arange(0, n_cols // 2)
south[1::2] = (n_cols + 1) // 2 + np.arange(n_cols - n_cols // 2 - 1)
else:
south = np.array([southwest], dtype=int)
return (
np.arange(southeast, northeast, n_cols),
north[::-1],
np.arange(northwest, southwest, -n_cols),
south,
)
class HorizontalHexTriGraph:
@staticmethod
def number_of_nodes(shape):
n_rows, n_cols = shape
return n_rows * n_cols + (n_rows // 2) ** 2
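    # Worked example: for the (5, 2) hex layout in the module docstring,
    # 5 * 2 + (5 // 2) ** 2 == 10 + 4 == 14 nodes.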
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
n_rows, n_cols = shape
x_spacing, y_spacing = spacing, spacing * np.sin(np.pi / 3.0)
length_of_row = np.concatenate(
(
np.arange((n_rows + 2) // 2) + n_cols,
(n_rows + 2) // 2
+ n_cols
- 1
- np.arange(1, n_rows - (n_rows + 2) // 2 + 1),
)
)
offset_to_row = np.concatenate((np.array([0]), length_of_row)).cumsum()
rows = [
slice(start, end)
for start, end in zip(offset_to_row[:-1], offset_to_row[1:])
]
y_of_node = np.empty(HorizontalHexTriGraph.number_of_nodes(shape), dtype=float)
for row, inds in enumerate(rows):
y_of_node[inds] = row * y_spacing + xy_of_lower_left[1]
x_of_node = np.empty(HorizontalHexTriGraph.number_of_nodes(shape), dtype=float)
x_of_row = (
np.abs((n_rows + 2) // 2 - 1 - np.arange(n_rows)) * x_spacing * 0.5
+ xy_of_lower_left[0]
)
for row, inds in enumerate(rows):
x_of_node[inds] = x_of_row[row] + np.arange(length_of_row[row]) * x_spacing
return x_of_node.reshape(-1), y_of_node.reshape(-1)
@staticmethod
def corner_nodes(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import HorizontalHexTriGraph
>>> HorizontalHexTriGraph.corner_nodes((3, 2))
(4, 6, 5, 2, 0, 1)
>>> HorizontalHexTriGraph.corner_nodes((7, 1))
(9, 15, 15, 6, 0, 0)
>>> HorizontalHexTriGraph.corner_nodes((6, 1))
(9, 14, 13, 6, 0, 0)
>>> HorizontalHexTriGraph.corner_nodes((4, 2))
(8, 11, 9, 5, 0, 1)
"""
n_rows, n_cols = shape
n_nodes_in_middle_row = n_rows // 2 + n_cols
n_nodes = n_rows * n_cols + (n_rows // 2) ** 2
east = (n_nodes_in_middle_row + n_cols) * ((n_rows // 2 + 1) // 2) - 1
if (n_rows // 2) % 2 == 0:
east += (n_nodes_in_middle_row + n_cols) // 2
return (
east,
n_nodes - 1,
n_nodes - (n_cols + (n_rows + 1) % 2),
east - (n_nodes_in_middle_row - 1),
0,
n_cols - 1,
)
@staticmethod
def number_of_perimeter_nodes(shape):
if shape[0] == 1:
return shape[1]
return 2 * shape[0] + 2 * (shape[1] - 2) + (shape[0] + 1) % 2
@staticmethod
def perimeter_nodes(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import HorizontalHexTriGraph
>>> HorizontalHexTriGraph.perimeter_nodes((3, 2))
array([4, 6, 5, 2, 0, 1])
>>> HorizontalHexTriGraph.perimeter_nodes((1, 3))
array([2, 1, 0])
"""
return np.concatenate(HorizontalHexTriGraph.nodes_at_edge(shape))
@staticmethod
def nodes_at_edge(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import HorizontalHexTriGraph
>>> HorizontalHexTriGraph.nodes_at_edge((5, 3))
(array([11, 15]), array([18, 17]), array([16, 12]), array([7, 3]), array([0, 1]), array([2, 6]))
>>> HorizontalHexTriGraph.nodes_at_edge((4, 3))
(array([11]), array([15, 14, 13]), array([12]), array([7, 3]), array([0, 1]), array([2, 6]))
"""
n_rows, n_cols = shape
(
east,
northeast,
northwest,
west,
southwest,
southeast,
) = HorizontalHexTriGraph.corner_nodes(shape)
if n_rows == 1:
            nodes_at_south_edge = np.asarray([southwest], dtype=int)
else:
nodes_at_south_edge = np.arange(southwest, southeast)
return (
as_id_array(
northeast
- np.arange(northeast - northwest + 1, east - west + 1).cumsum()[::-1]
),
np.arange(northeast, northwest, -1),
as_id_array(
west
+ np.arange(east - west + 1, northeast - northwest + 1, -1).cumsum()[
::-1
]
),
as_id_array(
southwest + np.arange(n_cols, n_rows // 2 + n_cols).cumsum()[::-1]
),
nodes_at_south_edge,
as_id_array(
east - np.arange(n_cols + n_rows // 2, n_cols, -1).cumsum()[::-1]
),
)
class VerticalHexTriGraph:
@staticmethod
def number_of_nodes(shape):
n_rows, n_cols = shape
return n_rows * n_cols + (n_cols // 2) ** 2
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
n_rows, n_cols = shape
x_spacing, y_spacing = spacing * np.sin(np.pi / 3.0), spacing
length_of_middle_rows = np.full(2 * n_rows - 1, n_cols // 2)
if n_cols % 2 == 1:
length_of_middle_rows[::2] += 1
length_of_row = np.concatenate(
(
np.arange(1, n_cols // 2 + 1),
length_of_middle_rows,
np.arange(n_cols // 2, 0, -1),
)
)
offset_to_row = np.concatenate((np.array([0]), length_of_row)).cumsum()
rows = [
slice(start, end)
for start, end in zip(offset_to_row[:-1], offset_to_row[1:])
]
y_of_node = np.empty(VerticalHexTriGraph.number_of_nodes(shape), dtype=float)
for row, inds in enumerate(rows):
y_of_node[inds] = row * y_spacing * 0.5
x_of_node = np.empty(VerticalHexTriGraph.number_of_nodes(shape), dtype=float)
x_of_middle_rows = np.zeros(2 * n_rows - 1)
x_of_middle_rows[1::2] += 1.0
x_of_row = (
np.concatenate(
(
np.arange(n_cols // 2, 0, -1),
x_of_middle_rows,
np.arange(1, n_cols // 2 + 1),
)
)
* x_spacing
)
for row, inds in enumerate(rows):
x_of_node[inds] = (
x_of_row[row] + np.arange(length_of_row[row]) * 2.0 * x_spacing
)
x_of_node += xy_of_lower_left[0]
y_of_node += xy_of_lower_left[1]
return x_of_node.reshape(-1), y_of_node.reshape(-1)
@staticmethod
def corner_nodes(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import VerticalHexTriGraph
>>> VerticalHexTriGraph.corner_nodes((2, 5))
(10, 13, 8, 3, 0, 5)
>>> VerticalHexTriGraph.corner_nodes((2, 3))
(5, 6, 4, 1, 0, 2)
>>> VerticalHexTriGraph.corner_nodes((2, 4))
(10, 11, 7, 3, 0, 2)
>>> VerticalHexTriGraph.corner_nodes((2, 2))
(4, 4, 3, 1, 0, 0)
>>> VerticalHexTriGraph.corner_nodes((3, 1))
(2, 2, 2, 0, 0, 0)
>>> VerticalHexTriGraph.corner_nodes((1, 3))
(2, 3, 1, 1, 0, 2)
"""
n_rows, n_cols = shape
n_nodes = n_rows * n_cols + (n_cols // 2) ** 2
n = n_cols // 2
tri_nodes = (n + 1) * (n // 2)
if n % 2 == 1:
tri_nodes += (n + 1) // 2
if n_cols % 2 == 0:
southeast = tri_nodes - 1
southwest = tri_nodes
northwest = n_nodes - 1 - (tri_nodes - 1 + n_cols // 2)
northeast = n_nodes - 1 - (tri_nodes - n)
else:
southwest = tri_nodes
southeast = tri_nodes + n_cols // 2
northwest = n_nodes - (tri_nodes + (n_cols + 1) // 2)
northeast = n_nodes - 1 - tri_nodes
south = 0
north = n_nodes - 1
return (northeast, north, northwest, southwest, south, southeast)
@staticmethod
def number_of_perimeter_nodes(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import VerticalHexTriGraph
>>> VerticalHexTriGraph.number_of_perimeter_nodes((2, 3))
6
>>> VerticalHexTriGraph.number_of_perimeter_nodes((2, 2))
5
"""
if shape[1] == 1:
return shape[0]
return 2 * shape[1] + 2 * (shape[0] - 2) + (shape[1] + 1) % 2
@staticmethod
def perimeter_nodes(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import VerticalHexTriGraph
>>> VerticalHexTriGraph.perimeter_nodes((3, 7))
array([ 9, 16, 23, 26, 28, 29, 27, 24, 20, 13, 6, 3, 1, 0, 2, 5])
>>> VerticalHexTriGraph.perimeter_nodes((2, 3))
array([2, 5, 6, 4, 1, 0])
>>> VerticalHexTriGraph.perimeter_nodes((2, 4))
array([ 2, 6, 10, 11, 9, 7, 3, 1, 0])
>>> VerticalHexTriGraph.perimeter_nodes((2, 2))
array([0, 2, 4, 3, 1])
>>> VerticalHexTriGraph.perimeter_nodes((3, 1))
array([0, 1, 2])
"""
return np.concatenate(VerticalHexTriGraph.nodes_at_edge(shape))
@staticmethod
def nodes_at_edge(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import VerticalHexTriGraph
>>> VerticalHexTriGraph.nodes_at_edge((3, 7))
(array([ 9, 16]), array([23, 26, 28]), array([29, 27, 24]), array([20, 13]), array([6, 3, 1]), array([0, 2, 5]))
>>> VerticalHexTriGraph.nodes_at_edge((2, 3))
(array([2]), array([5]), array([6]), array([4]), array([1]), array([0]))
>>> VerticalHexTriGraph.nodes_at_edge((2, 4))
(array([2, 6]), array([10]), array([11, 9]), array([7]), array([3, 1]), array([0]))
"""
n_rows, n_cols = shape
(
northeast,
north,
northwest,
southwest,
south,
southeast,
) = VerticalHexTriGraph.corner_nodes(shape)
if shape[1] == 1:
southwest = northwest - n_cols
return (
np.arange(southeast, northeast, n_cols),
as_id_array(north - np.arange(1, (n_cols + 1) // 2).cumsum())[::-1],
as_id_array(north - np.arange(1, (n_cols + 2) // 2).cumsum() + 1),
np.arange(northwest, southwest, -n_cols),
as_id_array(south + np.arange(1, (n_cols + 2) // 2).cumsum())[::-1],
as_id_array(south + np.arange(1, (n_cols + 1) // 2).cumsum() - 1),
)
class HexGraphExtras:
@property
@cache_result_in_object()
@make_return_array_immutable
def nodes_at_right_edge(self):
"""Get nodes along the right edge.
Examples
--------
>>> import numpy as np
>>> from landlab.graph import TriGraph
>>> graph = TriGraph((3, 4), node_layout='rect')
>>> graph.nodes_at_right_edge
array([ 3, 7, 11])
"""
return np.arange(
self.shape[1] - 1, self.shape[0] * self.shape[1], self.shape[1], dtype=int
)
@property
@cache_result_in_object()
@make_return_array_immutable
def nodes_at_top_edge(self):
"""Get nodes along the top edge.
Examples
--------
>>> import numpy as np
>>> from landlab.graph import TriGraph
>>> graph = TriGraph((3, 4), node_layout='rect')
>>> graph.nodes_at_top_edge
array([ 8, 9, 10, 11])
"""
return np.arange(
self.number_of_nodes - self.shape[1], self.number_of_nodes, dtype=int
)
@property
@cache_result_in_object()
@make_return_array_immutable
def nodes_at_left_edge(self):
"""Get nodes along the left edge.
Examples
--------
>>> import numpy as np
>>> from landlab.graph import TriGraph
>>> graph = TriGraph((3, 4), node_layout='rect')
>>> graph.nodes_at_left_edge
array([0, 4, 8])
"""
return np.arange(0, self.shape[0] * self.shape[1], self.shape[1], dtype=int)
@property
@cache_result_in_object()
@make_return_array_immutable
def nodes_at_bottom_edge(self):
"""Get nodes along the bottom edge.
Examples
--------
>>> import numpy as np
>>> from landlab.graph import TriGraph
>>> graph = TriGraph((3, 4), node_layout='rect')
>>> graph.nodes_at_bottom_edge
array([0, 1, 2, 3])
"""
return np.arange(self.shape[1], dtype=int)
@property
@cache_result_in_object()
@make_return_array_immutable
def length_of_link(self):
return np.full(self.number_of_links, self.spacing, dtype=float)
class TriGraph(HexGraphExtras, DelaunayGraph):
"""Graph of a structured grid of triangles.
Examples
--------
>>> import numpy as np
>>> from landlab.graph import TriGraph
>>> graph = TriGraph((3, 2))
>>> graph.number_of_nodes == 6
True
>>> np.round(graph.y_of_node * 2. / np.sqrt(3))
... # doctest: +NORMALIZE_WHITESPACE
array([ 0., 0., 1., 1., 2., 2.])
>>> graph.x_of_node # doctest: +NORMALIZE_WHITESPACE
array([ 0. , 1. , 0.5, 1.5, 0. , 1. ])
"""
def __init__(
self,
shape,
spacing=1.0,
xy_of_lower_left=(0.0, 0.0),
orientation="horizontal",
node_layout="rect",
sort=False,
):
"""Create a structured grid of triangles.
Parameters
----------
shape : tuple of int
Number of rows and columns of the hex grid. The first value
is the number of nodes in the first column and the second the
            number of nodes in the first row.
spacing : float, optional
Length of links.
xy_of_lower_left : tuple of float, optional
Coordinates of lower-left corner of the grid.
orientation: {'horizontal', 'vertical'}
Specify if triangles should be laid out in rows or columns.
node_layout: {'rect', 'hex'}
Specify the overall layout of the nodes. Use *rect* for
the layout to approximate a rectangle and *hex* for
a hexagon.
"""
if node_layout not in ("rect", "hex"):
raise ValueError("node_layout not understood")
if orientation not in ("horizontal", "vertical"):
raise ValueError("orientation not understood")
layouts = {
"horizontal_hex": HorizontalHexTriGraph,
"vertical_hex": VerticalHexTriGraph,
"horizontal_rect": HorizontalRectTriGraph,
"vertical_rect": VerticalRectTriGraph,
}
layout = layouts["_".join([orientation, node_layout])]
try:
spacing = float(spacing)
except TypeError:
raise TypeError("spacing must be a float")
self._shape = tuple(shape)
self._spacing = spacing
self._orientation = orientation
self._node_layout = node_layout
x_of_node, y_of_node = layout.xy_of_node(
shape, spacing=spacing, xy_of_lower_left=xy_of_lower_left
)
self._perimeter_nodes = layout.perimeter_nodes(shape)
perimeter_links = np.empty((len(self._perimeter_nodes), 2), dtype=int)
perimeter_links[:, 0] = self._perimeter_nodes
perimeter_links[:-1, 1] = self._perimeter_nodes[1:]
perimeter_links[-1, 1] = self._perimeter_nodes[0]
if 1 in shape:
Graph.__init__(
self,
(y_of_node, x_of_node),
links=list(
zip(np.arange(len(y_of_node) - 1), np.arange(1, len(y_of_node)))
),
sort=False,
)
else:
DelaunayGraph.__init__(
self,
(y_of_node, x_of_node),
perimeter_links=perimeter_links,
sort=False,
)
if sort:
self.sort()
@property
def shape(self):
return self._shape
@property
def spacing(self):
return self._spacing
@property
def orientation(self):
return self._orientation
@property
def node_layout(self):
return self._node_layout
@property
@lru_cache()
@make_return_array_immutable
def perimeter_nodes(self):
return self._perimeter_nodes
|
cmshobe/landlab
|
landlab/graph/hex/hex.py
|
Python
|
mit
| 27,221
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Package'
db.create_table('banzai_package', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('status', self.gf('django.db.models.fields.CharField')(max_length=4, blank=True)),
('pack_id', self.gf('django.db.models.fields.CharField')(max_length=100)),
('emails_all', self.gf('django.db.models.fields.PositiveIntegerField')()),
('emails_correct', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('description', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
))
db.send_create_signal('banzai', ['Package'])
# Adding model 'Report'
db.create_table('banzai_report', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('package', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['banzai.Package'])),
('status', self.gf('django.db.models.fields.CharField')(max_length=4, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('reject_code', self.gf('django.db.models.fields.CharField')(max_length=250)),
('reject_message', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('banzai', ['Report'])
# Adding model 'ReportFBL'
db.create_table('banzai_reportfbl', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('package', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['banzai.Package'])),
('status', self.gf('django.db.models.fields.CharField')(max_length=4, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
))
db.send_create_signal('banzai', ['ReportFBL'])
def backwards(self, orm):
# Deleting model 'Package'
db.delete_table('banzai_package')
# Deleting model 'Report'
db.delete_table('banzai_report')
# Deleting model 'ReportFBL'
db.delete_table('banzai_reportfbl')
models = {
'banzai.package': {
'Meta': {'object_name': 'Package'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'emails_all': ('django.db.models.fields.PositiveIntegerField', [], {}),
'emails_correct': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pack_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
'banzai.report': {
'Meta': {'object_name': 'Report'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['banzai.Package']"}),
'reject_code': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'reject_message': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
'banzai.reportfbl': {
'Meta': {'object_name': 'ReportFBL'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['banzai.Package']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
}
}
complete_apps = ['banzai']
|
Tapo4ek/django-banzai
|
banzai/migrations/0001_initial.py
|
Python
|
mit
| 4,368
|
# Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets
# in the array which give the sum of zero.
#
# Note: The solution set must not contain duplicate triplets.
#
# For example, given array S = [-1, 0, 1, 2, -1, -4],
#
# A solution set is:
# [
# [-1, 0, 1],
# [-1, -1, 2]
# ]
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
nums.sort()
for i in xrange(len(nums) - 2):
if i > 0 and nums[i] == nums[i - 1]:
continue
l, r = i + 1, len(nums) - 1
while l < r:
s = nums[i] + nums[l] + nums[r]
if s < 0:
l += 1
elif s > 0:
r -= 1
else:
res.append((nums[i], nums[l], nums[r]))
while l < r and nums[l] == nums[l + 1]:
l += 1
while l < r and nums[r] == nums[r - 1]:
r -= 1
l += 1
r -= 1
return res
# Note:
# We iterate through the sorted list with pointer i and then look for two more numbers that sum to -nums[i].
# Since the list is ordered, the value at the right pointer is never smaller than the value at the left pointer.
# So if the sum is too large (above 0), move the right pointer back one; if the sum is
# too small (below 0), move the left pointer up one.
#
# To avoid duplicate triplets, we skip further evaluation when nums[i] equals nums[i - 1],
# and likewise advance past repeated values at the left and right pointers after recording a match.
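# A minimal usage sketch (not part of the original solution; assumes Python 2 to match
# the xrange/print style above), using the example input from the problem statement:
if __name__ == '__main__':
    solver = Solution()
    # The list is sorted inside threeSum, so input order does not matter.
    print solver.threeSum([-1, 0, 1, 2, -1, -4])  # -> [(-1, -1, 2), (-1, 0, 1)]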
|
jigarkb/CTCI
|
LeetCode/015-M-3Sum.py
|
Python
|
mit
| 1,646
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/tatooine/shared_housing_tatt_style02_large.iff"
result.attribute_template_id = -1
result.stfName("building_name","housing_tatt_style01_large")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/building/tatooine/shared_housing_tatt_style02_large.py
|
Python
|
mit
| 479
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('connectors', '0002_parameter_description'),
]
operations = [
migrations.RenameField(
model_name='callback',
old_name='type',
new_name='method',
),
]
|
rebearteta/social-ideation
|
connectors/migrations/0003_auto_20150423_1545.py
|
Python
|
mit
| 393
|
from vint.ast.traversing import traverse, register_traverser_extension
from vint.ast.parsing import Parser
from vint.ast.node_type import NodeType
REDIR_CONTENT = 'VINT:redir_content'
class RedirAssignmentParser(object):
""" A class to make redir assignment parseable. """
def process(self, ast):
def enter_handler(node):
node_type = NodeType(node['type'])
if node_type is not NodeType.EXCMD:
return
is_redir_command = node['ea']['cmd'].get('name') == 'redir'
if not is_redir_command:
return
redir_cmd_str = node['str']
is_redir_assignment = '=>' in redir_cmd_str
if not is_redir_assignment:
return
parser = Parser()
redir_content_node = parser.parse_redir(node)
node[REDIR_CONTENT] = redir_content_node
traverse(ast, on_enter=enter_handler)
return ast
def get_redir_content(node):
return node.get(REDIR_CONTENT)
@register_traverser_extension
def traverse_redir_content(node, on_enter=None, on_leave=None):
if REDIR_CONTENT not in node:
return
traverse(node[REDIR_CONTENT], on_enter=on_enter, on_leave=on_leave)
|
Kuniwak/vint
|
vint/ast/plugin/scope_plugin/redir_assignment_parser.py
|
Python
|
mit
| 1,250
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class AutoscaleSettingResourcePaged(Paged):
"""
    A paging container for iterating over a list of :class:`AutoscaleSettingResource <azure.mgmt.monitor.models.AutoscaleSettingResource>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[AutoscaleSettingResource]'}
}
def __init__(self, *args, **kwargs):
super(AutoscaleSettingResourcePaged, self).__init__(*args, **kwargs)
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-monitor/azure/mgmt/monitor/models/autoscale_setting_resource_paged.py
|
Python
|
mit
| 1,000
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Collection of functions and classes to fix various encoding problems on
multiple platforms with python.
"""
import codecs
import locale
import os
import sys
# Prevents initializing multiple times.
_SYS_ARGV_PROCESSED = False
def complain(message):
"""If any exception occurs in this file, we'll probably try to print it
on stderr, which makes for frustrating debugging if stderr is directed
to our wrapper. So be paranoid about catching errors and reporting them
to sys.__stderr__, so that the user has a higher chance to see them.
"""
print >> sys.__stderr__, (
isinstance(message, str) and message or repr(message))
def fix_default_encoding():
"""Forces utf8 solidly on all platforms.
By default python execution environment is lazy and defaults to ascii
encoding.
http://uucode.com/blog/2007/03/23/shut-up-you-dummy-7-bit-python/
"""
if sys.getdefaultencoding() == 'utf-8':
return False
# Regenerate setdefaultencoding.
reload(sys)
# Module 'sys' has no 'setdefaultencoding' member
# pylint: disable=E1101
sys.setdefaultencoding('utf-8')
for attr in dir(locale):
if attr[0:3] != 'LC_':
continue
aref = getattr(locale, attr)
try:
locale.setlocale(aref, '')
except locale.Error:
continue
try:
lang = locale.getlocale(aref)[0]
except (TypeError, ValueError):
continue
if lang:
try:
locale.setlocale(aref, (lang, 'UTF-8'))
except locale.Error:
os.environ[attr] = lang + '.UTF-8'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
return True
###############################
# Windows specific
def fix_win_sys_argv(encoding):
"""Converts sys.argv to 'encoding' encoded string.
utf-8 is recommended.
Works around <http://bugs.python.org/issue2128>.
"""
global _SYS_ARGV_PROCESSED
if _SYS_ARGV_PROCESSED:
return False
# These types are available on linux but not Mac.
# pylint: disable=E0611,F0401
from ctypes import byref, c_int, POINTER, windll, WINFUNCTYPE
from ctypes.wintypes import LPCWSTR, LPWSTR
# <http://msdn.microsoft.com/en-us/library/ms683156.aspx>
GetCommandLineW = WINFUNCTYPE(LPWSTR)(('GetCommandLineW', windll.kernel32))
# <http://msdn.microsoft.com/en-us/library/bb776391.aspx>
CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
('CommandLineToArgvW', windll.shell32))
argc = c_int(0)
argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
argv = [
argv_unicode[i].encode(encoding, 'replace')
for i in xrange(0, argc.value)]
if not hasattr(sys, 'frozen'):
# If this is an executable produced by py2exe or bbfreeze, then it
# will have been invoked directly. Otherwise, unicode_argv[0] is the
# Python interpreter, so skip that.
argv = argv[1:]
# Also skip option arguments to the Python interpreter.
while len(argv) > 0:
arg = argv[0]
if not arg.startswith(u'-') or arg == u'-':
break
argv = argv[1:]
if arg == u'-m':
# sys.argv[0] should really be the absolute path of the
# module source, but never mind.
break
if arg == u'-c':
argv[0] = u'-c'
break
sys.argv = argv
_SYS_ARGV_PROCESSED = True
return True
def fix_win_codec():
"""Works around <http://bugs.python.org/issue6058>."""
# <http://msdn.microsoft.com/en-us/library/dd317756.aspx>
try:
codecs.lookup('cp65001')
return False
except LookupError:
codecs.register(
lambda name: name == 'cp65001' and codecs.lookup('utf-8') or None)
return True
class WinUnicodeOutputBase(object):
"""Base class to adapt sys.stdout or sys.stderr to behave correctly on
Windows.
Setting encoding to utf-8 is recommended.
"""
def __init__(self, fileno, name, encoding):
# Corresponding file handle.
self._fileno = fileno
self.encoding = encoding
self.name = name
self.closed = False
self.softspace = False
self.mode = 'w'
@staticmethod
def isatty():
return False
def close(self):
# Don't really close the handle, that would only cause problems.
self.closed = True
def fileno(self):
return self._fileno
def flush(self):
raise NotImplementedError()
def write(self, text):
raise NotImplementedError()
def writelines(self, lines):
try:
for line in lines:
self.write(line)
except Exception, e:
complain('%s.writelines: %r' % (self.name, e))
raise
class WinUnicodeConsoleOutput(WinUnicodeOutputBase):
"""Output adapter to a Windows Console.
Understands how to use the win32 console API.
"""
def __init__(self, console_handle, fileno, stream_name, encoding):
super(WinUnicodeConsoleOutput, self).__init__(
fileno, '<Unicode console %s>' % stream_name, encoding)
# Handle to use for WriteConsoleW
self._console_handle = console_handle
# Loads the necessary function.
# These types are available on linux but not Mac.
# pylint: disable=E0611,F0401
from ctypes import byref, GetLastError, POINTER, windll, WINFUNCTYPE
from ctypes.wintypes import BOOL, DWORD, HANDLE, LPWSTR
from ctypes.wintypes import LPVOID # pylint: disable=E0611
self._DWORD = DWORD
self._byref = byref
# <http://msdn.microsoft.com/en-us/library/ms687401.aspx>
self._WriteConsoleW = WINFUNCTYPE(
BOOL, HANDLE, LPWSTR, DWORD, POINTER(DWORD), LPVOID)(
('WriteConsoleW', windll.kernel32))
self._GetLastError = GetLastError
def flush(self):
# No need to flush the console since it's immediate.
pass
def write(self, text):
try:
if not isinstance(text, unicode):
# Convert to unicode.
text = str(text).decode(self.encoding, 'replace')
remaining = len(text)
while remaining > 0:
n = self._DWORD(0)
# There is a shorter-than-documented limitation on the length of the
# string passed to WriteConsoleW. See
# <http://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232>.
retval = self._WriteConsoleW(
self._console_handle, text,
min(remaining, 10000),
self._byref(n), None)
if retval == 0 or n.value == 0:
raise IOError(
'WriteConsoleW returned %r, n.value = %r, last error = %r' % (
retval, n.value, self._GetLastError()))
remaining -= n.value
if not remaining:
break
text = text[n.value:]
except Exception, e:
complain('%s.write: %r' % (self.name, e))
raise
class WinUnicodeOutput(WinUnicodeOutputBase):
"""Output adaptor to a file output on Windows.
If the standard FileWrite function is used, it will be encoded in the current
code page. WriteConsoleW() permits writing any character.
"""
def __init__(self, stream, fileno, encoding):
super(WinUnicodeOutput, self).__init__(
fileno, '<Unicode redirected %s>' % stream.name, encoding)
# Output stream
self._stream = stream
# Flush right now.
self.flush()
def flush(self):
try:
self._stream.flush()
except Exception, e:
complain('%s.flush: %r from %r' % (self.name, e, self._stream))
raise
def write(self, text):
try:
if isinstance(text, unicode):
# Replace characters that cannot be printed instead of failing.
text = text.encode(self.encoding, 'replace')
self._stream.write(text)
except Exception, e:
complain('%s.write: %r' % (self.name, e))
raise
def win_handle_is_a_console(handle):
"""Returns True if a Windows file handle is a handle to a console."""
# These types are available on linux but not Mac.
# pylint: disable=E0611,F0401
from ctypes import byref, POINTER, windll, WINFUNCTYPE
from ctypes.wintypes import BOOL, DWORD, HANDLE
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
INVALID_HANDLE_VALUE = DWORD(-1).value
# <http://msdn.microsoft.com/en-us/library/ms683167.aspx>
GetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))(
('GetConsoleMode', windll.kernel32))
# <http://msdn.microsoft.com/en-us/library/aa364960.aspx>
GetFileType = WINFUNCTYPE(DWORD, DWORD)(('GetFileType', windll.kernel32))
# GetStdHandle returns INVALID_HANDLE_VALUE, NULL, or a valid handle.
if handle == INVALID_HANDLE_VALUE or handle is None:
return False
return (
(GetFileType(handle) & ~FILE_TYPE_REMOTE) == FILE_TYPE_CHAR and
GetConsoleMode(handle, byref(DWORD())))
def win_get_unicode_stream(stream, excepted_fileno, output_handle, encoding):
"""Returns a unicode-compatible stream.
This function will return a direct-Console writing object only if:
- the file number is the expected console file number
    - the handle is the expected file handle
- the 'real' handle is in fact a handle to a console.
"""
old_fileno = getattr(stream, 'fileno', lambda: None)()
if old_fileno == excepted_fileno:
# These types are available on linux but not Mac.
# pylint: disable=E0611,F0401
from ctypes import windll, WINFUNCTYPE
from ctypes.wintypes import DWORD, HANDLE
# <http://msdn.microsoft.com/en-us/library/ms683231.aspx>
GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)(('GetStdHandle', windll.kernel32))
real_output_handle = GetStdHandle(DWORD(output_handle))
if win_handle_is_a_console(real_output_handle):
# It's a console.
return WinUnicodeConsoleOutput(
real_output_handle, old_fileno, stream.name, encoding)
# It's something else. Create an auto-encoding stream.
return WinUnicodeOutput(stream, old_fileno, encoding)
def fix_win_console(encoding):
"""Makes Unicode console output work independently of the current code page.
This also fixes <http://bugs.python.org/issue1602>.
Credit to Michael Kaplan
<http://blogs.msdn.com/b/michkap/archive/2010/04/07/9989346.aspx> and
TZOmegaTZIOY
<http://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash/1432462#1432462>.
"""
if (isinstance(sys.stdout, WinUnicodeOutputBase) or
isinstance(sys.stderr, WinUnicodeOutputBase)):
return False
try:
# SetConsoleCP and SetConsoleOutputCP could be used to change the code page
# but it's not really useful since the code here is using WriteConsoleW().
# Also, changing the code page is 'permanent' to the console and needs to be
# reverted manually.
        # One needs to set the console font to a TTF font to be able to
        # see all the characters, but in practice that did not work for me. In any case, it
# won't throw any exception when printing, which is the important part.
# -11 and -12 are defined in stdio.h
sys.stdout = win_get_unicode_stream(sys.stdout, 1, -11, encoding)
sys.stderr = win_get_unicode_stream(sys.stderr, 2, -12, encoding)
# TODO(maruel): Do sys.stdin with ReadConsoleW(). Albeit the limitation is
# "It doesn't appear to be possible to read Unicode characters in UTF-8
# mode" and this appears to be a limitation of cmd.exe.
except Exception, e:
complain('exception %r while fixing up sys.stdout and sys.stderr' % e)
return True
def fix_encoding():
"""Fixes various encoding problems on all platforms.
Should be called at the very beginning of the process.
"""
ret = True
if sys.platform == 'win32':
ret &= fix_win_codec()
ret &= fix_default_encoding()
if sys.platform == 'win32':
encoding = sys.getdefaultencoding()
ret &= fix_win_sys_argv(encoding)
ret &= fix_win_console(encoding)
return ret
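# A minimal usage sketch (not part of the original module): fix_encoding() is meant to
# be called once, at the very start of the process, before anything writes to stdout
# or stderr.
if __name__ == '__main__':
    fix_encoding()
    print >> sys.stdout, u'unicode output is now safe: \u00e9'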
|
PowerKiKi/mqueue
|
library/searchengine/nova/fix_encoding.py
|
Python
|
mit
| 11,785
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '{{ taiga_database_username }}',
'USER': '{{ taiga_database_username }}',
'PASSWORD': '{{ taiga_database_password }}',
'HOST': '{{ taiga_database_host }}',
'PORT': '{{ taiga_database_port }}',
}
}
HOST="http://{{ taiga_hostname }}:{{ taiga_port }}"
#MEDIA_ROOT = '/home/taiga/media'
#STATIC_ROOT = '/home/taiga/static'
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_USE_TLS = True
EMAIL_HOST = "{{ taiga_email_host }}"
EMAIL_HOST_USER = "{{ taiga_aws_access_key_id }}"
EMAIL_HOST_PASSWORD = "{{ taiga_aws_secret_access_key }}"
EMAIL_PORT = "{{ taiga_email_port }}"
DEFAULT_FROM_EMAIL = "{{ taiga_from_email_address }}"
# THROTTLING
#REST_FRAMEWORK["DEFAULT_THROTTLE_RATES"] = {
# "anon": "20/min",
# "user": "200/min",
# "import-mode": "20/sec"
#}
# GITHUB SETTINGS
#GITHUB_URL = "https://github.com/"
#GITHUB_API_URL = "https://api.github.com/"
#GITHUB_API_CLIENT_ID = "yourgithubclientid"
#GITHUB_API_CLIENT_SECRET = "yourgithubclientsecret"
|
JScott/ansible-taiga
|
templates/opt/taiga/back/settings/local.py
|
Python
|
mit
| 1,982
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('imagr_app', '0002_auto_20141106_1538'),
]
operations = [
migrations.AlterField(
model_name='album',
name='date_published',
field=models.DateField(blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='album',
name='title',
field=models.CharField(max_length=60),
preserve_default=True,
),
migrations.AlterField(
model_name='photo',
name='date_published',
field=models.DateField(blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='photo',
name='title',
field=models.CharField(max_length=60),
preserve_default=True,
),
]
|
CharlesGust/django-imagr
|
imagr_site/imagr_app/migrations/0003_auto_20141114_1441.py
|
Python
|
mit
| 1,007
|
""" simple cache based on shelve
"""
import shelve
import time
def load_cache(cache_file):
cache = shelve.open(cache_file)
return cache
def read_key(cache, key, ttl):
""" Reads value from cache, if doesnt exist or is older than ttl, raises KeyError
"""
bibtex, timestamp = cache[key]
if (timestamp + ttl) < time.time():
raise KeyError("Cached entry is too old")
else:
return bibtex
def save_key(cache, key, value):
cache[key] = (value, time.time())
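# A minimal usage sketch (file name and TTL below are illustrative, not part of the
# original module): an entry older than the TTL raises KeyError just like a missing
# key, so callers can treat "expired" and "absent" identically.
if __name__ == '__main__':
    cache = load_cache('adsbibtex.cache')
    save_key(cache, 'bibcode', '@article{...}')
    print(read_key(cache, 'bibcode', ttl=3600))   # fresh entry, value is returned
    try:
        read_key(cache, 'missing', ttl=3600)      # absent key raises KeyError
    except KeyError as err:
        print(err)
    cache.close()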
|
ryanvarley/adsbibtex
|
adsbibtex/adsbibtex_cache.py
|
Python
|
mit
| 505
|
# -*- coding: utf-8 -*-
import platform
import os
import sys
from .constants import (MYSTEM_BIN, MYSTEM_EXE, MYSTEM_DIR)
_TARBALL_URLS = {
'linux': {
'32bit': "http://download.cdn.yandex.net/mystem/mystem-3.0-linux3.5-32bit.tar.gz",
'64bit': "http://download.cdn.yandex.net/mystem/mystem-3.0-linux3.1-64bit.tar.gz",
},
'darwin': "http://download.cdn.yandex.net/mystem/mystem-3.0-macosx10.8.tar.gz",
'win': {
'32bit': "http://download.cdn.yandex.net/mystem/mystem-3.0-win7-32bit.zip",
'64bit': "http://download.cdn.yandex.net/mystem/mystem-3.0-win7-64bit.zip",
},
'freebsd': {
'64bit': "http://download.cdn.yandex.net/mystem/mystem-3.0-freebsd9.0-64bit.tar.gz",
}
}
def autoinstall(out=sys.stderr):
"""
Install mystem binary as :py:const:`~pymystem3.constants.MYSTEM_BIN`.
Do nothing if already installed.
"""
if os.path.isfile(MYSTEM_BIN):
return
install(out)
def install(out=sys.stderr):
"""
Install mystem binary as :py:const:`~pymystem3.constants.MYSTEM_BIN`.
Overwrite if already installed.
"""
import requests
import tempfile
url = _get_tarball_url()
print("Installing mystem to %s from %s" % (MYSTEM_BIN, url), file=out)
if not os.path.isdir(MYSTEM_DIR):
os.makedirs(MYSTEM_DIR)
tmp_fd, tmp_path = tempfile.mkstemp()
try:
r = requests.get(url, stream=True)
with os.fdopen(tmp_fd, 'wb') as fd:
for chunk in r.iter_content(64 * 1024):
fd.write(chunk)
fd.flush()
if url.endswith('.tar.gz'):
import tarfile
with tarfile.open(tmp_path) as tar:
tar.extract(MYSTEM_EXE, MYSTEM_DIR)
elif url.endswith('.zip'):
import zipfile
with zipfile.ZipFile(tmp_path) as zip:
zip.extractall(MYSTEM_DIR)
else:
raise NotImplementedError("Could not install mystem from %s" % url)
finally:
os.unlink(tmp_path)
def _get_on_prefix(kvs, key):
for k, v in kvs.items():
if key.startswith(k):
return v
return None
def _get_tarball_url():
bits, _ = platform.architecture()
url = _get_on_prefix(_TARBALL_URLS, sys.platform)
if url is None:
raise NotImplementedError("Your system is not supported. Feel free to report bug or make a pull request.")
if isinstance(url, str):
return url
url = url.get(bits, None)
if url is None:
raise NotImplementedError("Your system is not supported. Feel free to report bug or make a pull request.")
return url
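# A minimal usage sketch (not part of the original module): autoinstall() is safe to
# call unconditionally, since it returns immediately once MYSTEM_BIN already exists;
# on the first run it downloads and unpacks the binary for the current platform.
if __name__ == '__main__':
    autoinstall()
    print("mystem binary available at: %s" % MYSTEM_BIN)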
|
hatbot-team/hatbot_resources
|
preparation/lang_utils/pymystem/installer.py
|
Python
|
mit
| 2,655
|
"""This file is useful only if 'salesforce' is a duplicit name in Django registry
then put a string 'salesforce.apps.SalesforceDb' instead of simple 'salesforce'
"""
from django.apps import AppConfig
class SalesforceDb(AppConfig):
name = 'salesforce'
label = 'salesforce_db'
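# A minimal usage sketch (assumed project settings, not part of this file): when the
# plain app name would clash, reference this AppConfig from INSTALLED_APPS instead:
#
#     INSTALLED_APPS = [
#         # ...,
#         'salesforce.apps.SalesforceDb',   # instead of just 'salesforce'
#     ]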
|
django-salesforce/django-salesforce
|
salesforce/apps.py
|
Python
|
mit
| 286
|
# argv[1] - file path to main folder (like $HOME/dsge-models)
# argv[2] - name of model (e.g. 'dsf' or 'nk' or 'ca')
from scipy.io import loadmat
from sys import argv
from json import load
TT = 30 # how many periods of results to send
model = argv[2]
fpath = argv[1] + '/' + model + '_mfiles/'
json = ''
#### 1 - load model results
# load results from mat file and convert to numpy lists
#mat = loadmat(fpath + model + '_results.mat')
#endo_names = mat['M_']['endo_names'].tolist()[0][0]
#endo_simul = mat['oo_']['endo_simul'].tolist()[0][0]
# make string of JSON-looking data out of numpy lists
#for name, simul in zip(endo_names, endo_simul):
# json += '"' + name.strip() + '":'
# json += '[' + ','.join(['%2f' % jj for jj in simul[0:TT]]) + '],'
#### 2 - load extra plot vars
# load results from mat file and convert to numpy lists (new format though)
mat = loadmat(fpath + 'plot_vars.mat')
plot_names = mat['plot_vars'].dtype.names
plot_simul = mat['plot_vars'][0][0]
for name, simul in zip(plot_names, plot_simul):
print 'name: ' + name
json += '"' + name.strip() + '":'
json += '[' + ','.join(['%2f' % jj for jj in simul[0:TT]]) + '],'
# write JSON-looking string to file
f = open(model + '_mfiles/' + model + '_results.json', 'w')
f.write('{' + json[:-1] + '}')
f.close()
# pull JSON data into python dict
json_data = open(fpath + model + '_results.json')
data = load(json_data)
json_data.close()
# pull JSON of short+long var names into python dict
json_names = open(fpath + 'json/var_list.json')
names = load(json_names)
json_names.close()
# make string of public directory
pub_fpath = fpath[:fpath[:-1].rfind('/')] + '/public/'
# create csv file to write to
f = open(pub_fpath + model + '_results.csv','w')
for key in data.keys():
#f.write(str(key) + ', ' + str(data[key])[1:-1] + '\n')
f.write(str(names[key]) + ', ' + str(data[key])[1:-1] + '\n')
f.close()
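# A usage sketch (paths and model name are illustrative, not from the original script):
#
#     python save_results.py $HOME/dsge-models dsf
#
# reads <folder>/dsf_mfiles/plot_vars.mat, writes dsf_mfiles/dsf_results.json, and
# copies the results as CSV into the sibling public/ directory.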
|
wclark3/dsge-models
|
dsf_mfiles/save_results.py
|
Python
|
mit
| 1,975
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/deed/corellia/player_house_deed/shared_corellia_house_large_deed.iff"
result.attribute_template_id = 2
result.stfName("deed","corellia_house_large_deed")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/deed/corellia/player_house_deed/shared_corellia_house_large_deed.py
|
Python
|
mit
| 490
|
import os, sys
import tempfile
import unittest as unittest0
try:
unittest0.skipUnless
unittest0.skip
except AttributeError:
import unittest2 as unittest
else:
unittest = unittest0
del unittest0
import win32file
from winsys import fs
class TestDrive (unittest.TestCase):
#
# The name of the drive should be normalised:
# lowercase-letter;colon;backslash
#
def test_name (self):
names = ["C", "C:", "C:/", "C:\\"]
for name in names:
self.assertEquals (fs.drive (name).name, "c:\\")
self.assertEquals (fs.drive (name.lower ()).name, "c:\\")
def test_DriveType (self):
self.assertEquals (fs.drive ("C:").type, win32file.GetDriveTypeW ("C:"))
def test_DriveRoot (self):
self.assertEquals (fs.drive ("C:").root, fs.dir ("C:\\"))
def test_volume (self):
self.assertEquals (fs.drive ("C:").volume.name, win32file.GetVolumeNameForVolumeMountPoint ("C:\\"))
@unittest.skip ("Skip destructive test")
def test_mount (self):
#
# Difficult to test because it's not possible
# to mount a volume on two drive letters simultaneously.
# Try to find something unimportant, like a CDROM, and
# dismount it before remounting it.
#
pass
@unittest.skip ("Skip destructive test")
def test_dismount (self):
#
# Likewise difficult to test because destructive
#
pass
if __name__ == "__main__":
unittest.main ()
if sys.stdout.isatty (): raw_input ("Press enter...")
|
frankyrumple/ope
|
laptop_credential/winsys/tests/test_fs/test_drive.py
|
Python
|
mit
| 1,460
|
"""
Experimental agent implementation running separate threads for emulation and GPU training.
This is slightly faster (an estimated ~20%) than the sequential implementation, but results might differ.
Copyright 2016 Rasmus Larsen
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE.txt file for details.
"""
from Agent import Agent
import random
import threading
import time
import numpy as np
class ParallelAgent(Agent):
def __init__(self, emu, net, config):
super(ParallelAgent, self).__init__(emu, net, config)
self.gpu_lock = threading.Lock()
self.testing = False
def train(self):
cpu = threading.Thread(target=self.ale_worker)
cpu.setDaemon(True)
gpu_1 = threading.Thread(target=self.gpu_worker)
gpu_2 = threading.Thread(target=self.gpu_worker)
for i in xrange(int(self.train_start)): # wait for replay memory to fill
self.next(random.randrange(self.emu.num_actions))
cpu.start()
gpu_1.start()
gpu_2.start()
gpu_1.join()
gpu_2.join()
return
def test(self):
self.testing = True
time.sleep(0.5) # wait a bit for ALE worker to stop
super(ParallelAgent, self).test()
self.testing = False
def ale_worker(self):
"""
Performs epsilon greedy action selection, updating the replay memory and emulating with ALE.
"""
while True:
if self.testing:
time.sleep(0.2)
continue
self.eps_greedy()
def gpu_worker(self):
"""
Gathers a minibatch (on the CPU!) and feeds it to the GPU. Several can run at once, locking the GPU.
"""
while self.steps < self.train_frames:
s, a, r, ns, t = self.mem.get_minibatch() # TODO: ReplayMemory is _not_ thread safe
a = self.emu.onehot_actions(a) # necessary due to tensorflow not having proper indexing
with self.gpu_lock:
cost = self.net.train(s, a, r, ns, t)
if self.steps % self.target_sync == 0:
self.net.sync_target()
if self.steps % self.test_freq == 0:
self.test()
self.steps += 1
if self.steps % 100 == 0: # TODO: remove, just for debugging
print 'step ' + str(self.steps)
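# A minimal, self-contained sketch of the threading pattern used above (illustrative
# only; it does not touch the real Agent/emulator/network classes): one daemon "actor"
# thread keeps running while two "trainer" threads share a lock around the GPU-side
# update, mirroring ale_worker/gpu_worker.
if __name__ == '__main__':
    lock = threading.Lock()
    steps = [0]

    def actor():
        while steps[0] < 1000:
            time.sleep(0.001)      # stands in for emulation + replay-memory updates

    def trainer():
        while steps[0] < 1000:
            with lock:             # only one thread performs the "GPU" update at a time
                steps[0] += 1

    cpu = threading.Thread(target=actor)
    cpu.setDaemon(True)
    cpu.start()
    trainers = [threading.Thread(target=trainer) for _ in range(2)]
    for t in trainers:
        t.start()
    for t in trainers:
        t.join()
    print 'finished %d sketch training steps' % steps[0]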
|
rlrs/deep-rl
|
dqn/ParallelAgent.py
|
Python
|
mit
| 2,448
|
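# Reads an integer n and tests primality by trial division. Prompts are in Spanish:
# "Ingrese n" = "Enter n", "es primo" = "is prime", "es compuesto" = "is composite".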
n = int(raw_input('Ingrese n: '))
es_primo = True
d = 2
while d < n:
if n % d == 0:
es_primo = False
d = d + 1
if es_primo:
print(n, 'es primo')
else:
print(n, 'es compuesto')
|
sebastiandres/iwi131
|
ipynb/06-Funciones/iwi131_code/es_primo_v1.py
|
Python
|
cc0-1.0
| 200
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.models.building_location_choice_model import BuildingLocationChoiceModel as UrbansimBuildingLocationChoiceModel
from numpy import where, arange, zeros
from numpy import logical_or, logical_not
from opus_core.variables.variable_name import VariableName
from opus_core.resources import Resources
from opus_core.datasets.dataset import Dataset
class BuildingLocationChoiceModel(UrbansimBuildingLocationChoiceModel):
# def get_weights_for_sampling_locations(self, agent_set, agents_index, data_objects=None):
# where_developable = where(self.apply_filter(self.filter, None, agent_set, agents_index, data_objects=data_objects))[0]
# weight_array = ones((where_developable.size), dtype=int8) #.astype(bool8)
# return (weight_array, where_developable)
def get_weights_for_sampling_locations_for_estimation(self, agent_set, agents_index):
if self.run_config.get("agent_units_string", None): # needs to be corrected
agent_set.compute_variables(self.run_config["agent_units_string"], dataset_pool=self.dataset_pool)
return self.get_weights_for_sampling_locations(agent_set, agents_index)
def prepare_for_estimate(self, add_member_prefix=True,
specification_dict=None,
specification_storage=None,
specification_table=None,
building_set=None,
buildings_for_estimation_storage=None,
buildings_for_estimation_table=None,
constants=None, base_year=0,
building_categories=None,
location_id_variable=None,
join_datasets=False,
data_objects=None, **kwargs):
# buildings = None
if (building_set is not None):
if location_id_variable is not None:
building_set.compute_variables(location_id_variable, resources=Resources(data_objects))
# create agents for estimation
if buildings_for_estimation_storage is not None:
estimation_set = Dataset(in_storage=buildings_for_estimation_storage,
in_table_name=buildings_for_estimation_table,
id_name=building_set.get_id_name(),
dataset_name=building_set.get_dataset_name())
if location_id_variable:
estimation_set.compute_variables(location_id_variable,
resources=Resources(data_objects))
# needs to be a primary attribute because of the join method below
estimation_set.add_primary_attribute(estimation_set.get_attribute(location_id_variable),
VariableName(location_id_variable).alias())
years = estimation_set.get_attribute("scheduled_year")
recent_years = constants['recent_years']
indicator = zeros(estimation_set.size(), dtype="int32")
for year in range(base_year-recent_years, base_year+1):
indicator = logical_or(indicator, years==year)
idx = where(logical_not(indicator))[0]
estimation_set.remove_elements(idx)
#if filter:
#estimation_set.compute_variables(filter, resources=Resources(data_objects))
#index = where(estimation_set.get_attribute(filter) > 0)[0]
#estimation_set.subset_by_index(index, flush_attributes_if_not_loaded=False)
if join_datasets:
building_set.join_by_rows(estimation_set,
require_all_attributes=False,
change_ids_if_not_unique=True)
index = arange(building_set.size()-estimation_set.size(), building_set.size())
else:
index = building_set.get_id_index(estimation_set.get_id_attribute())
else:
if building_set is not None:
index = arange(building_set.size())
else:
index = None
if add_member_prefix:
specification_table = self.group_member.add_member_prefix_to_table_names([specification_table])
from opus_core.model import get_specification_for_estimation
#from urbansim.functions import compute_supply_and_add_to_location_set
specification = get_specification_for_estimation(specification_dict,
specification_storage,
specification_table)
#specification, dummy = AgentLocationChoiceModelMember.prepare_for_estimate(self, add_member_prefix,
#specification_dict, specification_storage,
#specification_table,
#location_id_variable=location_id_variable,
#data_objects=data_objects, **kwargs)
return (specification, index)
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/urbansim_parcel/models/building_location_choice_model.py
|
Python
|
gpl-2.0
| 5,528
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-29 13:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("issuetracker", "0004_data_migration_set_bugzilla_to_allow_add_case_to_issue"),
]
operations = [
migrations.RemoveField(
model_name="issuetracker",
name="issue_report_fmt",
),
migrations.AddField(
model_name="issuetracker",
name="issue_report_templ",
field=models.CharField(
blank=True,
default="",
help_text="The issue content template, which could be arbitrary text with format arguments. Nitrate provides these format arguments: <code>TestBuild.name</code>, <code>setup</code>, <code>action</code> and <code>effect</code>. The text is formatted with keyward arguments.",
max_length=255,
),
),
migrations.AlterField(
model_name="issuetracker",
name="issue_report_params",
field=models.CharField(
blank=True,
default="",
help_text="Parameters used to format URL for reporting issue. Each line is a <code>key:value</code> pair of parameters. Nitrate provides a few parameters to format URL and additional parameters could be provided by system administrator as well. ",
max_length=255,
),
),
]
|
Nitrate/Nitrate
|
src/tcms/issuetracker/migrations/0005_adjust_issue_report_fields.py
|
Python
|
gpl-2.0
| 1,490
|