code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Script to generate plots with random data points.
import numpy as np
import matplotlib.pyplot as plt

# 50 evenly spaced x positions; y follows y = x plus uniform noise in [0, 10).
sample_count = 50
x = np.linspace(0, 50, sample_count)
noise = np.random.random_sample(sample_count) * 10
y = x + noise

# Small, fixed-size figure so the saved PNG is suitable for thumbnails/tests.
plt.figure(figsize=(3, 2), dpi=100)
plt.plot(x, y, 'co', linewidth=4.0)
plt.savefig('noisy_data.png')
plt.show()
| zhouhaner/WebPlotDigitizer | scripts/noisyData.py | Python | gpl-3.0 | 285 |
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for keras layers."""
import tensorflow as tf
# Shorthand for the Keras layers namespace used throughout this module.
layers = tf.keras.layers
# Shared Glorot-normal weight initializer for all conv layers below.
initializer = tf.keras.initializers.glorot_normal()
# Dropout rate applied when a block is built with normalization='dropout'.
rate = 0.7
def norm_layer(tensor, normalization):
  """Apply batch normalization when requested; otherwise pass through."""
  if normalization.lower() == 'batchnorm':
    return layers.BatchNormalization()(tensor)
  return tensor
def upconv(tensor, nfilters, size, strides,
           alpha_lrelu=0.2, normalization='None'):
  """Upconvolution as upsampling and convolution."""
  out = layers.UpSampling2D()(tensor)
  conv = layers.Conv2D(nfilters, size, strides=strides, padding='same',
                       kernel_initializer=initializer, use_bias=False)
  out = conv(out)
  out = norm_layer(out, normalization)
  out = layers.LeakyReLU(alpha=alpha_lrelu)(out)
  # Dropout doubles as a "normalization" option in this module.
  if normalization.lower() == 'dropout':
    out = layers.Dropout(rate)(out)
  return out
def conv_block_3d(tensor, nfilters, size, strides,
                  alpha_lrelu=0.2, normalization='None', relu=True):
  """3D convolution block with normalization and leaky relu."""
  conv = layers.Conv3D(nfilters, size, strides=strides, padding='same',
                       kernel_initializer=initializer, use_bias=False)
  out = conv(tensor)
  out = norm_layer(out, normalization)
  if relu:
    out = layers.LeakyReLU(alpha=alpha_lrelu)(out)
  # Dropout doubles as a "normalization" option in this module.
  if normalization.lower() == 'dropout':
    out = layers.Dropout(rate)(out)
  return out
def conv_t_block_3d(tensor, nfilters, size, strides,
                    alpha_lrelu=0.2, normalization='None', relu=True):
  """3D transpose convolution block with normalization and leaky relu."""
  # (Docstring fixed: this is the 3D variant, not 2D as previously stated.)
  tensor = layers.Conv3DTranspose(nfilters, size,
                                  strides=strides,
                                  padding='same',
                                  kernel_initializer=initializer,
                                  use_bias=False)(tensor)
  tensor = norm_layer(tensor, normalization)
  if relu:
    tensor = layers.LeakyReLU(alpha=alpha_lrelu)(tensor)
  # 'dropout' is treated as one of the normalization options in this module.
  if normalization.lower() == 'dropout':
    tensor = layers.Dropout(rate)(tensor)
  return tensor
def conv_block_2d(tensor, nfilters, size, strides,
                  alpha_lrelu=0.2, normalization='None'):
  """2D convolution block with normalization and leaky relu."""
  conv = layers.Conv2D(nfilters, size, strides=strides, padding='same',
                       kernel_initializer=initializer, use_bias=False)
  out = conv(tensor)
  out = norm_layer(out, normalization)
  out = layers.LeakyReLU(alpha=alpha_lrelu)(out)
  # Dropout doubles as a "normalization" option in this module.
  if normalization.lower() == 'dropout':
    out = layers.Dropout(rate)(out)
  return out
def conv_t_block_2d(tensor, nfilters, size, strides,
                    alpha_lrelu=0.2, normalization='None'):
  """2D transpose convolution block with normalization and leaky relu."""
  deconv = layers.Conv2DTranspose(nfilters, size, strides=strides,
                                  padding='same',
                                  kernel_initializer=initializer,
                                  use_bias=False)
  out = deconv(tensor)
  out = norm_layer(out, normalization)
  out = layers.LeakyReLU(alpha=alpha_lrelu)(out)
  # Dropout doubles as a "normalization" option in this module.
  if normalization.lower() == 'dropout':
    out = layers.Dropout(rate)(out)
  return out
def residual_block_2d(x, nfilters, strides=(1, 1), normalization='None'):
  """2D residual block."""
  shortcut = x
  # Main path: conv (possibly strided) -> norm -> lrelu -> conv -> norm.
  x = layers.Conv2D(nfilters,
                    kernel_size=(3, 3),
                    strides=strides,
                    padding='same',
                    kernel_initializer=initializer)(x)
  x = norm_layer(x, normalization)
  x = layers.LeakyReLU()(x)
  x = layers.Conv2D(nfilters,
                    kernel_size=(3, 3),
                    strides=(1, 1),
                    padding='same',
                    kernel_initializer=initializer)(x)
  x = norm_layer(x, normalization)
  if strides != (1, 1):
    # Project the shortcut with a 1x1 conv so spatial dims match.
    shortcut = layers.Conv2D(nfilters,
                             kernel_size=(1, 1),
                             strides=strides,
                             padding='same')(shortcut)
    # NOTE(review): this normalizes x a second time while the projected
    # shortcut is left unnormalized -- possibly intended to be
    # shortcut = norm_layer(shortcut, normalization); confirm upstream.
    x = norm_layer(x, normalization)
  x = layers.add([shortcut, x])
  x = layers.LeakyReLU()(x)
  return x
def residual_block_3d(x, nfilters, strides=(1, 1, 1), normalization='None'):
  """3D residual block."""
  shortcut = x
  # Main path: conv (possibly strided) -> norm -> lrelu -> conv -> norm.
  x = layers.Conv3D(nfilters,
                    kernel_size=(3, 3, 3),
                    strides=strides,
                    padding='same',
                    kernel_initializer=initializer)(x)
  x = norm_layer(x, normalization)
  x = layers.LeakyReLU()(x)
  x = layers.Conv3D(nfilters,
                    kernel_size=(3, 3, 3),
                    strides=(1, 1, 1),
                    padding='same',
                    kernel_initializer=initializer)(x)
  x = norm_layer(x, normalization)
  if strides != (1, 1, 1):
    # Project the shortcut with a 1x1x1 conv so spatial dims match.
    shortcut = layers.Conv3D(nfilters,
                             kernel_size=(1, 1, 1),
                             strides=strides,
                             padding='same')(shortcut)
    # NOTE(review): this normalizes x a second time while the projected
    # shortcut is left unnormalized -- possibly intended to be
    # shortcut = norm_layer(shortcut, normalization); confirm upstream.
    x = norm_layer(x, normalization)
  x = layers.add([shortcut, x])
  x = layers.LeakyReLU()(x)
  return x
| tensorflow/graphics | tensorflow_graphics/projects/neural_voxel_renderer/layers.py | Python | apache-2.0 | 5,993 |
# -*- coding: utf-8 -*-
"""
werkzeug.useragents
~~~~~~~~~~~~~~~~~~~
This module provides a helper to inspect user agent strings. This module
is far from complete but should work for most of the currently available
browsers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see http://choosealicense.com/licenses/bsd-3-clause/ for more details.
"""
import re
class UserAgentParser(object):
    """A simple user agent parser.  Used by the `UserAgent`."""

    # Checked in order; the first pattern that matches wins.
    platforms = (
        ('cros', 'chromeos'),
        ('iphone|ios', 'iphone'),
        ('ipad', 'ipad'),
        (r'darwin|mac|os\s*x', 'macos'),
        ('win', 'windows'),
        (r'android', 'android'),
        (r'x11|lin(\b|ux)?', 'linux'),
        ('(sun|i86)os', 'solaris'),
        (r'nintendo\s+wii', 'wii'),
        ('irix', 'irix'),
        ('hp-?ux', 'hpux'),
        ('aix', 'aix'),
        ('sco|unix_sv', 'sco'),
        ('bsd', 'bsd'),
        ('amiga', 'amiga'),
        ('blackberry|playbook', 'blackberry'),
        ('symbian', 'symbian')
    )
    # Checked in order; crawlers and more specific browsers come first so
    # e.g. 'chrome' wins over the 'safari'/'webkit' tokens it also carries.
    browsers = (
        ('googlebot', 'google'),
        ('msnbot', 'msn'),
        ('yahoo', 'yahoo'),
        ('ask jeeves', 'ask'),
        (r'aol|america\s+online\s+browser', 'aol'),
        ('opera', 'opera'),
        ('chrome', 'chrome'),
        ('firefox|firebird|phoenix|iceweasel', 'firefox'),
        ('galeon', 'galeon'),
        ('safari', 'safari'),
        ('webkit', 'webkit'),
        ('camino', 'camino'),
        ('konqueror', 'konqueror'),
        ('k-meleon', 'kmeleon'),
        ('netscape', 'netscape'),
        (r'msie|microsoft\s+internet\s+explorer|trident/.+? rv:', 'msie'),
        ('lynx', 'lynx'),
        ('links', 'links'),
        ('seamonkey|mozilla', 'seamonkey')
    )
    # BUG FIX: the inline (?i) flag must appear at the start of the pattern.
    # A trailing (?i) is deprecated since Python 3.6 and raises re.error on
    # Python 3.11+.  The matching semantics are unchanged (the flag was
    # global either way).
    _browser_version_re = r'(?i)(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?'
    _language_re = re.compile(
        r'(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|'
        r'(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)'
    )

    def __init__(self):
        # Pre-compile everything; platforms are matched case-insensitively.
        self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
        self.browsers = [(b, re.compile(self._browser_version_re % a))
                         for a, b in self.browsers]

    def __call__(self, user_agent):
        """Parse ``user_agent`` into (platform, browser, version, language).

        Each element is ``None`` when it could not be detected.
        """
        for platform, regex in self.platforms:
            match = regex.search(user_agent)
            if match is not None:
                break
        else:
            platform = None
        for browser, regex in self.browsers:
            match = regex.search(user_agent)
            if match is not None:
                version = match.group(1)
                break
        else:
            browser = version = None
        match = self._language_re.search(user_agent)
        if match is not None:
            language = match.group(1) or match.group(2)
        else:
            language = None
        return platform, browser, version, language
class UserAgent(object):
    """Represents a user agent.  Pass it a user agent string and you can
    inspect some of the details from it via the attributes.  The following
    attributes exist:

    .. attribute:: string

       the raw user agent string

    .. attribute:: platform

       the browser platform.  Currently recognized platforms: `aix`,
       `amiga`, `android`, `bsd`, `chromeos`, `hpux`, `iphone`, `ipad`,
       `irix`, `linux`, `macos`, `sco`, `solaris`, `wii`, `windows`

    .. attribute:: browser

       the name of the browser.  Currently recognized browsers: `aol` *,
       `ask` *, `camino`, `chrome`, `firefox`, `galeon`, `google` *,
       `kmeleon`, `konqueror`, `links`, `lynx`, `msie`, `msn`, `netscape`,
       `opera`, `safari`, `seamonkey`, `webkit`, `yahoo` *

       (Browsers marked with a star (``*``) are crawlers.)

    .. attribute:: version

       the version of the browser

    .. attribute:: language

       the language of the browser
    """

    # One shared parser instance; parsing is stateless so this is safe.
    _parser = UserAgentParser()

    def __init__(self, string):
        self.string = string
        (self.platform, self.browser,
         self.version, self.language) = self._parser(string)

    def to_header(self):
        """Return the value for a ``User-Agent`` header."""
        return self.string

    def __str__(self):
        return self.string

    def __nonzero__(self):
        # Truthy only when a known browser was detected.
        return bool(self.browser)

    __bool__ = __nonzero__  # Python 3 spelling of __nonzero__

    def __repr__(self):
        return '<%s %r/%s>' % (
            self.__class__.__name__,
            self.browser,
            self.version
        )
| hitsl/bouser | bouser/web/useragents.py | Python | isc | 4,901 |
#!/usr/bin/env python
from peyotl.utility import get_logger, ConfigWrapper
from peyotl.ott import OTT
import subprocess
import sys
import os
# Module-level logger and default output stream for the report commands.
_LOG = get_logger('clipeyotl')
out = sys.stdout
def parse_config_file(fp):
    """Parse the INI-style config file at ``fp`` and return the parser.

    Raises RuntimeError if ``fp`` does not exist on disk.
    """
    try:
        # Python 2 spelling of the module/class...
        from ConfigParser import SafeConfigParser
    except ImportError:
        # ...falling back to the Python 3 spelling.
        from configparser import ConfigParser as SafeConfigParser
    if not os.path.exists(fp):
        raise RuntimeError('The config filepath "{fp}" does not exist.'.format(fp=fp))
    parser = SafeConfigParser()
    parser.read(fp)
    return parser
def config_command(args):
    """Handle the ``config`` subcommand (currently only the 'list' action)."""
    if args.action.lower() != 'list':
        return
    fp = args.filepath
    if fp:
        # Explicit file given: parse it and report on that configuration.
        fp = os.path.abspath(fp)
        cw = ConfigWrapper(raw_config_obj=parse_config_file(fp),
                           config_filename=fp)
    else:
        # No file given: report on the default peyotl configuration.
        cw = ConfigWrapper()
    cw.report(out)
def ott_clear_command(args):
    """Delete the caches associated with the local OTT copy."""
    OTT().remove_caches()
def ott_shell_command(args):
    """Launch an interactive bash shell in the local OTT directory."""
    ott = OTT()
    _LOG.info('launching bash in your OTT dir...')
    exit_code = subprocess.Popen('bash', cwd=ott.ott_dir).wait()
    if exit_code != 0:
        raise RuntimeError('bash in ott dir failed.')
def main():
    """Build the argparse CLI, then dispatch to the chosen subcommand."""
    import argcomplete
    import argparse
    parser = argparse.ArgumentParser(prog='cli-peyotl.py')
    subparsers = parser.add_subparsers(help='available commands')
    # config commands
    config_parser = subparsers.add_parser('config', help='reports information about your peyotl configuration')
    config_parser.add_argument('-a', '--action', choices=['list'], default='list', required=False)
    config_parser.add_argument('-f', '--filepath', type=str, default=None, required=False)
    config_parser.set_defaults(func=config_command)
    # ott commands
    ott_parser = subparsers.add_parser('ott', help='commands that require a local version of ott')
    ott_subparsers = ott_parser.add_subparsers(help='ott actions')
    ott_clear_parser = ott_subparsers.add_parser('clear-cache',
                                                 help='remove the caches used to speed up actions on OTT')
    ott_clear_parser.set_defaults(func=ott_clear_command)
    ott_shell_parser = ott_subparsers.add_parser('bash',
                                                 help='execute bash command in the top dir of your copy of OTT')
    ott_shell_parser.set_defaults(func=ott_shell_command)
    argcomplete.autocomplete(parser)
    args = parser.parse_args(sys.argv[1:])
    try:
        args.func(args)
    except Exception:
        # Narrowed from a bare 'except:' so that SystemExit and
        # KeyboardInterrupt propagate normally instead of being logged as
        # crashes and remapped to exit code 1.
        _LOG.exception('peyotl.py terminating due to an exception')
        sys.exit(1)
# Standard script entry point.
if __name__ == '__main__':
    main()
| rvosa/peyotl | scripts/clipeyotl.py | Python | bsd-2-clause | 2,721 |
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters Notifications.jira_update to
    # add help_text and a verbose_name; choices/default/max_length unchanged.
    # Left byte-identical on purpose -- applied migrations must not be edited.

    dependencies = [
        ('dojo', '0054_dojometa_finding'),
    ]

    operations = [
        migrations.AlterField(
            model_name='notifications',
            name='jira_update',
            field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('hipchat', 'hipchat'), ('mail', 'mail'), ('alert', 'alert')], default='alert', help_text='JIRA sync happens in the background, errors will be shown as notifications/alerts so make sure to subscribe', max_length=24, verbose_name='JIRA problems'),
        ),
    ]
| rackerlabs/django-DefectDojo | dojo/db_migrations/0055_notifications_jira_update_verbose_name.py | Python | bsd-3-clause | 669 |
# Copyright 2019 Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Software License Agreement (BSD License 2.0)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| ament/ament_lint | ament_copyright/test/cases/bsd_license_tabs/case.py | Python | apache-2.0 | 1,632 |
from __future__ import print_function
import sys
from pyxb.utils.six.moves.urllib import request as urllib_request
import GeoCoder
from pyxb import BIND
from pyxb.utils import domutils
import pyxb.bundles.wssplat.soap11 as soapenv
import pyxb.bundles.wssplat.soapenc as soapenc
# Default address may be overridden by the first CLI argument.
address = '1600 Pennsylvania Ave., Washington, DC'
if 1 < len(sys.argv):
    address = sys.argv[1]

# Wrap the geocode request in a SOAP 1.1 envelope and POST it.
env = soapenv.Envelope(Body=BIND(GeoCoder.geocode(address)))
uri = urllib_request.Request('http://rpc.geocoder.us/service/soap/',
                             env.toxml("utf-8"),
                             { 'SOAPAction' : "http://rpc.geocoder.us/Geo/Coder/US#geocode", 'Content-Type': 'text/xml' } )
rxml = urllib_request.urlopen(uri).read()
#open('response.xml', 'w').write(rxml)
#rxml = open('response.xml').read()
response = soapenv.CreateFromDocument(rxml)
# OK, here we get into ugliness due to WSDL's concept of schema in the
# SOAP encoding not being consistent with XML Schema, even though it
# uses the same namespace.  See
# http://tech.groups.yahoo.com/group/soapbuilders/message/5879.  In
# short, the WSDL spec shows an example using soapenc:Array where a
# restriction was used to set the value of the wsdl:arrayType
# attribute.  This restriction failed to duplicate the element content
# of the base type, resulting in a content type of empty in the
# restricted type.  Consequently, PyXB can't get the information out
# of the DOM node, and we have to skip over the wildcard items to find
# something we can deal with.
# As further evidence the folks who designed SOAP 1.1 didn't know what
# they were doing, the encodingStyle attribute that's supposed to go
# in the Envelope can't validly be present there, since it's not
# listed and it's not in the namespace admitted by the attribute
# wildcard.  Fortunately, PyXB doesn't currently validate wildcards.
encoding_style = response.wildcardAttributeMap().get(soapenv.Namespace.createExpandedName('encodingStyle'))
items = []
if encoding_style == soapenc.Namespace.uri():
    # Drill through the wildcard content: Body -> geocodeResponse ->
    # SOAP-ENC Array -> individual address-match records.
    gcr = response.Body.wildcardElements()[0]
    soap_array = gcr.wildcardElements()[0]
    items = soap_array.wildcardElements()
else:
    pass
for item in items:
    if (item.lat is None) or item.lat._isNil():
        # NOTE(review): this only warns; the record is still printed below
        # with None coordinates -- confirm that is intended.
        print('Warning: Address did not resolve')
    print('''
%s %s %s %s %s
%s, %s %s
%s %s''' % (item.number, item.prefix, item.street, item.type, item.suffix,
            item.city, item.state, item.zip,
            item.lat, item.long))
| CantemoInternal/pyxb | examples/geocoder/client.py | Python | apache-2.0 | 2,471 |
from issues.models import ReportedLink, ReportedUser
from issues.serializers import ReportedLinkSerializer, ReportedUserSerializer
class ReportedLinkAPI(object):
    """View mixin exposing every ReportedLink record (unfiltered scope)."""
    serializer_class = ReportedLinkSerializer

    def get_queryset(self):
        # All reports, regardless of who filed them.
        return ReportedLink.objects.all()
class ReportedLinkSelfAPI(object):
    """View mixin scoping ReportedLink records to the requesting user."""

    def get_queryset(self):
        # Only reports filed by the current authenticated user.
        return ReportedLink.objects.filter(reporter=self.request.user)

    def pre_save(self, obj):
        # Stamp the requesting user as the reporter before saving.
        obj.reporter = self.request.user
class ReportedUserAPI(object):
    """View mixin exposing every ReportedUser record (unfiltered scope)."""
    serializer_class = ReportedUserSerializer

    def get_queryset(self):
        # All reports, regardless of who filed them.
        return ReportedUser.objects.all()
class ReportedUserSelfAPI(object):
    """View mixin scoping ReportedUser records to the requesting user."""

    def get_queryset(self):
        # Only reports filed by the current authenticated user.
        return ReportedUser.objects.filter(reporter=self.request.user)

    def pre_save(self, obj):
        # Stamp the requesting user as the reporter before saving.
        obj.reporter = self.request.user
| projectweekend/Links-API | links/issues/mixins.py | Python | mit | 849 |
#!/usr/bin/env python2.7
from mpi4py import MPI
import random
# Monte Carlo estimate of pi, distributed over MPI ranks (Python 2 script).
start_time = MPI.Wtime()
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
mpisize = comm.Get_size()
# Split 1.2e8 total samples evenly across the ranks.
nsamples = int(12e7/mpisize)
inside = 0
# Seed with the rank so each process draws an independent stream.
random.seed(rank)
for i in range(nsamples):
    x = random.random()
    y = random.random()
    # Count points landing inside the unit quarter-circle.
    if (x*x)+(y*y)<1:
        inside += 1
# Local estimate of pi for this rank; reduce sums the estimates on rank 0
# (pi is None on every other rank).
mypi = (4.0 * inside)/nsamples
pi = comm.reduce(mypi, op=MPI.SUM, root=0)
end_time = MPI.Wtime()
if rank==0:
    # Average the summed per-rank estimates (Python 2 print statements).
    print (1.0 / mpisize)*pi
    print "Run time for %s was %s" %(nsamples,end_time-start_time)
| wscullin/ACCA-CS | src/pi/mpi_pi.py | Python | bsd-3-clause | 551 |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Integration tests for NAME to GRIB2 interoperability."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import warnings
import iris
def name_cb(cube, field, filename):
    """Load callback that makes NAME cubes comparable with GRIB ones."""
    # NAME files give the time point at the end of the range but Iris'
    # GRIB loader creates it in the middle (the GRIB file itself doesn't
    # encode a time point).  Align both coords to the upper bound so the
    # cubes can be compared directly.
    for coord_name in ("time", "forecast_period"):
        coord = cube.coord(coord_name)
        coord.points = coord.bounds[0][1]
    # NAME contains extra vertical meta-data.
    height_coords = cube.coords("height")
    if height_coords:
        height_coords[0].standard_name = "height"
        height_coords[0].long_name = "height above ground level"
@tests.skip_grib
class TestNameToGRIB(tests.IrisTest):
    """Round-trip NAME cubes through GRIB2 and compare the results."""

    def check_common(self, name_cube, grib_cube):
        """Assert data and horizontal coords agree between the two cubes."""
        # BUG FIX: the original compared name_cube.data against itself,
        # which is vacuously true; compare against the GRIB-loaded data.
        self.assertTrue(np.allclose(name_cube.data, grib_cube.data))
        self.assertTrue(
            np.allclose(
                name_cube.coord("latitude").points,
                grib_cube.coord("latitude").points,
            )
        )
        # GRIB longitudes come back in [0, 360); shift to compare with NAME.
        self.assertTrue(
            np.allclose(
                name_cube.coord("longitude").points,
                grib_cube.coord("longitude").points - 360,
            )
        )
        for c in ["height", "time"]:
            if name_cube.coords(c):
                self.assertEqual(name_cube.coord(c), grib_cube.coord(c))

    @tests.skip_data
    def test_name2_field(self):
        """NAMEII cubes survive a save/load round trip through GRIB2."""
        filepath = tests.get_data_path(("NAME", "NAMEII_field.txt"))
        name_cubes = iris.load(filepath)
        # There is a known load/save problem with numerous
        # gribapi/eccodes versions and
        # zero only data, where min == max.
        # This may be a problem with data scaling.
        for i, name_cube in enumerate(name_cubes):
            data = name_cube.data
            if np.min(data) == np.max(data):
                msg = (
                    'NAMEII cube #{}, "{}" has empty data : '
                    "SKIPPING test for this cube, as save/load will "
                    "not currently work."
                )
                warnings.warn(msg.format(i, name_cube.name()))
                continue
            with self.temp_filename(".grib2") as temp_filename:
                iris.save(name_cube, temp_filename)
                grib_cube = iris.load_cube(temp_filename, callback=name_cb)
                self.check_common(name_cube, grib_cube)
                self.assertCML(
                    grib_cube,
                    tests.get_result_path(
                        (
                            "integration",
                            "name_grib",
                            "NAMEII",
                            "{}_{}.cml".format(i, name_cube.name()),
                        )
                    ),
                )

    @tests.skip_data
    def test_name3_field(self):
        """NAMEIII cubes survive a save/load round trip through GRIB2."""
        filepath = tests.get_data_path(("NAME", "NAMEIII_field.txt"))
        name_cubes = iris.load(filepath)
        for i, name_cube in enumerate(name_cubes):
            with self.temp_filename(".grib2") as temp_filename:
                iris.save(name_cube, temp_filename)
                grib_cube = iris.load_cube(temp_filename, callback=name_cb)
                self.check_common(name_cube, grib_cube)
                self.assertCML(
                    grib_cube,
                    tests.get_result_path(
                        (
                            "integration",
                            "name_grib",
                            "NAMEIII",
                            "{}_{}.cml".format(i, name_cube.name()),
                        )
                    ),
                )
# Allow running this module directly as a test script.
if __name__ == "__main__":
    tests.main()
| pp-mo/iris | lib/iris/tests/integration/format_interop/test_name_grib.py | Python | lgpl-3.0 | 4,090 |
#! /usr/bin/env python
"""
A script that provides:
1. Ability to grab binaries where possible from LLVM.
2. Ability to download binaries from MongoDB cache for clang-format.
3. Validates clang-format is the right version.
4. Has support for checking which files are to be checked.
5. Supports validating and updating a set of files to the right coding style.
"""
from __future__ import print_function, absolute_import
import Queue
import difflib
import itertools
import os
import re
import shutil
import string
import subprocess
import sys
import tarfile
import tempfile
import threading
import time
import urllib
from distutils import spawn
from optparse import OptionParser
from multiprocessing import cpu_count
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__)))))
from buildscripts.resmokelib.utils import globstar
##############################################################################
#
# Constants for clang-format
#
#
# Expected version of clang-format
CLANG_FORMAT_VERSION = "3.6.0"
# Name of clang-format as a binary
CLANG_FORMAT_PROGNAME = "clang-format"
# URL location of the "cached" copy of clang-format to download
# for users which do not have clang-format installed
CLANG_FORMAT_HTTP_LINUX_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/clang-format-rhel55.tar.gz"
# URL on LLVM's website to download the clang tarball
# ($version and $llvm_distro are filled in by get_llvm_url below).
CLANG_FORMAT_SOURCE_URL_BASE = string.Template("http://llvm.org/releases/$version/clang+llvm-$version-$llvm_distro.tar.xz")
# Path in the tarball to the clang-format binary
# ($version and $tar_path are filled in by get_tar_path below).
CLANG_FORMAT_SOURCE_TAR_BASE = string.Template("clang+llvm-$version-$tar_path/bin/" + CLANG_FORMAT_PROGNAME)
# Copied from python 2.7 version of subprocess.py
# Exception classes used by this module.
# Kept byte-for-byte with the stdlib copy on purpose -- do not "improve".
class CalledProcessError(Exception):
    """This exception is raised when a process run by check_call() or
    check_output() returns a non-zero exit status.
    The exit status will be stored in the returncode attribute;
    check_output() will also store the output in the output attribute.
    """
    def __init__(self, returncode, cmd, output=None):
        self.returncode = returncode
        self.cmd = cmd
        self.output = output
    def __str__(self):
        return ("Command '%s' returned non-zero exit status %d with output %s" %
                (self.cmd, self.returncode, self.output))
# Copied from python 2.7 version of subprocess.py
# Kept byte-for-byte with the stdlib copy on purpose -- do not "improve".
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.
    If the exit code was non-zero it raises a CalledProcessError.  The
    CalledProcessError object will have the return code in the returncode
    attribute and output in the output attribute.
    The arguments are the same as for the Popen constructor.  Example:
    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'
    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.
    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    output, unused_err = process.communicate()
    retcode = process.poll()
    if retcode:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise CalledProcessError(retcode, cmd, output)
    return output
def callo(args):
    """Call a program, and capture its output
    """
    # Thin convenience wrapper; raises the local CalledProcessError copy
    # (defined above) on non-zero exit.
    return check_output(args)
def get_llvm_url(version, llvm_distro):
    """Build the llvm.org download URL for the given clang-format release."""
    substitutions = {'version': version, 'llvm_distro': llvm_distro}
    return CLANG_FORMAT_SOURCE_URL_BASE.substitute(**substitutions)
def get_tar_path(version, tar_path):
    """Return the member path of the clang-format binary inside the tarball."""
    substitutions = {'version': version, 'tar_path': tar_path}
    return CLANG_FORMAT_SOURCE_TAR_BASE.substitute(**substitutions)
def extract_clang_format(tar_path):
    """Extract just the clang-format binary from the archive into the cwd."""
    # On OSX, we shell out to tar because tarfile doesn't support xz compression
    if sys.platform == 'darwin':
        subprocess.call(['tar', '-xzf', tar_path, '*clang-format*'])
    # Otherwise we use tarfile because some versions of tar don't support
    # wildcards without a special flag
    else:
        # BUG FIX: use a context manager so the archive is closed even when
        # getnames()/extract() raises (the old code leaked the handle).
        with tarfile.open(tar_path) as tarfp:
            for name in tarfp.getnames():
                if name.endswith('clang-format'):
                    tarfp.extract(name)
def get_clang_format_from_llvm(llvm_distro, tar_path, dest_file):
    """Download clang-format from llvm.org, unpack the tarball, and move
    the clang-format binary to ``dest_file``.
    """
    url = get_llvm_url(CLANG_FORMAT_VERSION, llvm_distro)
    temp_tar_file = os.path.join(tempfile.gettempdir(), "temp.tar.xz")
    print("Downloading clang-format %s from %s, saving to %s" % (CLANG_FORMAT_VERSION,
                                                                 url, temp_tar_file))
    urllib.urlretrieve(url, temp_tar_file)
    extract_clang_format(temp_tar_file)
    # Move the extracted binary out of the unpacked tree to its final home.
    shutil.move(get_tar_path(CLANG_FORMAT_VERSION, tar_path), dest_file)
def get_clang_format_from_linux_cache(dest_file):
    """Download the MongoDB-cached Linux clang-format build to ``dest_file``."""
    url = CLANG_FORMAT_HTTP_LINUX_CACHE
    temp_tar_file = os.path.join(tempfile.gettempdir(), "temp.tar.xz")
    print("Downloading clang-format %s from %s, saving to %s" % (CLANG_FORMAT_VERSION,
                                                                 url, temp_tar_file))
    urllib.urlretrieve(url, temp_tar_file)
    extract_clang_format(temp_tar_file)
    # Move the extracted binary out of the unpacked tree to its final home.
    shutil.move("llvm/Release/bin/clang-format", dest_file)
class ClangFormat(object):
    """Class encapsulates finding a suitable copy of clang-format,
    and linting/formating an individual file.

    Search order for the binary: explicit path argument, MONGO_CLANG_FORMAT
    environment variable, the user's PATH, Program Files (Windows only),
    and finally a download into cache_dir.
    """
    def __init__(self, path, cache_dir):
        # Prefer a distro-packaged clang-format-3.6 name when present.
        if os.path.exists('/usr/bin/clang-format-3.6'):
            clang_format_progname = 'clang-format-3.6'
        else:
            clang_format_progname = CLANG_FORMAT_PROGNAME
        # Initialize clang-format configuration information
        if sys.platform.startswith("linux"):
            #"3.6.0/clang+llvm-3.6.0-x86_64-linux-gnu-ubuntu-14.04.tar.xz
            self.platform = "linux_x64"
            self.llvm_distro = "x86_64-linux-gnu-ubuntu"
            self.tar_path = "x86_64-linux-gnu"
        elif sys.platform == "win32":
            self.platform = "windows_x64"
            self.llvm_distro = "windows_x64"
            self.tar_path = None
            clang_format_progname += ".exe"
        elif sys.platform == "darwin":
            #"3.6.0/clang+llvm-3.6.0-x86_64-apple-darwin.tar.xz
            self.platform = "darwin_x64"
            self.llvm_distro = "x86_64-apple-darwin"
            self.tar_path = "x86_64-apple-darwin"
        self.path = None
        # Find Clang-Format now
        if path is not None:
            if os.path.isfile(path):
                self.path = path
            else:
                print("WARNING: Could not find clang-format %s" % (path))
        # Check the envionrment variable
        # NOTE(review): when set, MONGO_CLANG_FORMAT overrides an explicit
        # path argument found above -- confirm that precedence is intended.
        if "MONGO_CLANG_FORMAT" in os.environ:
            self.path = os.environ["MONGO_CLANG_FORMAT"]
        if self.path and not self._validate_version(warn=True):
            self.path = None
        # Check the users' PATH environment variable now
        if self.path is None:
            self.path = spawn.find_executable(clang_format_progname)
        if self.path and not self._validate_version(warn=True):
            self.path = None
        # If Windows, try to grab it from Program Files
        if sys.platform == "win32":
            win32bin = os.path.join(os.environ["ProgramFiles(x86)"], "LLVM\\bin\\clang-format.exe")
            if os.path.exists(win32bin):
                self.path = win32bin
        # Have not found it yet, download it from the web
        if self.path is None:
            if not os.path.isdir(cache_dir):
                os.makedirs(cache_dir)
            self.path = os.path.join(cache_dir, clang_format_progname)
            if not os.path.isfile(self.path):
                if sys.platform.startswith("linux"):
                    get_clang_format_from_linux_cache(self.path)
                elif sys.platform == "darwin":
                    get_clang_format_from_llvm(self.llvm_distro, self.tar_path, self.path)
                else:
                    print("ERROR: clang-format.py does not support downloading clang-format " +
                          " on this platform, please install clang-format " + CLANG_FORMAT_VERSION)
        # Validate we have the correct version
        self._validate_version()
        # Serializes diff output from concurrent lint() calls.
        self.print_lock = threading.Lock()

    def _validate_version(self, warn=False):
        """Validate clang-format is the expected version.

        Returns True when `clang-format --version` reports
        CLANG_FORMAT_VERSION, False otherwise (optionally warning).
        """
        try:
            cf_version = callo([self.path, "--version"])
        except CalledProcessError:
            cf_version = "clang-format call failed."
        if CLANG_FORMAT_VERSION in cf_version:
            return True
        if warn:
            print("WARNING: clang-format found in path, but incorrect version found at " +
                  self.path + " with version: " + cf_version)
        return False

    def lint(self, file_name):
        """Check the specified file has the correct format.

        Prints a unified diff and returns False when the file differs
        from clang-format's output; returns True when already formatted.
        """
        with open(file_name, 'r') as original_text:
            original_file = original_text.read()
        # Get formatted file as clang-format would format the file
        formatted_file = callo([self.path, "--style=file", file_name])
        if original_file != formatted_file:
            original_lines = original_file.splitlines()
            formatted_lines = formatted_file.splitlines()
            result = difflib.unified_diff(original_lines, formatted_lines)
            # Take a lock to ensure diffs do not get mixed
            with self.print_lock:
                print("ERROR: Found diff for " + file_name)
                print("To fix formatting errors, run %s --style=file -i %s" %
                      (self.path, file_name))
                for line in result:
                    print(line.rstrip())
            return False
        return True

    def format(self, file_name):
        """Update the format of the specified file in place.

        Returns True when clang-format exits successfully.
        """
        # Update the file with clang-format
        return not subprocess.call([self.path, "--style=file", "-i", file_name])
def parallel_process(items, func):
    """Run a set of work items to completion.

    Spawns one daemon thread per CPU, each pulling items off a shared queue
    and calling func(item).  Stops early (and returns False) as soon as any
    call returns a falsy value; returns True when all items succeed.
    """
    try:
        cpus = cpu_count()
    except NotImplementedError:
        cpus = 1
    # print("Running across %d cpus" % (cpus))
    task_queue = Queue.Queue()
    # Use a list so that worker function will capture this variable
    pp_event = threading.Event()
    pp_result = [True]
    pp_lock = threading.Lock()
    def worker():
        """Worker thread to process work items in parallel
        """
        while not pp_event.is_set():
            try:
                item = task_queue.get_nowait()
            except Queue.Empty:
                # if the queue is empty, exit the worker thread
                pp_event.set()
                return
            # NOTE(review): if func(item) raises, 'ret' below is unbound and
            # the thread dies with a NameError after task_done() -- confirm
            # whether func is expected never to raise.
            try:
                ret = func(item)
            finally:
                # Tell the queue we finished with the item
                task_queue.task_done()
            # Return early if we fail, and signal we are done
            if not ret:
                with pp_lock:
                    pp_result[0] = False
                pp_event.set()
                return
    # Enqueue all the work we want to process
    for item in items:
        task_queue.put(item)
    # Process all the work
    threads = []
    for cpu in range(cpus):
        thread = threading.Thread(target=worker)
        thread.daemon = True
        thread.start()
        threads.append(thread)
    # Wait for the threads to finish
    # Loop with a timeout so that we can process Ctrl-C interrupts
    # Note: On Python 2.6 wait always returns None so we check is_set also,
    # This works because we only set the event once, and never reset it
    while not pp_event.wait(1) and not pp_event.is_set():
        time.sleep(1)
    for thread in threads:
        thread.join()
    return pp_result[0]
def get_base_dir():
    """Get the base directory for mongo repo.

    This script assumes that it is running in site_scons/buildscripts/, and uses
    that to find the base directory when git is unavailable or we are not
    inside a git work tree.
    """
    try:
        return subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).rstrip()
    except Exception:
        # Narrowed from a bare 'except:' (which also swallowed SystemExit /
        # KeyboardInterrupt).  Any git failure -- binary missing, or not a
        # valid git directory -- falls back to the script's own location.
        return os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
def get_repos():
    """Get a list of Repos to check clang-format for
    """
    root = get_base_dir()
    return [Repo(path) for path in [root]]
class Repo(object):
    """Class encapsulating all knowledge about a git repository, and its
    metadata needed to run clang-format against it.
    """
    def __init__(self, path):
        self.path = path
        # Cache the candidate file list and repository root up front.
        self.candidate_files = self._get_candidate_files()
        self.root = self._get_root()
    def _callgito(self, args):
        """Run git against this repository and return its output."""
        # --git-dir/--work-tree are the equivalent of -C in newer versions
        # of Git; they keep compatibility back to ~1.8.
        git_dir = os.path.join(self.path, ".git")
        base_cmd = ['git', '--git-dir', git_dir, '--work-tree', self.path]
        return callo(base_cmd + args)
    def _get_local_dir(self, path):
        """Return a path relative to the git root directory."""
        return os.path.relpath(path, self.root) if os.path.isabs(path) else path
    def get_candidates(self, candidates):
        """Intersect the input list with the repository's candidate files.

        Returns the full path to each file for clang-format to consume.
        """
        # NOTE: Files may have an absolute root (i.e. leading /)
        if candidates:
            local_names = [self._get_local_dir(name) for name in candidates]
            chosen = list(set(local_names).intersection(self.get_candidate_files()))
        else:
            chosen = list(self.get_candidate_files())
        # Expand each candidate to its normalized absolute path.
        return [os.path.normpath(os.path.join(self.root, name)) for name in chosen]
    def get_root(self):
        """Return the cached root directory for this repository."""
        return self.root
    def _get_root(self):
        """Ask git for the top-level directory of this repository."""
        return self._callgito(['rev-parse', '--show-toplevel']).rstrip()
    def get_candidate_files(self):
        """Return the list of candidate files."""
        return self._get_candidate_files()
    def _get_candidate_files(self):
        """Query git for all files in the repo to consider for analysis.

        Picks the interesting .h/.cpp files under src/ in the mongo and
        mongo-enterprise repos, excluding third_party code.
        """
        listing = self._callgito(["ls-files"])
        interesting = [line.rstrip() for line in listing.splitlines()
                       if "src" in line and "src/third_party" not in line]
        suffix_re = re.compile(r'\.(h|cpp)$')
        return [name for name in interesting if suffix_re.search(name)]
def expand_file_string(glob_pattern):
    """Expand a glob pattern into a list of absolute file names."""
    return list(map(os.path.abspath, globstar.iglob(glob_pattern)))
def get_files_to_check(files):
    """Filter the specified list of files to check down to the actual
    list of files that need to be checked."""
    # Expand every glob pattern into concrete candidate paths.
    candidates = list(itertools.chain.from_iterable(
        expand_file_string(pattern) for pattern in files))
    # Keep only those candidates each repository recognizes.
    per_repo = [repo.get_candidates(candidates) for repo in get_repos()]
    return list(itertools.chain.from_iterable(per_repo))
def get_files_to_check_from_patch(patches):
    """Take a patch file generated by git diff, and scan the patch for a list of files to check.
    """
    candidates = []
    # Matches the "diff --git a/<path> b/<path>" header emitted for each file.
    # Use \S+ instead of the previous explicit character class
    # [a-z\/\.\-_0-9]+, which silently skipped any path containing an
    # upper-case letter (e.g. SConscript) or other legal path characters.
    check = re.compile(r"^diff --git a\/(\S+) b\/\S+")
    lines = []
    for patch in patches:
        # NOTE(review): opened in binary mode; under Python 3 the pattern
        # would need to be a bytes pattern — confirm target interpreter.
        with open(patch, "rb") as infile:
            lines += infile.readlines()
    candidates = [check.match(line).group(1) for line in lines if check.match(line)]
    repos = get_repos()
    valid_files = list(itertools.chain.from_iterable([r.get_candidates(candidates) for r in repos]))
    return valid_files
def _get_build_dir():
    """Get the location of the scons' build directory in case we need to download clang-format
    """
    base = get_base_dir()
    return os.path.join(base, "build")
def _lint_files(clang_format, files):
    """Lint a list of files with clang-format; exit non-zero on style violations."""
    runner = ClangFormat(clang_format, _get_build_dir())
    paths = [os.path.abspath(name) for name in files]
    if not parallel_process(paths, runner.lint):
        print("ERROR: Code Style does not match coding style")
        sys.exit(1)
def lint_patch(clang_format, infile):
    """Lint patch command entry point
    """
    files = get_files_to_check_from_patch(infile)
    # Patch may have files that we do not want to check which is fine
    if not files:
        return
    _lint_files(clang_format, files)
def lint(clang_format, glob):
    """Lint files command entry point
    """
    _lint_files(clang_format, get_files_to_check(glob))
    return True
def _format_files(clang_format, files):
    """Format a list of files with clang-format; exit non-zero on failure."""
    runner = ClangFormat(clang_format, _get_build_dir())
    targets = [os.path.abspath(name) for name in files]
    if not parallel_process(targets, runner.format):
        print("ERROR: failed to format files")
        sys.exit(1)
def format_func(clang_format, glob):
    """Format files command entry point
    """
    _format_files(clang_format, get_files_to_check(glob))
def usage():
    """Print a short usage summary for this script."""
    message = ("clang-format.py supports 3 commands [ lint, lint-patch, format ]. Run "
               " <command> -? for more information")
    print(message)
def main():
    """Main entry point: dispatch to lint/lint-patch/format or print usage."""
    if len(sys.argv) <= 1:
        usage()
        return
    command = sys.argv[1]
    parser = OptionParser()
    parser.add_option("-c", "--clang-format", type="string", dest="clang_format")
    # Map each supported sub-command to its handler.
    handlers = {
        "lint": lint,
        "lint-patch": lint_patch,
        "format": format_func,
    }
    handler = handlers.get(command)
    if handler is None:
        usage()
        return
    (options, args) = parser.parse_args(args=sys.argv[2:])
    handler(options.clang_format, args)
# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()
| sanathkumarv/RestAPIWt | tools/mongo-cxx-driver-legacy/site_scons/buildscripts/clang_format.py | Python | apache-2.0 | 19,897 |
def is_isogram(s):
    """Return True when *s* contains no repeated characters (case-insensitive)."""
    seen = set()
    for ch in s.lower():
        if ch in seen:
            return False
        seen.add(ch)
    return True
| VladKha/CodeWars | 7 kyu/Isograms/solve.py | Python | gpl-3.0 | 60 |
# encoding: utf-8
import woo.config
if 'qt4' in woo.config.features:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
else:
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from woo.qt.ObjectEditor import *
import woo
import woo.qt
from woo.dem import *
#from woo.sparc import *
from woo.core import *
try: from woo.gl import *
except ImportError: pass
class EngineInspector(QWidget):
    """Inspector tab exposing the scene's engine sequence in a SeqObject editor."""
    def __init__(self,parent=None):
        QWidget.__init__(self,parent)
        # Single-cell grid holding the sequence editor, no padding.
        grid=QGridLayout(self); grid.setSpacing(0); grid.setContentsMargins(0,0,0,0)
        # Getter/setter lambdas bind the editor to woo.master.scene.engines;
        # the matching trait is looked up from Scene's attribute traits.
        self.serEd=SeqObject(parent=None,getter=lambda:woo.master.scene.engines,setter=lambda x:setattr(woo.master.scene,'engines',x),T=Engine,trait=[t for t in Scene._attrTraits if t.name=='engines'][0],path='woo.master.scene.engines')
        grid.addWidget(self.serEd)
        self.setLayout(grid)
#class MaterialsInspector(QWidget):
# def __init__(self,parent=None):
# QWidget.__init__(self,parent)
# grid=QGridLayout(self); grid.setSpacing(0); grid.setContentsMargins(0,0,0,0)
# self.serEd=SeqObject(parent=None,getter=lambda:O.materials,setter=lambda x:setattr(O,'materials',x),serType=Engine)
# grid.addWidget(self.serEd)
# self.setLayout(grid)
class CellInspector(QWidget):
    """Inspector tab for the periodic cell: a checkbox toggling periodicity
    plus an ObjectEditor for the cell, refreshed every second."""
    def __init__(self,parent=None):
        QWidget.__init__(self,parent)
        self.layout=QVBoxLayout(self) #; self.layout.setSpacing(0); self.layout.setContentsMargins(0,0,0,0)
        self.periCheckBox=QCheckBox('periodic boundary',self)
        self.periCheckBox.clicked.connect(self.update)
        self.layout.addWidget(self.periCheckBox)
        self.scroll=QScrollArea(self); self.scroll.setWidgetResizable(True)
        self.layout.addWidget(self.scroll)
        self.setLayout(self.layout)
        self.refresh()
        # Poll the scene once a second so external changes show up.
        self.refreshTimer=QTimer(self)
        self.refreshTimer.timeout.connect(self.refresh)
        self.refreshTimer.start(1000)
    def refresh(self):
        """Sync the checkbox and the cell editor with the current scene state."""
        S=woo.master.scene
        self.periCheckBox.setChecked(S.periodic)
        editor=self.scroll.widget()
        # Drop the editor when periodicity is off; (re)create it when the
        # scene became periodic or the displayed cell object changed.
        if not S.periodic and editor: self.scroll.takeWidget()
        if (S.periodic and not editor) or (editor and editor.ser!=S.cell):
            self.scroll.setWidget(ObjectEditor(S.cell,parent=self,showType=True,path='woo.master.cell'))
    def update(self):
        """Checkbox slot: apply the new periodicity to the scene."""
        self.scroll.takeWidget() # do this before changing periodicity, otherwise the ObjectEditor will raise exception about None object
        S=woo.master.scene
        S.periodic=self.periCheckBox.isChecked()
        self.refresh()
class SceneInspector(QWidget):
    """Inspector tab showing an ObjectEditor for the master scene itself."""
    def __init__(self,parent=None):
        QWidget.__init__(self,parent)
        grid=QGridLayout(self); grid.setSpacing(0); grid.setContentsMargins(0,0,0,0)
        self.serEd=ObjectEditor(woo.master.scene,parent=self,showType=False,path='woo.master.scene')
        grid.addWidget(self.serEd)
        self.setLayout(grid)
def makeBodyLabel(b):
    """Return a short unicode label for particle *b*: its id followed by a
    glyph for its shape type, plus an anchor glyph for single-node
    particles with blocked degrees of freedom.
    """
    ret=str(b.id)+u' '
    if not b.shape:
        # Shapeless particle: show the dotted-square placeholder only.
        # (The anchor check below must be skipped — the original
        # unconditionally dereferenced b.shape.nodes and crashed here.)
        ret+=u'⬚'
    else:
        typeMap={'Sphere':u'⚫','Facet':u'△','FlexFacet':u'⧋','Wall':u'┃','Box':u'⎕','Cylinder':u'⌭','Clump':u'☍','InfCylinder':u'◎','Ellipsoid':u'⬯','Capsule':u'O'}
        ret+=typeMap.get(b.shape.__class__.__name__,u'﹖')
        # BUGFIX: the original compared the nodes *list* to 1
        # ((b.shape.nodes)==1, always False), so the anchor was never shown.
        if len(b.shape.nodes)==1 and b.blocked!='': ret+=u'⚓'
    return ret
def getBodyIdFromLabel(label):
    """Parse the leading particle id out of a label built by makeBodyLabel.

    Returns -1 (after logging) when no id can be parsed.
    """
    try:
        return int(str(label).split()[0])
    except (ValueError, IndexError):
        # IndexError added: an empty label has no tokens and previously
        # raised out of this function instead of returning -1.
        print('Error with label:',str(label))
        return -1
class BodyInspector(QWidget):
    """Inspector tab for a single particle: id spinbox, combo of contacting
    particles, navigation buttons, a force/torque readout and an
    ObjectEditor for the particle itself. Keeps its selection in sync with
    the OpenGL view (when compiled in) and refreshes once a second."""
    def __init__(self,parId=None,parent=None,bodyLinkCallback=None,intrLinkCallback=None):
        QWidget.__init__(self,parent)
        self.parId=(0 if parId==None else parId)
        if 'opengl' in woo.config.features:
            v=woo.qt.views()
            # Adopt the particle currently selected in the first GL view.
            # NOTE(review): assigns self.bodyId, which is not read anywhere
            # else in this class — possibly intended self.parId; confirm.
            if parId==None and len(v)>0 and v[0].selection>0: self.bodyId=v[0].selection
        # Last id synchronized with the GL view, used to detect which side changed.
        self.idGlSync=self.parId
        self.bodyLinkCallback,self.intrLinkCallback=bodyLinkCallback,intrLinkCallback
        self.bodyIdBox=QSpinBox(self)
        self.bodyIdBox.setMinimum(0)
        self.bodyIdBox.setMaximum(1000000000)
        self.bodyIdBox.setValue(self.parId)
        self.intrWithCombo=QComboBox(self);
        self.gotoBodyButton=QPushButton(u'→ #',self)
        self.gotoIntrButton=QPushButton(u'→ #+#',self)
        # id selector
        topBoxWidget=QWidget(self); topBox=QHBoxLayout(topBoxWidget); topBox.setContentsMargins(0,0,0,0); #topBox.setSpacing(0);
        hashLabel=QLabel('#',self); hashLabel.setFixedWidth(8)
        topBox.addWidget(hashLabel)
        topBox.addWidget(self.bodyIdBox)
        self.plusLabel=QLabel('+',self); topBox.addWidget(self.plusLabel)
        hashLabel2=QLabel('#',self); hashLabel2.setFixedWidth(8); topBox.addWidget(hashLabel2)
        topBox.addWidget(self.intrWithCombo)
        topBox.addStretch()
        topBox.addWidget(self.gotoBodyButton)
        topBox.addWidget(self.gotoIntrButton)
        topBoxWidget.setLayout(topBox)
        # forces display
        forcesWidget=QFrame(self); forcesWidget.setFrameShape(QFrame.Box); self.forceGrid=QGridLayout(forcesWidget);
        self.forceGrid.setVerticalSpacing(0); self.forceGrid.setHorizontalSpacing(9); self.forceGrid.setContentsMargins(4,4,4,4);
        # 4 rows (force/torque/move/rot) x (row label + 3 components).
        for i,j in itertools.product((0,1,2,3),(-1,0,1,2)):
            lab=QLabel('<small>'+('force','torque','move','rot')[i]+'</small>' if j==-1 else ''); self.forceGrid.addWidget(lab,i,j+1);
            if j>=0: lab.setAlignment(Qt.AlignRight)
            if i>1: lab.hide() # do not show forced moves and rotations by default (they will appear if non-zero)
        self.showMovRot=False
        #
        self.grid=QGridLayout(self); self.grid.setSpacing(0); self.grid.setContentsMargins(0,0,0,0)
        self.grid.addWidget(topBoxWidget)
        self.grid.addWidget(forcesWidget)
        self.scroll=QScrollArea(self)
        self.scroll.setWidgetResizable(True)
        self.grid.addWidget(self.scroll)
        self.tryShowBody()
        self.bodyIdBox.valueChanged.connect(self.bodyIdSlot)
        self.gotoBodyButton.clicked.connect(self.gotoBodySlot)
        self.gotoIntrButton.clicked.connect(self.gotoIntrSlot)
        # Periodic refresh keeps labels/forces current with the simulation.
        self.refreshTimer=QTimer(self)
        self.refreshTimer.timeout.connect(self.refreshEvent)
        self.refreshTimer.start(1000)
        self.intrWithCombo.addItems(['0']); self.intrWithCombo.setCurrentIndex(0);
        self.intrWithCombo.setMinimumWidth(80)
        if self.parId==None: self.setWindowTitle('Particle')
        else: self.setWindowTitle('Particle #%d'%self.parId)
        self.gotoBodySlot()
    def displayForces(self):
        """Fill the force/torque grid for the current particle, or show why
        the values are unavailable (no shape / multinodal / no DEM data)."""
        if self.parId==None: return
        S=woo.master.scene
        b=S.dem.par[self.parId]
        if not b.shape: noshow='no shape'
        elif len(b.shape.nodes)==0: noshow='no nodes'
        elif len(b.shape.nodes)>1: noshow='multinodal'
        elif not b.shape.nodes[0].dem: noshow='no Node.dem'
        else: noshow=None
        if noshow:
            self.forceGrid.itemAtPosition(0,1).widget().setText('<small>'+noshow+'</small>')
            for i,j in ((0,2),(0,3),(1,1),(1,2),(1,3)): self.forceGrid.itemAtPosition(i,j).widget().setText('')
        else:
            try:
                d=b.shape.nodes[0].dem
                val=[d.force,d.torque]
                rows=(0,1)
                for i,j in itertools.product(rows,(0,1,2)): self.forceGrid.itemAtPosition(i,j+1).widget().setText('<small>'+str(val[i][j])+'</small>')
            except IndexError:pass
    def tryShowBody(self):
        """Put an ObjectEditor for the current particle into the scroll area.

        Returns False (and resets parId) when the id is invalid."""
        try:
            if self.parId==None: raise IndexError()
            b=woo.master.scene.dem.par[self.parId]
            self.serEd=ObjectEditor(b,showType=True,parent=self,path='woo.master.scene.dem.par[%d]'%self.parId)
        except IndexError:
            if self.bodyIdBox.hasFocus(): return False
            self.serEd=QFrame(self)
            self.parId=None
        self.scroll.setWidget(self.serEd)
        return True
    def changeIdSlot(self,newId):
        """Programmatically switch the displayed particle to newId."""
        self.bodyIdBox.setValue(newId);
        self.bodyIdSlot(newId)
    def bodyIdSlot(self,currId):
        """Spinbox slot: try to display particle currId; flag invalid ids red."""
        self.parId=currId
        if not self.tryShowBody():
            self.bodyIdBox.setStyleSheet('QWidget { background: red }')
            return # we still have focus, don't attempt to change
        else:
            self.bodyIdBox.setStyleSheet('QWidget { background: none }')
        # self.parId=currId # self.bodyIdBox.value()
        if self.parId==None: self.setWindowTitle('Particle')
        else: self.setWindowTitle('Particle #%d'%self.parId)
        self.refreshEvent()
    def gotoBodySlot(self):
        """Jump to the particle currently selected in the contacts combo."""
        try:
            id=int(getBodyIdFromLabel(self.intrWithCombo.currentText()))
        except ValueError: return # empty id
        if not self.bodyLinkCallback:
            self.bodyIdBox.setValue(id); self.parId=id
        else: self.bodyLinkCallback(id)
    def gotoIntrSlot(self):
        """Open (or delegate to) the inspector for the contact between the
        current particle and the one selected in the combo."""
        ids=self.bodyIdBox.value(),getBodyIdFromLabel(self.intrWithCombo.currentText())
        if not self.intrLinkCallback:
            self.ii=InteractionInspector(ids)
            self.ii.show()
        else: self.intrLinkCallback(ids)
    def refreshEvent(self):
        """Timer slot: validate the current id, sync with the GL selection,
        and rebuild the contacting-particles combo and force readout."""
        S=woo.master.scene
        try: S.dem.par[self.parId]
        except: self.parId=None # invalidate deleted body
        # no body shown yet, try to get the first one...
        if self.parId==None and len(S.dem.par)>0:
            try:
                # print 'SET ZERO'
                b=S.dem.par[0]; self.bodyIdBox.setValue(0); self.parId=0
            except IndexError: pass
        if 'opengl' in woo.config.features:
            v=woo.qt.views()
            if len(v)>0 and v[0].selection!=self.parId:
                if self.idGlSync==self.parId: # changed in the viewer, reset ourselves
                    self.parId=self.idGlSync=v[0].selection; self.changeIdSlot(self.parId)
                    return
                elif self.parId!=None: v[0].selection=self.idGlSync=self.parId # changed here, set in the viewer
        meId=self.bodyIdBox.value(); pos=self.intrWithCombo.currentIndex()
        try:
            meLabel=makeBodyLabel(S.dem.par[meId])
        except IndexError: meLabel=u'…'
        self.plusLabel.setText(' '.join(meLabel.split()[1:])+' <b>+</b>') # do not repeat the id
        self.bodyIdBox.setMaximum(len(S.dem.par)-1)
        try: others=S.dem.par[meId].con
        except IndexError: others=[]
        #(i.id1 if i.id1!=meId else i.id2) for i in O.interactions.withBody(self.bodyIdBox.value()) if i.isReal]
        others.sort()
        self.intrWithCombo.clear()
        self.intrWithCombo.addItems([makeBodyLabel(S.dem.par[i]) for i in others])
        if pos>self.intrWithCombo.count() or pos<0: pos=0
        self.intrWithCombo.setCurrentIndex(pos);
        other=self.intrWithCombo.itemText(pos)
        if other=='':
            self.gotoBodyButton.setEnabled(False); self.gotoIntrButton.setEnabled(False)
            other=u'∅'
        else:
            self.gotoBodyButton.setEnabled(True); self.gotoIntrButton.setEnabled(True)
        self.gotoBodyButton.setText(u'→ %s'%other)
        self.gotoIntrButton.setText(u'→ %s + %s'%(meLabel,other))
        self.displayForces()
class InteractionInspector(QWidget):
    """Inspector tab for a single contact: linear-index spinbox, buttons
    jumping to either endpoint particle and an ObjectEditor for the
    contact object, refreshed once a second."""
    def __init__(self,ids=None,parent=None,bodyLinkCallback=None):
        QWidget.__init__(self,parent)
        self.bodyLinkCallback=bodyLinkCallback
        # (id1,id2) of the displayed contact, or None when nothing is shown.
        self.ids=ids
        self.intrLinIxBox=QSpinBox(self)
        self.intrLinIxBox.setMinimum(0)
        self.intrLinIxBox.setMaximum(1000000000)
        self.gotoId1Button=QPushButton(u'#…',self)
        self.gotoId2Button=QPushButton(u'#…',self)
        self.gotoId1Button.clicked.connect(self.gotoId1Slot)
        self.gotoId2Button.clicked.connect(self.gotoId2Slot)
        self.intrLinIxBox.valueChanged.connect(self.setLinIxSlot)
        topBoxWidget=QWidget(self)
        topBox=QHBoxLayout(topBoxWidget)
        topBox.addWidget(self.intrLinIxBox)
        topBox.addWidget(self.gotoId1Button)
        labelPlus=QLabel('+',self); labelPlus.setAlignment(Qt.AlignHCenter)
        topBox.addWidget(labelPlus)
        topBox.addWidget(self.gotoId2Button)
        topBoxWidget.setLayout(topBox)
        self.setWindowTitle(u'No contact')
        self.grid=QGridLayout(self); self.grid.setSpacing(0); self.grid.setContentsMargins(0,0,0,0)
        self.grid.addWidget(topBoxWidget,0,0)
        self.scroll=QScrollArea(self)
        self.scroll.setWidgetResizable(True)
        self.grid.addWidget(self.scroll)
        self.refreshTimer=QTimer(self)
        self.refreshTimer.timeout.connect(self.refreshEvent)
        self.refreshTimer.start(1000)
        if self.ids: self.setupInteraction()
    def setupInteraction(self):
        'Change view; called whenever the interaction to be displayed changes'
        S=woo.master.scene
        try:
            if self.ids==None: raise IndexError() # to be caught right away
            intr=S.dem.con[self.ids] # also might raise IndexError, if the contact is dead
            if not intr: raise IndexError()
            self.intrLinIxBox.setValue(intr.linIx)
            self.serEd=ObjectEditor(intr,showType=True,parent=self.scroll,path='woo.master.scene.dem.con[%d,%d]'%(self.ids[0],self.ids[1]))
            self.scroll.setWidget(self.serEd)
            self.gotoId1Button.setText('#'+makeBodyLabel(S.dem.par[self.ids[0]]))
            self.gotoId2Button.setText('#'+makeBodyLabel(S.dem.par[self.ids[1]]))
            self.setWindowTitle('Contact #%d + #%d'%(self.ids[0],self.ids[1]))
        except (IndexError,):
            if self.ids: # reset view (there was an interaction)
                self.ids=None
                self.serEd=QFrame(self.scroll); self.scroll.setWidget(self.serEd)
                self.setWindowTitle('No contact')
                self.gotoId1Button.setText(u'#…'); self.gotoId2Button.setText(u'#…');
    def gotoId(self,bodyId):
        """Open (or delegate to) a BodyInspector for bodyId."""
        if self.bodyLinkCallback: self.bodyLinkCallback(bodyId)
        else: self.bi=BodyInspector(bodyId); self.bi.show()
    def setLinIxSlot(self,linIx):
        """Spinbox slot: switch to the contact with linear index linIx."""
        S=woo.master.scene
        try:
            C=S.dem.con[linIx]
            self.ids=C.id1,C.id2
            self.setupInteraction()
        except IndexError: pass
    def gotoId1Slot(self): self.gotoId(self.ids[0])
    def gotoId2Slot(self): self.gotoId(self.ids[1])
    def refreshEvent(self):
        """Timer slot: keep the spinbox range and the displayed contact valid."""
        S=woo.master.scene
        self.intrLinIxBox.setMaximum(len(S.dem.con)-1)
        # no ids yet -- try getting the first interaction, if it exists
        if not self.ids:
            try:
                i=S.dem.con[0]
                self.ids=i.id1,i.id2
                self.setupInteraction()
                return
            except IndexError: return # no interaction exists at all
        try: # try to fetch the contact we have
            c=S.dem.con[self.ids[0],self.ids[1]]
            self.intrLinIxBox.setValue(c.linIx) # update linIx, it can change asynchronously
        except (IndexError,AttributeError):
            self.ids=None
            self.setupInteraction() # will make it empty
class SimulationInspector(QWidget):
    """Top-level inspector window aggregating the Engines/Particles/Contacts/
    Cell/Scene tabs plus one ObjectEditor tab per field of the scene."""
    def __init__(self,parent=None):
        S=woo.master.scene
        QWidget.__init__(self,parent)
        self.setWindowTitle("Simulation Inspection")
        self.setWindowIcon(QIcon(":/woo-logo.svg"))
        self.tabWidget=QTabWidget(self)
        # Particle/contact tabs only make sense when a DEM field exists.
        demField=S.dem if S.hasDem else None
        self.engineInspector=EngineInspector(parent=None)
        self.bodyInspector=BodyInspector(parent=None,intrLinkCallback=self.changeIntrIds) if demField else None
        self.intrInspector=InteractionInspector(parent=None,bodyLinkCallback=self.changeBodyId) if demField else None
        self.cellInspector=CellInspector(parent=None)
        self.sceneInspector=SceneInspector(parent=None)
        for i,name,widget in [(0,'Engines',self.engineInspector),(1,'Particles',self.bodyInspector),(2,'Contacts',self.intrInspector),(3,'Cell',self.cellInspector),(4,'Scene',self.sceneInspector)]:
            if widget: self.tabWidget.addTab(widget,name)
        # add fields
        for i,f in enumerate(S.fields):
            path='woo.master.scene.fields[%d]'%i
            if S.hasDem and f==S.dem: path='woo.master.scene.dem'
            #if S.hasSparc and f==S.sparc: path='woo.master.scene.sparc'
            self.tabWidget.addTab(ObjectEditor(f,parent=None,path=path,showType=True),'%d. '%i+path)
        grid=QGridLayout(self); grid.setSpacing(0); grid.setContentsMargins(0,0,0,0)
        grid.addWidget(self.tabWidget)
        self.setLayout(grid)
    def changeIntrIds(self,ids):
        """Replace the Contacts tab with an inspector for the given id pair
        and switch to it (used as intrLinkCallback of BodyInspector)."""
        self.tabWidget.removeTab(2); self.intrInspector.close()
        self.intrInspector=InteractionInspector(ids=ids,parent=None,bodyLinkCallback=self.changeBodyId)
        self.tabWidget.insertTab(2,self.intrInspector,'Contacts')
        self.tabWidget.setCurrentIndex(2)
    def changeBodyId(self,id):
        """Show particle *id* in the Particles tab and switch to it
        (used as bodyLinkCallback of InteractionInspector)."""
        self.bodyInspector.changeIdSlot(id)
        self.tabWidget.setCurrentIndex(1)
| woodem/woo | py/qt/Inspector.py | Python | gpl-2.0 | 17,102 |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
# monkey patch bug in python 2.6 and lower
# http://bugs.python.org/issue6122 , http://bugs.python.org/issue1236 , http://bugs.python.org/issue1731717
# Backport fix for subprocess.Popen.wait being interrupted by EINTR on
# Python <= 2.6 (see bugs referenced above): retry waitpid on EINTR and
# tolerate ECHILD, then install the patched wait() on Popen.
if sys.version_info < (2, 7) and os.name != "nt":
    import errno
    import subprocess
    def _eintr_retry_call(func, *args):
        # Retry a syscall wrapper until it completes without EINTR.
        while True:
            try:
                return func(*args)
            except OSError, e:
                if e.errno == errno.EINTR:
                    continue
                raise
    #: Unused timeout option for older python version
    def wait(self, timeout=0):
        """
        Wait for child process to terminate. Returns returncode
        attribute.
        """
        if self.returncode is None:
            try:
                pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
            except OSError, e:
                if e.errno != errno.ECHILD:
                    raise
                #: This happens if SIGCLD is set to be ignored or waiting
                #: For child processes has otherwise been disabled for our
                #: process. This child is dead, we can't get the status.
                sts = 0
            self._handle_exitstatus(sts)
        return self.returncode
    subprocess.Popen.wait = wait
try:
import send2trash
except ImportError:
pass
from module.plugins.internal.Addon import Addon, Expose, threaded
from module.plugins.internal.Plugin import exists, replace_patterns
from module.plugins.internal.Extractor import ArchiveError, CRCError, PasswordError
from module.utils import fs_encode, save_join as fs_join, uniqify
class ArchiveQueue(object):
    """Persistent queue of package ids, stored base64-encoded in the
    owning plugin's key/value storage under "ExtractArchive:<storage>".
    """

    def __init__(self, plugin, storage):
        self.plugin = plugin
        self.storage = storage

    def get(self):
        """Return the stored package ids as a list of ints ([] on any error)."""
        try:
            return [int(pid) for pid in self.plugin.retrieve("ExtractArchive:%s" % self.storage, "").decode('base64').split()]
        except Exception:
            return []

    def set(self, value):
        """Store a list of ids (or a single id) as a space-separated string."""
        if isinstance(value, list):
            item = str(value)[1:-1].replace(' ', '').replace(',', ' ')
        else:
            item = str(value).strip()
        return self.plugin.store("ExtractArchive:%s" % self.storage, item.encode('base64')[:-1])

    def delete(self):
        """Remove the storage key entirely."""
        return self.plugin.delete("ExtractArchive:%s" % self.storage)

    def add(self, item):
        """Append *item* to the queue unless it is already present."""
        queue = self.get()
        if item not in queue:
            return self.set(queue + [item])
        else:
            return True

    def remove(self, item):
        """Remove *item*; drop the storage key once the queue is empty."""
        queue = self.get()
        try:
            queue.remove(item)
        except ValueError:
            pass
        # BUGFIX: the original tested `queue is []`, which is always False
        # (identity comparison against a fresh list object), so the storage
        # key was never cleaned up when the queue drained.
        if not queue:
            return self.delete()
        return self.set(queue)
class ExtractArchive(Addon):
    """pyLoad addon that extracts downloaded archives (RAR/7z/zip/...) after
    packages finish, with password-file support, optional repair, recursive
    extraction and optional deletion of the source archives."""
    __name__ = "ExtractArchive"
    __type__ = "hook"
    __version__ = "1.51"
    __status__ = "testing"
    __config__ = [("activated" , "bool" , "Activated" , True ),
                  ("fullpath" , "bool" , "Extract with full paths" , True ),
                  ("overwrite" , "bool" , "Overwrite files" , False ),
                  ("keepbroken" , "bool" , "Try to extract broken archives" , False ),
                  ("repair" , "bool" , "Repair broken archives (RAR required)" , False ),
                  ("usepasswordfile", "bool" , "Use password file" , True ),
                  ("passwordfile" , "file" , "Password file" , "passwords.txt" ),
                  ("delete" , "bool" , "Delete archive after extraction" , True ),
                  ("deltotrash" , "bool" , "Move to trash (recycle bin) instead delete", True ),
                  ("subfolder" , "bool" , "Create subfolder for each package" , False ),
                  ("destination" , "folder" , "Extract files to folder" , "" ),
                  ("extensions" , "str" , "Extract archives ending with extension" , "7z,bz2,bzip2,gz,gzip,lha,lzh,lzma,rar,tar,taz,tbz,tbz2,tgz,xar,xz,z,zip"),
                  ("excludefiles" , "str" , "Don't extract the following files" , "*.nfo,*.DS_Store,index.dat,thumb.db" ),
                  ("recursive" , "bool" , "Extract archives in archives" , True ),
                  ("waitall" , "bool" , "Run after all downloads was processed" , False ),
                  ("renice" , "int" , "CPU priority" , 0 )]
    __description__ = """Extract different kind of archives"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "vuolter@gmail.com"),
                   ("Immenz" , "immenz@gmx.net" )]
    # Normalize multipart rar names so all parts map to one archive name.
    NAME_REPLACEMENTS = [(r'\.part\d+\.rar$', ".part.rar")]
    def init(self):
        """Set up event hooks, persistent queues and default state."""
        self.event_map = {'allDownloadsProcessed': "all_downloads_processed",
                          'packageDeleted' : "package_deleted" }
        self.queue = ArchiveQueue(self, "Queue")
        self.failed = ArchiveQueue(self, "Failed")
        self.interval = 60
        self.extracting = False
        self.last_package = False
        self.extractors = []
        self.passwords = []
        self.repair = False
    def activate(self):
        """Probe the available extractor plugins and resume queued work."""
        for p in ("UnRar", "SevenZip", "UnZip"):
            try:
                module = self.pyload.pluginManager.loadModule("internal", p)
                klass = getattr(module, p)
                if klass.find():
                    self.extractors.append(klass)
                if klass.REPAIR:
                    self.repair = self.get_config('repair')
            except OSError, e:
                # errno 2: the external binary is simply not installed.
                if e.errno == 2:
                    self.log_warning(_("No %s installed") % p)
                else:
                    self.log_warning(_("Could not activate: %s") % p, e)
            except Exception, e:
                self.log_warning(_("Could not activate: %s") % p, e)
        if self.extractors:
            self.log_debug(*["Found %s %s" % (Extractor.__name__, Extractor.VERSION) for Extractor in self.extractors])
            self.extract_queued() #: Resume unfinished extractions
        else:
            self.log_info(_("No Extract plugins activated"))
    @threaded
    def extract_queued(self, thread):
        """Drain the persistent queue on a worker thread, re-checking for
        packages that were queued while extraction was running."""
        if self.extracting: #@NOTE: doing the check here for safty (called by coreReady)
            return
        self.extracting = True
        packages = self.queue.get()
        while packages:
            if self.last_package: #: Called from allDownloadsProcessed
                self.last_package = False
                if self.extract(packages, thread): #@NOTE: check only if all gone fine, no failed reporting for now
                    self.manager.dispatchEvent("all_archives_extracted")
                self.manager.dispatchEvent("all_archives_processed")
            else:
                if self.extract(packages, thread): #@NOTE: check only if all gone fine, no failed reporting for now
                    pass
            packages = self.queue.get() #: Check for packages added during extraction
        self.extracting = False
    #: Deprecated method, use `extract_package` instead
    @Expose
    def extractPackage(self, *args, **kwargs):
        """
        See `extract_package`
        """
        return self.extract_package(*args, **kwargs)
    @Expose
    def extract_package(self, *ids):
        """
        Extract packages with given id
        """
        for id in ids:
            self.queue.add(id)
        if not self.get_config('waitall') and not self.extracting:
            self.extract_queued()
    def package_deleted(self, pid):
        """Event hook: forget a package that was deleted before extraction."""
        self.queue.remove(pid)
    def package_finished(self, pypack):
        """Event hook: queue a finished package, start work unless waiting."""
        self.queue.add(pypack.id)
        if not self.get_config('waitall') and not self.extracting:
            self.extract_queued()
    def all_downloads_processed(self):
        """Event hook: kick off extraction once all downloads are done."""
        self.last_package = True
        if self.get_config('waitall') and not self.extracting:
            self.extract_queued()
    @Expose
    def extract(self, ids, thread=None): #@TODO: Use pypack, not pid to improve method usability
        """Extract the packages with the given ids.

        Returns True only when every matched archive extracted cleanly.
        """
        if not ids:
            return False
        # NOTE(review): `processed` is written nowhere and read nowhere —
        # apparently dead state kept from an older revision.
        processed = []
        extracted = []
        failed = []
        # Split a user-supplied "a,b;c" style option string into a list.
        toList = lambda string: string.replace(' ', '').replace(',', '|').replace(';', '|').split('|')
        destination = self.get_config('destination')
        subfolder = self.get_config('subfolder')
        fullpath = self.get_config('fullpath')
        overwrite = self.get_config('overwrite')
        renice = self.get_config('renice')
        recursive = self.get_config('recursive')
        delete = self.get_config('delete')
        keepbroken = self.get_config('keepbroken')
        extensions = [x.lstrip('.').lower() for x in toList(self.get_config('extensions'))]
        excludefiles = toList(self.get_config('excludefiles'))
        if extensions:
            self.log_debug("Use for extensions: %s" % "|.".join(extensions))
        #: Reload from txt file
        self.reload_passwords()
        download_folder = self.pyload.config.get("general", "download_folder")
        #: Iterate packages -> extractors -> targets
        for pid in ids:
            pypack = self.pyload.files.getPackage(pid)
            if not pypack:
                self.queue.remove(pid)
                continue
            self.log_info(_("Check package: %s") % pypack.name)
            #: Determine output folder
            out = fs_join(download_folder, pypack.folder, destination, "") #: Force trailing slash
            if subfolder:
                out = fs_join(out, pypack.folder)
            if not exists(out):
                os.makedirs(out)
            matched = False
            success = True
            # (name, link id, output dir) triples, de-duplicated by name.
            files_ids = dict((pylink['name'], ((fs_join(download_folder, pypack.folder, pylink['name'])), pylink['id'], out)) for pylink \
                        in sorted(pypack.getChildren().values(), key=lambda k: k['name'])).values() #: Remove duplicates
            #: Check as long there are unseen files
            while files_ids:
                new_files_ids = []
                if extensions:
                    # Keep only files whose name ends with a configured extension.
                    files_ids = [(fname, fid, fout) for fname, fid, fout in files_ids \
                                 if filter(lambda ext: fname.lower().endswith(ext), extensions)]
                for Extractor in self.extractors:
                    targets = Extractor.get_targets(files_ids)
                    if targets:
                        self.log_debug("Targets for %s: %s" % (Extractor.__name__, targets))
                        matched = True
                        for fname, fid, fout in targets:
                            name = os.path.basename(fname)
                            if not exists(fname):
                                self.log_debug(name, "File not found")
                                continue
                            self.log_info(name, _("Extract to: %s") % fout)
                            try:
                                pyfile = self.pyload.files.getFile(fid)
                                archive = Extractor(self,
                                                    fname,
                                                    fout,
                                                    fullpath,
                                                    overwrite,
                                                    excludefiles,
                                                    renice,
                                                    delete,
                                                    keepbroken,
                                                    fid)
                                thread.addActive(pyfile)
                                archive.init()
                                try:
                                    new_files = self._extract(pyfile, archive, pypack.password)
                                finally:
                                    pyfile.setProgress(100)
                                    thread.finishFile(pyfile)
                            except Exception, e:
                                self.log_error(name, e)
                                success = False
                                continue
                            #: Remove processed file and related multiparts from list
                            files_ids = [(fname, fid, fout) for fname, fid, fout in files_ids \
                                        if fname not in archive.items()]
                            self.log_debug("Extracted files: %s" % new_files)
                            for file in new_files:
                                self.set_permissions(file)
                            for filename in new_files:
                                file = fs_encode(fs_join(os.path.dirname(archive.filename), filename))
                                if not exists(file):
                                    self.log_debug("New file %s does not exists" % filename)
                                    continue
                                # Extracted files may themselves be archives.
                                if recursive and os.path.isfile(file):
                                    new_files_ids.append((filename, fid, os.path.dirname(filename))) #: Append as new target
                            self.manager.dispatchEvent("archive_extracted", pyfile, archive)
                files_ids = new_files_ids #: Also check extracted files
            if matched:
                if success:
                    extracted.append(pid)
                    self.manager.dispatchEvent("package_extracted", pypack)
                else:
                    failed.append(pid)
                    self.manager.dispatchEvent("package_extract_failed", pypack)
                    self.failed.add(pid)
            else:
                self.log_info(_("No files found to extract"))
            # NOTE(review): due to `and` binding tighter than `or` this reads
            # `not matched or (not success and subfolder)` — confirm intended.
            if not matched or not success and subfolder:
                try:
                    os.rmdir(out)
                except OSError:
                    pass
            self.queue.remove(pid)
        return True if not failed else False
    def _extract(self, pyfile, archive, password):
        """Verify (and optionally repair), then extract a single archive,
        trying the package password and the password file. Returns the list
        of extracted file names; raises on failure."""
        name = os.path.basename(archive.filename)
        pyfile.setStatus("processing")
        encrypted = False
        try:
            self.log_debug("Password: %s" % (password or "None provided"))
            passwords = uniqify([password] + self.get_passwords(False)) if self.get_config('usepasswordfile') else [password]
            for pw in passwords:
                try:
                    pyfile.setCustomStatus(_("archive testing"))
                    pyfile.setProgress(0)
                    archive.verify(pw)
                    pyfile.setProgress(100)
                except PasswordError:
                    if not encrypted:
                        self.log_info(name, _("Password protected"))
                        encrypted = True
                except CRCError, e:
                    self.log_debug(name, e)
                    self.log_info(name, _("CRC Error"))
                    if not self.repair:
                        raise CRCError("Archive damaged")
                    else:
                        self.log_warning(name, _("Repairing..."))
                        pyfile.setCustomStatus(_("archive repairing"))
                        pyfile.setProgress(0)
                        repaired = archive.repair()
                        pyfile.setProgress(100)
                        if not repaired and not self.get_config('keepbroken'):
                            raise CRCError("Archive damaged")
                        else:
                            self.add_password(pw)
                            break
                except ArchiveError, e:
                    raise ArchiveError(e)
                else:
                    # Verification succeeded with this password; remember it.
                    self.add_password(pw)
                    break
            pyfile.setCustomStatus(_("archive extracting"))
            pyfile.setProgress(0)
            if not encrypted or not self.get_config('usepasswordfile'):
                self.log_debug("Extracting using password: %s" % (password or "None"))
                archive.extract(password)
            else:
                # Try every known password until one extracts cleanly.
                for pw in filter(None, uniqify([password] + self.get_passwords(False))):
                    try:
                        self.log_debug("Extracting using password: %s" % pw)
                        archive.extract(pw)
                        self.add_password(pw)
                        break
                    except PasswordError:
                        self.log_debug("Password was wrong")
                else:
                    raise PasswordError
            pyfile.setProgress(100)
            pyfile.setStatus("processing")
            delfiles = archive.items()
            self.log_debug("Would delete: " + ", ".join(delfiles))
            if self.get_config('delete'):
                self.log_info(_("Deleting %s files") % len(delfiles))
                deltotrash = self.get_config('deltotrash')
                for f in delfiles:
                    file = fs_encode(f)
                    if not exists(file):
                        continue
                    if not deltotrash:
                        os.remove(file)
                    else:
                        try:
                            send2trash.send2trash(file)
                        except NameError:
                            # send2trash import failed at module load time.
                            self.log_warning(_("Unable to move %s to trash") % os.path.basename(f),
                                             _("Send2Trash lib not found"))
                        except Exception, e:
                            self.log_warning(_("Unable to move %s to trash") % os.path.basename(f),
                                             e.message)
                        else:
                            self.log_info(_("Moved %s to trash") % os.path.basename(f))
            self.log_info(name, _("Extracting finished"))
            extracted_files = archive.files or archive.list()
            return extracted_files
        except PasswordError:
            self.log_error(name, _("Wrong password" if password else "No password found"))
        except CRCError, e:
            self.log_error(name, _("CRC mismatch"), e)
        except ArchiveError, e:
            self.log_error(name, _("Archive error"), e)
        except Exception, e:
            self.log_error(name, _("Unknown error"), e)
        self.manager.dispatchEvent("archive_extract_failed", pyfile, archive)
        raise Exception(_("Extract failed"))
    #: Deprecated method, use `get_passwords` instead
    @Expose
    def getPasswords(self, *args, **kwargs):
        """
        See `get_passwords`
        """
        return self.get_passwords(*args, **kwargs)
    @Expose
    def get_passwords(self, reload=True):
        """
        List of saved passwords
        """
        if reload:
            self.reload_passwords()
        return self.passwords
    def reload_passwords(self):
        """Re-read the password list from the configured password file."""
        try:
            passwords = []
            file = fs_encode(self.get_config('passwordfile'))
            with open(file) as f:
                for pw in f.read().splitlines():
                    passwords.append(pw)
        except IOError, e:
            self.log_error(e)
        else:
            self.passwords = passwords
    #: Deprecated method, use `add_password` instead
    @Expose
    def addPassword(self, *args, **kwargs):
        """
        See `add_password`
        """
        return self.add_password(*args, **kwargs)
    @Expose
    def add_password(self, password):
        """
        Adds a password to saved list
        """
        try:
            # Newest password first, duplicates removed, then persist.
            self.passwords = uniqify([password] + self.passwords)
            file = fs_encode(self.get_config('passwordfile'))
            with open(file, "wb") as f:
                for pw in self.passwords:
                    f.write(pw + '\n')
        except IOError, e:
            self.log_error(e)
| ace02000/pyload | module/plugins/hooks/ExtractArchive.py | Python | gpl-3.0 | 21,362 |
# Copyright 2015 PLUMgrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from collections import MutableMapping
import ctypes as ct
import json
import os
import sys
# str on py3, unicode on py2 -- used for duck-typed string checks below.
basestring = (unicode if sys.version_info[0] < 3 else str)

# Shared library providing the BPF compile/load/map primitives.
lib = ct.CDLL("libbpfprog.so")

# ctypes prototypes for the module (compiler) API.
# keep in sync with bpf_common.h
lib.bpf_module_create.restype = ct.c_void_p
lib.bpf_module_create.argtypes = [ct.c_char_p, ct.c_char_p, ct.c_uint]
lib.bpf_module_create_from_string.restype = ct.c_void_p
lib.bpf_module_create_from_string.argtypes = [ct.c_char_p, ct.c_uint]
lib.bpf_module_destroy.restype = None
lib.bpf_module_destroy.argtypes = [ct.c_void_p]
lib.bpf_module_license.restype = ct.c_char_p
lib.bpf_module_license.argtypes = [ct.c_void_p]
lib.bpf_module_kern_version.restype = ct.c_uint
lib.bpf_module_kern_version.argtypes = [ct.c_void_p]
lib.bpf_function_start.restype = ct.c_void_p
lib.bpf_function_start.argtypes = [ct.c_void_p, ct.c_char_p]
lib.bpf_function_size.restype = ct.c_size_t
lib.bpf_function_size.argtypes = [ct.c_void_p, ct.c_char_p]
lib.bpf_table_fd.restype = ct.c_int
lib.bpf_table_fd.argtypes = [ct.c_void_p, ct.c_char_p]
lib.bpf_table_key_desc.restype = ct.c_char_p
lib.bpf_table_key_desc.argtypes = [ct.c_void_p, ct.c_char_p]
lib.bpf_table_leaf_desc.restype = ct.c_char_p
lib.bpf_table_leaf_desc.argtypes = [ct.c_void_p, ct.c_char_p]

# ctypes prototypes for the runtime (map / attach) API.
# keep in sync with libbpf.h
lib.bpf_get_next_key.restype = ct.c_int
lib.bpf_get_next_key.argtypes = [ct.c_int, ct.c_void_p, ct.c_void_p]
lib.bpf_lookup_elem.restype = ct.c_int
lib.bpf_lookup_elem.argtypes = [ct.c_int, ct.c_void_p, ct.c_void_p]
lib.bpf_update_elem.restype = ct.c_int
lib.bpf_update_elem.argtypes = [ct.c_int, ct.c_void_p, ct.c_void_p,
        ct.c_ulonglong]
lib.bpf_delete_elem.restype = ct.c_int
lib.bpf_delete_elem.argtypes = [ct.c_int, ct.c_void_p]
lib.bpf_open_raw_sock.restype = ct.c_int
lib.bpf_open_raw_sock.argtypes = [ct.c_char_p]
lib.bpf_attach_socket.restype = ct.c_int
lib.bpf_attach_socket.argtypes = [ct.c_int, ct.c_int]
lib.bpf_prog_load.restype = ct.c_int
lib.bpf_prog_load.argtypes = [ct.c_int, ct.c_void_p, ct.c_size_t,
        ct.c_char_p, ct.c_uint, ct.c_char_p, ct.c_uint]
lib.bpf_attach_kprobe.restype = ct.c_int
lib.bpf_attach_kprobe.argtypes = [ct.c_int, ct.c_char_p, ct.c_char_p, ct.c_int, ct.c_int, ct.c_int]
lib.bpf_detach_kprobe.restype = ct.c_int
lib.bpf_detach_kprobe.argtypes = [ct.c_char_p]

# Registry of currently attached kprobes: event name -> perf event fd.
# Consulted by cleanup_kprobes() at interpreter exit.
open_kprobes = {}
@atexit.register
def cleanup_kprobes():
    # Interpreter-exit hook: close and detach every kprobe that is still
    # registered in open_kprobes so no tracing events are leaked.
    for probe_name, probe_fd in open_kprobes.items():
        os.close(probe_fd)
        lib.bpf_detach_kprobe(("-:kprobes/%s" % probe_name).encode("ascii"))
class BPF(object):
    """Compile, load, and interact with BPF programs via libbpfprog.so.

    A BPF instance owns one compiled module (built from a C source file
    or string) and hands out Function handles for loaded programs and
    Table views over the module's maps.
    """

    # Program types; keep in sync with the enum in bpf_common.h.
    SOCKET_FILTER = 1
    KPROBE = 2
    SCHED_CLS = 3
    SCHED_ACT = 4

    class Function(object):
        # Lightweight handle: owning BPF object, program name, and the
        # fd returned by bpf_prog_load().
        def __init__(self, bpf, name, fd):
            self.bpf = bpf
            self.name = name
            self.fd = fd

    class Table(MutableMapping):
        """dict-like wrapper around one BPF map, addressed by its fd.

        Keys and leaves are ctypes instances of the `keytype` and
        `leaftype` passed to the constructor.
        """

        def __init__(self, bpf, map_fd, keytype, leaftype):
            self.bpf = bpf
            self.map_fd = map_fd
            # ctypes types used to (de)serialize keys and values.
            self.Key = keytype
            self.Leaf = leaftype

        def __getitem__(self, key):
            # Look up `key` in the kernel map; KeyError if absent.
            key_p = ct.pointer(key)
            leaf = self.Leaf()
            leaf_p = ct.pointer(leaf)
            res = lib.bpf_lookup_elem(self.map_fd,
                    ct.cast(key_p, ct.c_void_p),
                    ct.cast(leaf_p, ct.c_void_p))
            if res < 0:
                raise KeyError
            return leaf

        def __setitem__(self, key, leaf):
            # Insert or overwrite (flags=0 means create-or-update).
            key_p = ct.pointer(key)
            leaf_p = ct.pointer(leaf)
            res = lib.bpf_update_elem(self.map_fd,
                    ct.cast(key_p, ct.c_void_p),
                    ct.cast(leaf_p, ct.c_void_p), 0)
            if res < 0:
                raise Exception("Could not update table")

        def __len__(self):
            # O(n): BPF maps expose no size query, so walk every key.
            i = 0
            for k in self: i += 1
            return i

        def __delitem__(self, key):
            key_p = ct.pointer(key)
            res = lib.bpf_delete_elem(self.map_fd, ct.cast(key_p, ct.c_void_p))
            if res < 0:
                raise KeyError

        def __iter__(self):
            return BPF.Table.Iter(self, self.Key)

        def iter(self): return self.__iter__()
        def keys(self): return self.__iter__()

        class Iter(object):
            # Iterates the map's keys by repeatedly calling Table.next();
            # the kernel walk starts from a zero-initialized key.
            def __init__(self, table, keytype):
                self.Key = keytype
                self.table = table
                self.key = self.Key()
            def __iter__(self):
                return self
            def __next__(self):
                # Python 3 protocol delegates to the py2-style next().
                return self.next()
            def next(self):
                self.key = self.table.next(self.key)
                return self.key

        def next(self, key):
            # Return the key following `key`; raises StopIteration when
            # the kernel reports no further entries.
            next_key = self.Key()
            next_key_p = ct.pointer(next_key)
            key_p = ct.pointer(key)
            res = lib.bpf_get_next_key(self.map_fd,
                    ct.cast(key_p, ct.c_void_p),
                    ct.cast(next_key_p, ct.c_void_p))
            if res < 0:
                raise StopIteration()
            return next_key

    @staticmethod
    def _find_file(filename):
        """ If filename is invalid, search in ./ of argv[0] """
        if filename:
            if not os.path.isfile(filename):
                # Retry relative to the directory of the running script.
                t = "/".join([os.path.abspath(os.path.dirname(sys.argv[0])), filename])
                if os.path.isfile(t):
                    filename = t
                else:
                    raise Exception("Could not find file %s" % filename)
        return filename

    def __init__(self, src_file="", hdr_file="", text=None, debug=0):
        # Compile a BPF module either from `text` (C source string) or
        # from src_file/hdr_file paths; raises if compilation fails.
        self.debug = debug
        self.funcs = {}
        if text:
            self.module = lib.bpf_module_create_from_string(text.encode("ascii"), self.debug)
        else:
            src_file = BPF._find_file(src_file)
            hdr_file = BPF._find_file(hdr_file)
            self.module = lib.bpf_module_create(src_file.encode("ascii"),
                    hdr_file.encode("ascii"), self.debug)
        if self.module == None:
            raise Exception("Failed to compile BPF module %s" % src_file)

    def load_func(self, func_name, prog_type):
        # Load `func_name` from the compiled module as a program of
        # `prog_type`; results are cached per BPF instance.
        if func_name in self.funcs:
            return self.funcs[func_name]
        if lib.bpf_function_start(self.module, func_name.encode("ascii")) == None:
            raise Exception("Unknown program %s" % func_name)
        fd = lib.bpf_prog_load(prog_type,
                lib.bpf_function_start(self.module, func_name.encode("ascii")),
                lib.bpf_function_size(self.module, func_name.encode("ascii")),
                lib.bpf_module_license(self.module),
                lib.bpf_module_kern_version(self.module),
                None, 0)
        if fd < 0:
            # Dump the kernel verifier log to help diagnose the failure.
            print((ct.c_char * 65536).in_dll(lib, "bpf_log_buf").value)
            #print(ct.c_char_p.in_dll(lib, "bpf_log_buf").value)
            raise Exception("Failed to load BPF program %s" % func_name)
        fn = BPF.Function(self, func_name, fd)
        self.funcs[func_name] = fn
        return fn

    # Map from C type names (as they appear in table descriptors) to
    # ctypes types.
    # NOTE(review): u"char" appears twice -- in a dict literal the later
    # entry wins, so "char" maps to ct.c_byte and the ct.c_char entry is
    # dead.  The second key was presumably meant to be u"signed char";
    # confirm before changing.
    str2ctype = {
        u"_Bool": ct.c_bool,
        u"char": ct.c_char,
        u"wchar_t": ct.c_wchar,
        u"char": ct.c_byte,
        u"unsigned char": ct.c_ubyte,
        u"short": ct.c_short,
        u"unsigned short": ct.c_ushort,
        u"int": ct.c_int,
        u"unsigned int": ct.c_uint,
        u"long": ct.c_long,
        u"unsigned long": ct.c_ulong,
        u"long long": ct.c_longlong,
        u"unsigned long long": ct.c_ulonglong,
        u"float": ct.c_float,
        u"double": ct.c_double,
        u"long double": ct.c_longdouble
    }

    @staticmethod
    def _decode_table_type(desc):
        # `desc` is either a plain type-name string or a JSON-decoded
        # [struct_name, [field, ...]] where each field is (name, type)
        # or (name, type, bitfield-width).  Returns a ctypes type.
        if isinstance(desc, basestring):
            return BPF.str2ctype[desc]
        fields = []
        for t in desc[1]:
            if len(t) == 2:
                fields.append((t[0], BPF._decode_table_type(t[1])))
            elif len(t) == 3:
                fields.append((t[0], BPF._decode_table_type(t[1]), t[2]))
        # Build a ctypes.Structure subclass on the fly.
        cls = type(str(desc[0]), (ct.Structure,), dict(_fields_=fields))
        return cls

    def get_table(self, name, keytype=None, leaftype=None):
        # Return a Table view of the map `name`; key/leaf ctypes are
        # decoded from the module's JSON descriptors unless supplied.
        map_fd = lib.bpf_table_fd(self.module, name.encode("ascii"))
        if map_fd < 0:
            raise Exception("Failed to find BPF Table %s" % name)
        if not keytype:
            key_desc = lib.bpf_table_key_desc(self.module, name.encode("ascii"))
            if not key_desc:
                raise Exception("Failed to load BPF Table %s key desc" % name)
            keytype = BPF._decode_table_type(json.loads(key_desc.decode()))
        if not leaftype:
            leaf_desc = lib.bpf_table_leaf_desc(self.module, name.encode("ascii"))
            if not leaf_desc:
                raise Exception("Failed to load BPF Table %s leaf desc" % name)
            leaftype = BPF._decode_table_type(json.loads(leaf_desc.decode()))
        return BPF.Table(self, map_fd, keytype, leaftype)

    @staticmethod
    def attach_raw_socket(fn, dev):
        # Open a raw socket on network device `dev` and attach program
        # `fn` to it; the socket fd is stored on fn.sock for later use.
        if not isinstance(fn, BPF.Function):
            raise Exception("arg 1 must be of type BPF.Function")
        sock = lib.bpf_open_raw_sock(dev.encode("ascii"))
        if sock < 0:
            errstr = os.strerror(ct.get_errno())
            raise Exception("Failed to open raw device %s: %s" % (dev, errstr))
        res = lib.bpf_attach_socket(sock, fn.fd)
        if res < 0:
            errstr = os.strerror(ct.get_errno())
            raise Exception("Failed to attach BPF to device %s: %s"
                    % (dev, errstr))
        fn.sock = sock

    @staticmethod
    def attach_kprobe(fn, event, pid=0, cpu=-1, group_fd=-1):
        # Attach program `fn` to a kprobe on kernel function `event`;
        # returns the perf event fd and records it in open_kprobes.
        if not isinstance(fn, BPF.Function):
            raise Exception("arg 1 must be of type BPF.Function")
        # '+' is not valid in tracing event names, so sanitize it.
        ev_name = "p_" + event.replace("+", "_")
        desc = "p:kprobes/%s %s" % (ev_name, event)
        res = lib.bpf_attach_kprobe(fn.fd, ev_name.encode("ascii"),
                desc.encode("ascii"), pid, cpu, group_fd)
        if res < 0:
            raise Exception("Failed to attach BPF to kprobe")
        open_kprobes[ev_name] = res
        return res

    @staticmethod
    def detach_kprobe(event):
        # Close and detach a kprobe previously attached with attach_kprobe.
        ev_name = "p_" + event.replace("+", "_")
        if ev_name not in open_kprobes:
            raise Exception("Kprobe %s is not attached" % event)
        os.close(open_kprobes[ev_name])
        desc = "-:kprobes/%s" % ev_name
        res = lib.bpf_detach_kprobe(desc.encode("ascii"))
        if res < 0:
            raise Exception("Failed to detach BPF from kprobe")
        del open_kprobes[ev_name]

    @staticmethod
    def attach_kretprobe(fn, event, pid=-1, cpu=0, group_fd=-1):
        # Attach `fn` to the *return* of kernel function `event`.
        # NOTE(review): the pid/cpu defaults (pid=-1, cpu=0) are swapped
        # relative to attach_kprobe (pid=0, cpu=-1) -- confirm which is
        # intended before relying on the defaults.
        if not isinstance(fn, BPF.Function):
            raise Exception("arg 1 must be of type BPF.Function")
        ev_name = "r_" + event.replace("+", "_")
        desc = "r:kprobes/%s %s" % (ev_name, event)
        res = lib.bpf_attach_kprobe(fn.fd, ev_name.encode("ascii"),
                desc.encode("ascii"), pid, cpu, group_fd)
        if res < 0:
            raise Exception("Failed to attach BPF to kprobe")
        open_kprobes[ev_name] = res
        return res

    @staticmethod
    def detach_kretprobe(event):
        # Close and detach a kretprobe previously attached with
        # attach_kretprobe.
        ev_name = "r_" + event.replace("+", "_")
        if ev_name not in open_kprobes:
            raise Exception("Kretprobe %s is not attached" % event)
        os.close(open_kprobes[ev_name])
        desc = "-:kprobes/%s" % ev_name
        res = lib.bpf_detach_kprobe(desc.encode("ascii"))
        if res < 0:
            raise Exception("Failed to detach BPF from kprobe")
        del open_kprobes[ev_name]
| hgn/bcc | src/python/bpf/__init__.py | Python | apache-2.0 | 12,104 |
#!/usr/bin/env python
# Driver script: for every configured bowl pool, compute results and
# write rankings/results/picks tables (txt, csv, html) under the serve
# directory, optionally producing analysis plots.

import os
import sys
import bowl_pool
import datetime

from optparse import OptionParser

# Legacy single-pool input paths, kept for reference:
# bowlResultsFileName = "input/bowlResults.csv"
# bowlPicksFileName = "input/bowlPicks.csv"
# STPicksFileName = "input/STPicks.csv"
# bonusResultsFileName = "input/bonusResults.csv"
# bonusPicksFileName = "input/bonusPicks.csv"
# dogScoreBonusFileName = "input/dogScoreBonus.csv"

# -d <dir> selects the output (serve) directory; defaults to ./srv.
parser = OptionParser()
parser.set_defaults(srvdir='srv')
parser.add_option('-d', dest='srvdir')
(options, args) = parser.parse_args()
srvdir = options.srvdir
if not os.path.isdir(srvdir):
    os.mkdir(srvdir)

# Top-level index listing all pools (reStructuredText source).
mainfile = os.path.join(srvdir, 'index.txt')
main_fout = open(mainfile,'w')
main_fout.write('**Bowl Pools**\n')
main_fout.write('\n')

# Pools in CompletedBowlPools get only an index link; their pages are
# not regenerated.
BowlPoolList = ['2014-2015','2015-2016','2016-2017', '2017-2018','2021-2022']
CompletedBowlPools = ['2014-2015','2015-2016', '2016-2017', '2017-2018']

# Entrant names to highlight in the plots; empty disables highlighting.
#highlightList = ['Nerf Herder','Static', 'Lightning', 'Thunder',
#                 'D-Bo','Em214', 'GlamKam', 'Bumblerooski', '#MamaBear',
#                 'Eclipse']
#highlightList = ['Bumblerooski', "Can't Stop a Badger!", "Knights of Ren",
#                 "Leeerroyyy Jeennkinns!!", "Ms. Snuffleupagus", "Sweet Lou"]
highlightList = []
performAnalysis = True
#performAnalysis = False

for bowlPoolName in BowlPoolList:
    main_fout.write('- `%s <%s/>`_'%(bowlPoolName, bowlPoolName))
    if bowlPoolName in CompletedBowlPools:
        # Completed pool: link only, skip regeneration.
        main_fout.write('\n')
        continue
    # setup bowl pool object
    input_dir = os.path.join('input/', bowlPoolName)
    if not os.path.isdir(input_dir):
        sys.stdout.write('error: could not find input directory, %s'%input_dir)
        sys.exit()
    output_dir = os.path.join(srvdir, bowlPoolName)
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    nbins = 20  # histogram bin count for the score distribution plot
    B = bowl_pool.BowlPool(input_dir, output_dir, highlightList)
    # run bowl pool
    B.computeResults()
    B.writeScoreTotals()
    B.writeScoreTotalsCSVFile()
    B.writePicksTable()
    B.writePicksCSVFile()
    B.writeResultsTable()
    B.writeResultsCSVFile()
    # analyze results
    if performAnalysis:
        B.plotScoresHistogram(nbins)
        B.plotTrajectories(highlightList)
    # # print info and debug
    # B.listBowlData()
    for highlightedName in highlightList:
        B.printPicks(highlightedName)
    B.checkSTBowls()
    # Per-pool index page with a timestamp and last-scored-bowl note.
    bp_index_path = os.path.join(output_dir,'index')
    bp_fout = open(bp_index_path + '.txt', 'w')
    bp_fout.write('**%s Bowl Pool**\n'%bowlPoolName)
    bp_fout.write('\n')
    fmt = "%Y-%m-%d %H:%M:%S"
    lastBowlIdx = B._nResults - 1
    today = datetime.datetime.today()
    bp_fout.write('| *Last updated: %s*\n'%today.strftime(fmt))
    bp_fout.write('| *Last bowl scored: %s (%s vs. %s)*\n\n'%(B._bowlList[lastBowlIdx],
                                                              B._favList[lastBowlIdx],
                                                              B._dogList[lastBowlIdx]))
    bp_fout.write('\n')
    bp_fout.write('- `Rankings <rankings.html>`_ (`csv <rankings.csv>`__)\n')
    bp_fout.write('- `Results <results.html>`_ (`csv <results.csv>`__)\n')
    bp_fout.write('- `Picks <picks.html>`_ (`csv <picks.csv>`__)\n')
    # bp_fout.write('- Rankings (`csv <rankings.csv>`__ | `html <rankings.html>`__)\n')
    # bp_fout.write('- Results (`csv <results.csv>`__ | `html <results.html>`__)\n')
    # bp_fout.write('- Picks (`csv <picks.csv>`__ | `html <picks.html>`__)\n')
    bp_fout.write('\n')
    if performAnalysis:
        bp_fout.write('Analysis\n')
        bp_fout.write('\n')
        bp_fout.write('- `Score distribution <scoresHistogram.png>`_\n')
        bp_fout.write('- `Points Trajectories <pointsTrajectories.png>`_\n')
        bp_fout.write('- `Rankings Trajectories <rankingsTrajectories.png>`_\n')
        bp_fout.write('\n')
    bp_fout.close()
    # Render all reST sources to HTML with docutils' rst2html tool.
    os.system('rst2html %s.txt %s.html'%(bp_index_path, bp_index_path))
    rankings_path = os.path.join(output_dir, 'rankings')
    os.system('rst2html %s.txt %s.html'%(rankings_path, rankings_path))
    results_path = os.path.join(output_dir, 'results')
    os.system('rst2html %s.txt %s.html'%(results_path, results_path))
    picks_path = os.path.join(output_dir, 'picks')
    os.system('rst2html %s.txt %s.html'%(picks_path, picks_path))
    main_fout.write('\n')

main_fout.close()
| jorodo/cfb-pool | main.py | Python | gpl-3.0 | 4,375 |
# Packaging script for reynard (run e.g. `python setup.py install`).
# Removed a stray debug print of glob.glob('scripts/*') that polluted
# every packaging run; the script list below is maintained explicitly.
from distutils.core import setup

setup(name='reynard',
      version='dev',
      packages=['reynard',
                'reynard.monitors',
                'reynard.servers'],
      scripts=['scripts/reynard_basic_cli.py',
               'scripts/reynard_basic_server.py']
      )
| ewanbarr/reynard | setup.py | Python | mit | 323 |
"""SCons.Defaults
Builders and other things for the local site. Here's where we'll
duplicate the functionality of autoconf until we move it into the
installation procedure or use something like qmconf.
The code that reads the registry to find MSVC components was borrowed
from distutils.msvccompiler.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import division
__revision__ = "src/engine/SCons/Defaults.py 5023 2010/06/14 22:05:46 scons"
import os
import errno
import shutil
import stat
import time
import sys
import SCons.Action
import SCons.Builder
import SCons.CacheDir
import SCons.Environment
import SCons.PathList
import SCons.Subst
import SCons.Tool
# A placeholder for a default Environment (for fetching source files
# from source code management systems and the like).  This must be
# initialized later, after the top-level directory is set by the calling
# interface.
_default_env = None

# Lazily instantiate the default environment so the overhead of creating
# it doesn't apply when it's not needed.
def _fetch_DefaultEnvironment(*args, **kw):
    """
    Returns the already-created default construction environment.

    Arguments are accepted (so this can replace DefaultEnvironment
    transparently) but ignored -- the environment must already exist.
    """
    global _default_env
    return _default_env
def DefaultEnvironment(*args, **kw):
    """
    Initial public entry point for creating the default construction
    Environment.

    After creating the environment, we overwrite our name
    (DefaultEnvironment) with the _fetch_DefaultEnvironment() function,
    which more efficiently returns the initialized default construction
    environment without checking for its existence.

    (This function still exists with its _default_check because someone
    else (*cough* Script/__init__.py *cough*) may keep a reference
    to this function.  So we can't use the fully functional idiom of
    having the name originally be a something that *only* creates the
    construction environment and then overwrites the name.)
    """
    global _default_env
    if not _default_env:
        import SCons.Util
        _default_env = SCons.Environment.Environment(*args, **kw)
        # Prefer content (MD5) decisions; fall back to timestamps on
        # builds where the md5 module is unavailable.
        if SCons.Util.md5:
            _default_env.Decider('MD5')
        else:
            _default_env.Decider('timestamp-match')
        # Rebind the public name to the cheap fetch-only variant for all
        # subsequent calls.
        global DefaultEnvironment
        DefaultEnvironment = _fetch_DefaultEnvironment
        _default_env._CacheDir_path = None
    return _default_env
# Emitters for setting the shared attribute on object files,
# and an action for checking that all of the source files
# going into a shared library are, in fact, shared.
def StaticObjectEmitter(target, source, env):
    """Builder emitter: mark every target node as a static (non-shared)
    object so SharedFlagChecker can later reject it for shared links."""
    for node in target:
        node.attributes.shared = None
    return (target, source)
def SharedObjectEmitter(target, source, env):
    """Builder emitter: flag every target node as a shared object
    (attributes.shared = 1) for the shared-library consistency check."""
    for node in target:
        node.attributes.shared = 1
    return (target, source)
def SharedFlagChecker(source, target, env):
    """Raise UserError if any source object going into a shared target
    was not compiled as shared.

    The check is skipped when the construction variable
    $STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME expands to a true-ish value.
    """
    same = env.subst('$STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME')
    if same in ('0', '', 'False'):
        for node in source:
            try:
                is_shared = node.attributes.shared
            except AttributeError:
                is_shared = None
            if not is_shared:
                raise SCons.Errors.UserError("Source file: %s is static and is not compatible with shared target: %s" % (node, target[0]))
# Pre-link action that runs the shared/static consistency check.
SharedCheck = SCons.Action.Action(SharedFlagChecker, None)

# Some people were using these variable name before we made
# SourceFileScanner part of the public interface.  Don't break their
# SConscript files until we've given them some fair warning and a
# transition period.
CScan = SCons.Tool.CScanner
DScan = SCons.Tool.DScanner
LaTeXScan = SCons.Tool.LaTeXScanner
ObjSourceScan = SCons.Tool.SourceFileScanner
ProgScan = SCons.Tool.ProgramScanner

# These aren't really tool scanners, so they don't quite belong with
# the rest of those in Tool/__init__.py, but I'm not sure where else
# they should go.  Leave them here for now.
import SCons.Scanner.Dir
DirScanner = SCons.Scanner.Dir.DirScanner()
DirEntryScanner = SCons.Scanner.Dir.DirEntryScanner()

# Actions for common languages.  Each pair is (command variable,
# optional display-string variable).
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR")
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR")
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR")
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR")

ASAction = SCons.Action.Action("$ASCOM", "$ASCOMSTR")
ASPPAction = SCons.Action.Action("$ASPPCOM", "$ASPPCOMSTR")

LinkAction = SCons.Action.Action("$LINKCOM", "$LINKCOMSTR")
ShLinkAction = SCons.Action.Action("$SHLINKCOM", "$SHLINKCOMSTR")

LdModuleLinkAction = SCons.Action.Action("$LDMODULECOM", "$LDMODULECOMSTR")

# Common tasks that we allow users to perform in platform-independent
# ways by creating ActionFactory instances.
ActionFactory = SCons.Action.ActionFactory
def get_paths_str(dest):
    """Render `dest` for display: a single path as '"p"', a list of
    paths as '["a", "b", ...]' (each element str()-converted)."""
    if SCons.Util.is_List(dest):
        quoted = ['"' + str(element) + '"' for element in dest]
        return '[' + ', '.join(quoted) + ']'
    else:
        return '"' + str(dest) + '"'
def chmod_func(dest, mode):
    # Apply os.chmod(mode) to each path in dest (single item or list),
    # invalidating SCons' cached Node info first.
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for element in dest:
        os.chmod(str(element), mode)

def chmod_strfunc(dest, mode):
    # Display form shown to the user, e.g. Chmod("f", 0644).
    return 'Chmod(%s, 0%o)' % (get_paths_str(dest), mode)

Chmod = ActionFactory(chmod_func, chmod_strfunc)
def copy_func(dest, src):
    # Copy files or trees: a list of files into a directory, a single
    # file onto a path, or a whole directory tree (symlinks preserved).
    SCons.Node.FS.invalidate_node_memos(dest)
    if SCons.Util.is_List(src) and os.path.isdir(dest):
        for file in src:
            shutil.copy2(file, dest)
        return 0
    elif os.path.isfile(src):
        return shutil.copy2(src, dest)
    else:
        # Directory source: recursive copy; the final 1 preserves symlinks.
        return shutil.copytree(src, dest, 1)

Copy = ActionFactory(copy_func,
                     lambda dest, src: 'Copy("%s", "%s")' % (dest, src),
                     convert=str)
def delete_func(dest, must_exist=0):
    # Remove files and/or directory trees.  Missing entries are ignored
    # unless must_exist is true (then os.unlink raises below).
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for entry in dest:
        entry = str(entry)
        if not must_exist and not os.path.exists(entry):
            continue
        if not os.path.exists(entry) or os.path.isfile(entry):
            os.unlink(entry)
            continue
        else:
            # Directory: remove recursively, ignoring errors (the 1).
            shutil.rmtree(entry, 1)
            # NOTE: this trailing continue is redundant (end of loop body).
            continue

def delete_strfunc(dest, must_exist=0):
    # Display form, e.g. Delete("path").
    return 'Delete(%s)' % get_paths_str(dest)

Delete = ActionFactory(delete_func, delete_strfunc)
def mkdir_func(dest):
    """Create each directory in `dest` (single item or list), including
    intermediate directories; an already-existing directory is not an
    error.

    Changes vs. original: uses `except ... as e` (valid on Python 2.6+
    and required on 3.x) and drops an unused local (`p`).
    """
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for entry in dest:
        try:
            os.makedirs(str(entry))
        except os.error as e:
            # EEXIST (or native Windows error 183) on an existing
            # directory simply means there is nothing to do.
            if (e.args[0] == errno.EEXIST or
                (sys.platform=='win32' and e.args[0]==183)) \
                and os.path.isdir(str(entry)):
                pass            # not an error if already exists
            else:
                raise

Mkdir = ActionFactory(mkdir_func,
                      lambda dir: 'Mkdir(%s)' % get_paths_str(dir))
def move_func(dest, src):
    # Rename/move src to dest, invalidating cached Node info for both.
    SCons.Node.FS.invalidate_node_memos(dest)
    SCons.Node.FS.invalidate_node_memos(src)
    shutil.move(src, dest)

Move = ActionFactory(move_func,
                     lambda dest, src: 'Move("%s", "%s")' % (dest, src),
                     convert=str)
def touch_func(dest):
    """Set atime/mtime of every file in `dest` to "now", creating the
    file empty if it does not exist (like the Unix touch(1) command).

    Change vs. original: the file handle opened when creating a missing
    file is now closed immediately instead of being leaked.
    """
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for file in dest:
        file = str(file)
        mtime = int(time.time())
        if os.path.exists(file):
            # Preserve the existing access time semantics: keep atime
            # as read before updating both stamps below.
            atime = os.path.getatime(file)
        else:
            open(file, 'w').close()
            atime = mtime
        os.utime(file, (atime, mtime))

Touch = ActionFactory(touch_func,
                      lambda file: 'Touch(%s)' % get_paths_str(file))
# Internal utility functions

def _concat(prefix, list, suffix, env, f=lambda x: x, target=None, source=None):
    """
    Creates a new list from 'list' by first interpolating each element
    in the list using the 'env' dictionary and then calling f on the
    list, and finally calling _concat_ixes to concatenate 'prefix' and
    'suffix' onto each element of the list.

    `f` may return None, in which case the interpolated list is kept.
    """
    if not list:
        return list

    l = f(SCons.PathList.PathList(list).subst_path(env, target, source))
    if l is not None:
        list = l

    return _concat_ixes(prefix, list, suffix, env)
def _concat_ixes(prefix, list, suffix, env):
    """
    Creates a new list from 'list' by concatenating the 'prefix' and
    'suffix' arguments onto each element of the list.  A trailing space
    on 'prefix' or leading space on 'suffix' will cause them to be put
    into separate list elements rather than being concatenated.
    """

    result = []

    # ensure that prefix and suffix are strings
    prefix = str(env.subst(prefix, SCons.Subst.SUBST_RAW))
    suffix = str(env.subst(suffix, SCons.Subst.SUBST_RAW))

    for x in list:
        # File nodes pass through untouched (their string form is
        # computed later by the command-line machinery).
        if isinstance(x, SCons.Node.FS.File):
            result.append(x)
            continue
        x = str(x)
        if x:

            if prefix:
                if prefix[-1] == ' ':
                    # Trailing space: emit the prefix as its own word.
                    result.append(prefix[:-1])
                elif x[:len(prefix)] != prefix:
                    # Only prepend if the element isn't already prefixed.
                    x = prefix + x

            result.append(x)

            if suffix:
                if suffix[0] == ' ':
                    # Leading space: emit the suffix as its own word.
                    result.append(suffix[1:])
                elif x[-len(suffix):] != suffix:
                    result[-1] = result[-1]+suffix

    return result
def _stripixes(prefix, itms, suffix, stripprefixes, stripsuffixes, env, c=None):
    """
    This is a wrapper around _concat()/_concat_ixes() that checks for
    the existence of prefixes or suffixes on list items and strips them
    where it finds them.  This is used by tools (like the GNU linker)
    that need to turn something like 'libfoo.a' into '-lfoo'.

    `c` is the concatenation callable to finish with; if not callable,
    a custom env['_concat'] is honored for backwards compatibility,
    otherwise _concat_ixes is used.
    """

    if not itms:
        return itms

    if not callable(c):
        env_c = env['_concat']
        if env_c != _concat and callable(env_c):
            # There's a custom _concat() method in the construction
            # environment, and we've allowed people to set that in
            # the past (see test/custom-concat.py), so preserve the
            # backwards compatibility.
            c = env_c
        else:
            c = _concat_ixes

    # Substitute construction variables inside the strip patterns too.
    stripprefixes = list(map(env.subst, SCons.Util.flatten(stripprefixes)))
    stripsuffixes = list(map(env.subst, SCons.Util.flatten(stripsuffixes)))

    stripped = []
    for l in SCons.PathList.PathList(itms).subst_path(env, None, None):
        # File nodes pass through unstripped.
        if isinstance(l, SCons.Node.FS.File):
            stripped.append(l)
            continue

        if not SCons.Util.is_String(l):
            l = str(l)

        for stripprefix in stripprefixes:
            lsp = len(stripprefix)
            if l[:lsp] == stripprefix:
                l = l[lsp:]
                # Do not strip more than one prefix
                break

        for stripsuffix in stripsuffixes:
            lss = len(stripsuffix)
            if l[-lss:] == stripsuffix:
                l = l[:-lss]
                # Do not strip more than one suffix
                break

        stripped.append(l)

    return c(prefix, stripped, suffix, env)
def processDefines(defs):
    """process defines, resolving strings, lists, dictionaries, into a list of
    strings

    Each entry becomes either "NAME" or "NAME=VALUE".
    """
    if SCons.Util.is_List(defs):
        # List entries are either bare names or (name, value) pairs.
        l = [str(d[0]) + '=' + str(d[1])
             if (SCons.Util.is_List(d) or isinstance(d, tuple))
             else str(d)
             for d in defs]
    elif SCons.Util.is_Dict(defs):
        # The items in a dictionary are stored in random order, but
        # if the order of the command-line options changes from
        # invocation to invocation, then the signature of the command
        # line will change and we'll get random unnecessary rebuilds.
        # Consequently, we have to sort the keys to ensure a
        # consistent order...
        l = [str(k) if v is None else str(k) + '=' + str(v)
             for k, v in sorted(defs.items())]
    else:
        l = [str(defs)]
    return l
def _defines(prefix, defs, suffix, env, c=_concat_ixes):
    """A wrapper around _concat_ixes that turns a list or string
    into a list of C preprocessor command-line definitions.
    """

    return c(prefix, env.subst_path(processDefines(defs)), suffix, env)
class NullCmdGenerator(object):
    """A no-op command generator: calling an instance simply echoes back
    the command it was constructed with, untouched.

    Useful to neutralize other command generators, e.g.:

        env["DO_NOTHING"] = NullCmdGenerator
        env["LINKCOM"] = "${DO_NOTHING('$LINK $SOURCES $TARGET')}"
    """

    def __init__(self, cmd):
        # The command (string or otherwise) to hand back verbatim.
        self.cmd = cmd

    def __call__(self, target, source, env, for_signature=None):
        return self.cmd
class Variable_Method_Caller(object):
    """A class for finding a construction variable on the stack and
    calling one of its methods.

    We use this to support "construction variables" in our string
    eval()s that actually stand in for methods--specifically, use
    of "RDirs" in call to _concat that should actually execute the
    "TARGET.RDirs" method.  (We used to support this by creating a little
    "build dictionary" that mapped RDirs to the method, but this got in
    the way of Memoizing construction environments, because we had to
    create new environment objects to hold the variables.)
    """
    def __init__(self, variable, method):
        # Name of the local variable to search for, and the name of the
        # method to invoke on it once found.
        self.variable = variable
        self.method = method
    def __call__(self, *args, **kw):
        # Deliberately raise and catch to obtain a traceback whose frame
        # chain we can walk; see the comment below about f_back.
        try: 1//0
        except ZeroDivisionError:
            # Don't start iterating with the current stack-frame to
            # prevent creating reference cycles (f_back is safe).
            frame = sys.exc_info()[2].tb_frame.f_back
        variable = self.variable
        # Walk outward through the callers' frames until a local with
        # the requested name (and a truthy value) is found.
        while frame:
            if variable in frame.f_locals:
                v = frame.f_locals[variable]
                if v:
                    method = getattr(v, self.method)
                    return method(*args, **kw)
            frame = frame.f_back
        # No caller holds the variable: silently do nothing.
        return None
# Baseline construction variables shared by every new Environment.
ConstructionEnvironment = {
    'BUILDERS'      : {},
    'SCANNERS'      : [],
    'CONFIGUREDIR'  : '#/.sconf_temp',
    'CONFIGURELOG'  : '#/config.log',
    'CPPSUFFIXES'   : SCons.Tool.CSuffixes,
    'DSUFFIXES'     : SCons.Tool.DSuffixes,
    'ENV'           : {},
    'IDLSUFFIXES'   : SCons.Tool.IDLSuffixes,
#    'LATEXSUFFIXES' : SCons.Tool.LaTeXSuffixes, # moved to the TeX tools generate functions
    # Helper callables used by the $_..FLAGS expansions below.
    '_concat'       : _concat,
    '_defines'      : _defines,
    '_stripixes'    : _stripixes,
    '_LIBFLAGS'     : '${_concat(LIBLINKPREFIX, LIBS, LIBLINKSUFFIX, __env__)}',
    '_LIBDIRFLAGS'  : '$( ${_concat(LIBDIRPREFIX, LIBPATH, LIBDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
    '_CPPINCFLAGS'  : '$( ${_concat(INCPREFIX, CPPPATH, INCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
    '_CPPDEFFLAGS'  : '${_defines(CPPDEFPREFIX, CPPDEFINES, CPPDEFSUFFIX, __env__)}',
    'TEMPFILE'      : NullCmdGenerator,
    # Stack-walking proxies so strings can call TARGET methods; see
    # Variable_Method_Caller above.
    'Dir'           : Variable_Method_Caller('TARGET', 'Dir'),
    'Dirs'          : Variable_Method_Caller('TARGET', 'Dirs'),
    'File'          : Variable_Method_Caller('TARGET', 'File'),
    'RDirs'         : Variable_Method_Caller('TARGET', 'RDirs'),
}
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| faarwa/EngSocP5 | zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Defaults.py | Python | gpl-3.0 | 16,921 |
#!/usr/bin/env python3
import logging as log
from os import environ as env
from os import path
import modules.extra as e
from modules.assembler import Assembler
from modules.emulator import Emulator
from modules.export import export
from modules.settings import Settings
from modules.simulation import Simulation
if __name__ == '__main__':
    # Make the bundled tools in ./bin reachable for subprocesses.
    fullFilename = path.dirname(__file__)
    env['PATH'] += ':' + path.join(fullFilename, 'bin')
    # Log everything (NOTSET) with level/module/line context.
    fmt = '%(levelname)s-%(module)s:%(lineno)s: %(message)s'
    log.basicConfig(format=fmt, level=log.NOTSET)
    e.settings = Settings()
    # Load the machine description and wire up the pipeline:
    # assemble -> simulate -> emulate -> export.
    machine = e.readJSONFile(e.settings.machineFile)
    assembler = Assembler(machine)
    simulation = Simulation(machine)
    emulator = Emulator(machine, simulation.data)
    if not emulator.advance(e.settings.step):
        # Exit code 2 signals the emulator stopped before completing.
        log.error('emulator premature finish')
        exit(2)
    export(machine)
    e.settings.cleanup()
    exit(0)
| fredmorcos/attic | projects/vo-tools/mgen.py | Python | isc | 942 |
# -*- coding: utf-8 -*-
from gluon import current
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
class S3MainMenu(default.S3MainMenu):
"""
Custom Application Main Menu:
The main menu consists of several sub-menus, each of which can
be customised separately as a method of this class. The overall
composition of the menu is defined in the menu() method, which can
be customised as well:
Function Sub-Menu Access to (standard)
menu_modules() the modules menu the Eden modules
menu_admin() the Admin menu System/User Administration
menu_lang() the Language menu Selection of the GUI locale
menu_auth() the User menu Login, Logout, User Profile
menu_help() the Help menu Contact page, About page
The standard uses the MM layout class for main menu items - but you
can of course use a custom layout class which you define in layouts.py.
Additional sub-menus can simply be defined as additional functions in
this class, and then be included in the menu() method.
Each sub-menu function returns a list of menu items, only the menu()
function must return a layout class instance.
"""
# -------------------------------------------------------------------------
    @classmethod
    def menu_modules(cls):
        """ Custom Modules Menu """

        AUTHENTICATED = current.session.s3.system_roles.AUTHENTICATED
        # Deployment-configurable label for people records (e.g. "Staff").
        INDIVIDUALS = current.deployment_settings.get_hrm_staff_label()

        # restrict=[AUTHENTICATED] hides an entry from anonymous visitors.
        return [
            MM("Dashboard", c="default", f="index",
               args=["dashboard"],
               restrict=[AUTHENTICATED],
               ),
            MM("Contacts", link=False, restrict=[AUTHENTICATED])(
                MM("Networks", c="org", f="group"),
                MM("Groups", c="hrm", f="group"),
                MM("Organizations", c="org", f="organisation"),
                MM(INDIVIDUALS, c="hrm", f="staff"),
            ),
            MM("Facilities", c="org", f="facility", m="summary",
               restrict=[AUTHENTICATED])(
            ),
            MM("Services", c="cms", f="page", vars={"name": "Services"}),
            MM("News", c="cms", f="newsfeed", args="datalist",
               icon="icon-news",
               restrict=[AUTHENTICATED],
               ),
            MM("Map", c="gis", f="index",
               icon="icon-map",
               restrict=[AUTHENTICATED],
               ),
            MM("Data", c="cms", f="page", vars={"name": "Data"}),
            # External links open in a new tab/window.
            MM("Get Involved", link=False)(
                MM("Events",
                   url="http://nycprepared.org/events",
                   _target="_blank",
                   ),
                MM("Learn more",
                   url="http://nycprepared.org",
                   _target="_blank",
                   ),
                MM("Donate",
                   url="https://sarapis.org/donate-to-nycprepared",
                   _target="_blank",
                   ),
            ),
        ]
# -------------------------------------------------------------------------
@classmethod
def menu_help(cls, **attr):
""" Help Menu """
ADMIN = current.auth.get_system_roles().ADMIN
menu_help = MM("Help", c="default", f="help", link=False, **attr)(
MM("User Guide", f="help"),
MM("Contact us", f="contact"),
#MM("About", f="about", restrict=[ADMIN]),
)
return menu_help
# -------------------------------------------------------------------------
@classmethod
def menu_auth(cls, **attr):
""" Auth Menu """
auth = current.auth
logged_in = auth.is_logged_in()
if not logged_in:
request = current.request
login_next = URL(args=request.args, vars=request.vars)
if request.controller == "default" and \
request.function == "user" and \
"_next" in request.get_vars:
login_next = request.get_vars["_next"]
self_registration = current.deployment_settings.get_security_registration_visible()
if self_registration == "index":
register = MM("Register", c="default", f="index", m="register",
vars=dict(_next=login_next),
check=self_registration)
else:
register = MM("Register", m="register",
vars=dict(_next=login_next),
check=self_registration)
menu_auth = MM("Login", c="default", f="user", m="login",
_id="auth_menu_login",
vars=dict(_next=login_next), **attr)(
MM("Login", m="login",
vars=dict(_next=login_next)),
register,
MM("Lost Password", m="retrieve_password")
)
else:
# Logged-in
menu_auth = MM(auth.user.email, c="default", f="user",
translate=False,
link=False,
_id="auth_menu_email",
**attr)(
MM("Logout", m="logout", _id="auth_menu_logout"),
#MM("User Profile", m="profile"),
MM("Personal Profile", c="default", f="person", m="update"),
#MM("Contact Details", c="pr", f="person",
# args="contact",
# vars={"person.pe_id" : auth.user.pe_id}),
#MM("Subscriptions", c="pr", f="person",
# args="pe_subscription",
# vars={"person.pe_id" : auth.user.pe_id}),
MM("Change Password", m="change_password"),
SEP(),
MM({"name": current.T("Rapid Data Entry"),
"id": "rapid_toggle",
"value": current.session.s3.rapid_data_entry is True,
},
f="rapid",
),
)
return menu_auth
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
    """
    Custom Controller Menus

        The options menu (left-hand options menu) is individual for each
        controller, so each controller has its own options menu function
        in this class.

        Each of these option menu functions can be customised separately,
        by simply overriding (re-defining) the default function. The
        options menu function must return an instance of the item layout.

        The standard menu uses the M item layout class, but you can of
        course also use any other layout class which you define in
        layouts.py (can also be mixed).

        Make sure additional helper functions in this class don't match
        any current or future controller prefix (e.g. by using an
        underscore prefix).
    """

    # -------------------------------------------------------------------------
    @staticmethod
    def hrm():
        """ HRM / Human Resources Management """

        s3 = current.session.s3
        ADMIN = s3.system_roles.ADMIN
        AUTHENTICATED = s3.system_roles.AUTHENTICATED
        # Deployment-configurable label for individual staff/contacts.
        INDIVIDUALS = current.deployment_settings.get_hrm_staff_label()

        return M()(
            M("Networks", c="org", f="group")(
                M("Search"),
                M("Create", m="create"),
            ),
            M("Groups", c="hrm", f="group")(
                M("Search"),
                M("Create", m="create"),
            ),
            M("Organizations", c="org", f="organisation")(
                M("Search"),
                M("Create", m="create",
                  restrict=[AUTHENTICATED]),
            ),
            M(INDIVIDUALS, c="hrm", f="staff", t="hrm_human_resource")(
                M("Search"),
                M("Create", m="create"),
            ),
            M("Your Personal Profile", c="default", f="person",
              m="update")(
            ),
            # Import tools are restricted to administrators.
            M("Import", link=False,
              restrict=[ADMIN])(
                M("Import Contacts", c="hrm", f="person", m="import",
                  vars={"group":"staff"}),
                M("Import Organizations", c="org", f="organisation",
                  m="import"),
                M("Import Groups", c="hrm", f="group", m="import"),
            ),
            # Lookup-table maintenance (admin only).
            M("Organization Types", c="org", f="organisation_type",
              restrict=[ADMIN])(
                M("View"),
                M("Create", m="create"),
            ),
            M("Job Title Catalog", c="hrm", f="job_title",
              restrict=[ADMIN])(
                M("View"),
                M("Create", m="create"),
            ),
            M("Skills Catalog", c="hrm", f="skill",
              restrict=[ADMIN])(
                M("View"),
                M("Create", m="create"),
            ),
            M("Organization Approval", c="org", f="organisation",
              m="review", restrict=[ADMIN])(
            ),
        )

    # -------------------------------------------------------------------------
    def org(self):
        """ ORG / Organization Registry """

        if not current.auth.is_logged_in():
            # No Side Menu
            return None
        else:
            request = current.request
            function = request.function
            if function in ("facility", "facility_type"):
                ADMIN = current.session.s3.system_roles.ADMIN
                if function == "facility" and request.args(0) == "summary":
                    # On the summary page the tabs are switched client-side.
                    LIST = M("List", _onclick="$('#ui-id-1').click()")
                    MAP = M("Map", _onclick="$('#ui-id-3').click()")
                    REPORT = M("Report", _onclick="$('#ui-id-2').click()")
                else:
                    LIST = M("List", m="summary")
                    MAP = M("Map", m="summary", vars={"t":2})
                    REPORT = M("Report", m="summary", vars={"t":1})
                return M()(
                    M("Create a Facility", c="org", f="facility", m="create")(
                    ),
                    M("View Facilities", c="org", f="facility", m="summary")(
                        LIST,
                        MAP,
                        REPORT,
                    ),
                    M("Import Facilities", c="org", f="facility", m="import",
                      restrict=[ADMIN])(
                    ),
                    M("Facility Types", c="org", f="facility_type",
                      restrict=[ADMIN])(
                        M("View"),
                        M("Create", m="create"),
                    ),
                )
            else:
                # organisation, organisation_type or hrm
                return self.hrm()

    # -------------------------------------------------------------------------
    def pr(self):
        """ Person Registry """

        if not current.auth.is_logged_in():
            # No Side Menu
            return None
        else:
            # Same menu as the staff/HRM controllers.
            return self.hrm()
# END =========================================================================
| sahana/Turkey | modules/templates/NYC/menus.py | Python | mit | 12,151 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-29 06:54
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the ``UserProfile`` model.

    Auto-generated by Django 1.10.4. Applied migrations must stay
    consistent with the migration history, so edit with care.
    """

    initial = True

    dependencies = [
        # UserProfile.user points at the configured (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('job', models.CharField(max_length=50)),
                ('description', models.TextField(max_length=500, null=True)),
                # Deleting the user cascades to the profile row.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| MMKnight/d-logger | user/migrations/0001_initial.py | Python | mit | 884 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example requests buyer acceptance for a single proposal.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
PROPOSAL_ID = 'INSERT_PROPOSAL_ID_HERE'
def main(client, proposal_id):
    """Request buyer acceptance for the proposal with the given id.

    Args:
        client: An initialized ``AdManagerClient``.
        proposal_id: Id of the programmatic proposal to push to Marketplace.
    """
    # Service exposing proposal queries and actions.
    proposal_service = client.GetService('ProposalService', version='v202108')

    # Statement selecting only the requested proposal.
    statement = (ad_manager.StatementBuilder(version='v202108')
                 .Where('id = :proposalId')
                 .WithBindVariable('proposalId', proposal_id))

    num_pushed = 0

    # Page through the matching proposals, acting on each page.
    while True:
        page = proposal_service.getProposalsByStatement(statement.ToStatement())
        results = page['results'] if 'results' in page else None
        if not results:
            break

        # Announce every proposal on this page.
        for proposal in results:
            print('Programmatic proposal with id "%s", name "%s", and status '
                  '"%s" will be pushed to Marketplace.' % (proposal['id'],
                                                          proposal['name'],
                                                          proposal['status']))

        # Request buyer acceptance for everything matched by the statement.
        result = proposal_service.performProposalAction(
            {'xsi_type': 'RequestBuyerAcceptance'}, statement.ToStatement())
        if result and int(result['numChanges']) > 0:
            num_pushed += int(result['numChanges'])

        statement.offset += statement.limit

    # Summarize the outcome.
    if num_pushed > 0:
        print('\nNumber of programmatic proposals pushed to Marketplace: %s'
              % num_pushed)
    else:
        print('\nNo programmatic proposals were pushed to Marketplace.')
if __name__ == '__main__':
    # Initialize client object (reads credentials from googleads.yaml).
    ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
    main(ad_manager_client, PROPOSAL_ID)
| googleads/googleads-python-lib | examples/ad_manager/v202108/proposal_service/request_buyers_acceptance.py | Python | apache-2.0 | 2,506 |
from xml.dom import minidom, Node
from xml.parsers.expat import ExpatError, ErrorString
class GoodReadsParser(object):
    """Parse XML responses from the goodreads.com API into plain dicts.

    Books, shelves and authors come back as ``dict``/``list`` structures
    keyed by the XML element names. On malformed input a Qt message box
    is shown and ``None`` is returned.

    NOTE(review): ``QMessageBox`` is not imported in this module's visible
    import block -- presumably provided elsewhere; otherwise the error
    paths raise ``NameError``. Confirm before relying on them.
    """

    def parse_result(self, url_handler):
        """Parse the raw response into a DOM; None on XML errors."""
        try:
            return minidom.parse(url_handler)
        except ExpatError:
            QMessageBox(QMessageBox.Critical, "Error",
                        "Problem while reading from goodreads.").exec_()
            return None

    def get_text(self, element):
        """Concatenate the text children of *element*; stripped, or None."""
        value = ""
        for text in element.childNodes:
            try:
                value += text.data
            except AttributeError:
                QMessageBox(QMessageBox.Critical, "Error",
                            "Problem while reading from goodreads.").exec_()
                return None
        return value.strip() if value else None

    def parse_books(self, url_handler):
        """Return a list of book dicts parsed from the response."""
        dom = self.parse_result(url_handler)
        # Keep only non-empty book dicts.
        return [book for book in
                (self.handle_book(el) for el in dom.getElementsByTagName("book"))
                if book]

    def handle_book(self, book_element):
        """Build a dict from a single <book> element."""
        book = {}
        for node in book_element.childNodes:
            # Skip whitespace text nodes and the (unsupported) shelves list.
            if node.nodeType == Node.TEXT_NODE or node.nodeName == "shelves":
                continue
            if node.nodeName == "authors":
                book[node.nodeName] = self.handle_authors(node)
            else:
                book[node.nodeName] = self.get_text(node)
        return book

    def parse_shelfs(self, url_handler):
        """Return a list of shelf dicts parsed from the response."""
        dom = self.parse_result(url_handler)
        shelfs = []
        for el in dom.getElementsByTagName("user_shelf"):
            parsed = self.handle_shelf(el)
            if parsed:
                shelfs.append(parsed)
        return shelfs

    def handle_shelf(self, shelf_element):
        """Build a dict from a single <user_shelf> element."""
        return {node.nodeName: self.get_text(node)
                for node in shelf_element.childNodes
                if node.nodeType != Node.TEXT_NODE}

    def handle_authors(self, authors_element):
        """Collect author dicts from an <authors> element."""
        authors = []
        for node in authors_element.childNodes:
            # handle_author() yields None for text nodes / empty authors.
            parsed = self.handle_author(node)
            if parsed:
                authors.append(parsed)
        return authors

    def handle_author(self, author_element):
        """Build a dict from a single <author> element; None if empty."""
        author = {node.nodeName: self.get_text(node)
                  for node in author_element.childNodes
                  if node.nodeType != Node.TEXT_NODE}
        return author or None
| DenitsaKostova/Bookoholic | bookoholic/goodread/goodreads_parser.py | Python | gpl-2.0 | 3,083 |
import logging
from copy import copy
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from PyQt5.QtCore import QObject
pyqtWrapperType = type(QObject)
__all__ = ["SimulationModule", "SimulationException",
"Trajectory", "Feedforward", "Controller", "Limiter",
"ModelMixer", "Model", "ModelException",
"Solver", "Disturbance", "Sensor", "ObserverMixer", "Observer"]
class SimulationModuleMeta(ABCMeta, pyqtWrapperType):
    """Metaclass combining :class:`abc.ABCMeta` with Qt's QObject metaclass.

    Required so that :class:`SimulationModule` can inherit from
    :class:`QObject` and still use :func:`abc.abstractmethod`.
    """
    pass
class SimulationException(Exception):
    """Base class for all errors raised by simulation modules."""
    pass
class SimulationModule(QObject, metaclass=SimulationModuleMeta):
    """
    Smallest unit of the simulation framework.

    This class provides necessary functions like output calculation and holds
    all settings that can be accessed by the user.

    The :py:attr:`public_settings` are read by the
    :py:class:`.SimulationInterface` and then rendered by the GUI. All entries
    stated in this dictionary will be available as changeable settings for the
    module.
    On initialization, a possibly modified (in terms of its values) version of
    this dict will be passed back to this class and is thenceforward available
    via the :py:attr:`settings` property.

    The most important method is :py:func:`calc_output` which is called by the
    :py:class:`Simulator` to retrieve this module's output.

    Args:
        settings(OrderedDict): Settings for this simulation module.
            These entries will be shown in the properties view and can be
            changed by the user. The important entries for this base class are:

            `output info`:
                Dict holding an information dictionaries with keys `Name` and
                `Unit` for each element in the output data.
                If available, these information are used to display reasonable
                names in the result view and to display the corresponding
                units for the result plots.

    Warn:
        Do NOT use '.' in the `output_info` name field.

    TODO:
        Get rid of the point restriction
    """

    def __init__(self, settings):
        QObject.__init__(self, None)
        self._logger = logging.getLogger(self.__class__.__name__)

        assert isinstance(settings, dict)
        # Keep a private (shallow) copy so later mutations do not affect
        # the caller's dict.
        self._settings = copy(settings)
        # Run every simulation tick unless a divider is configured.
        self._settings["tick divider"] = settings.get("tick divider", 1)
        # Filled in by the simulator once the solver step size is known.
        self._settings["step width"] = None
        # References to other modules are not part of the plain settings.
        self._settings.pop("modules", None)

    @property
    @abstractmethod
    def public_settings(self):
        # Settings exposed to (and editable through) the GUI.
        pass

    @property
    def settings(self):
        # The (possibly user-modified) settings of this module.
        return self._settings

    @property
    def tick_divider(self):
        # Module output is only recomputed every `tick divider` steps.
        return self._settings["tick divider"]

    @property
    def step_width(self):
        # Solver step width in seconds (None until set by the simulator).
        return self._settings["step width"]

    @step_width.setter
    def step_width(self, value):
        self._settings["step width"] = value

    @abstractmethod
    def calc_output(self, input_vector):
        # Compute this module's output for the given input dictionary.
        pass
class ModelException(SimulationException):
    """
    Exception to be raised if the current system state violates modelling
    assumptions (see :py:meth:`Model.check_consistency`).
    """
    pass
class Model(SimulationModule):
    """
    Base class for all user defined system models in state-space form.

    Args:
        settings (dict): Dictionary holding the config options for this module.
            It must contain the following keys:

            :input_count:
                The length of the input vector for this model.
            :state_count:
                The length of the state vector for this model.
            :initial state:
                The initial state vector for this model.
    """

    def __init__(self, settings):
        SimulationModule.__init__(self, settings)
        # Mandatory configuration entries for every model.
        assert ("state_count" in settings)
        assert ("input_count" in settings)
        assert ("initial state" in settings)
        # The initial state must match the declared state dimension.
        assert len(settings["initial state"]) == settings["state_count"]

    @property
    def initial_state(self):
        """ Return the initial state of the system. """
        return self._settings["initial state"]

    @abstractmethod
    def state_function(self, t, x, args):
        """
        Calculate the state derivatives of a system with state x at time t.

        Args:
            x(Array-like): System state.
            t(float): System time.

        Returns:
            Temporal derivative of the system state at time t.
        """
        pass

    def root_function(self, x):
        """
        Check whether a reinitialisation of the integrator should be performed.

        This can be the case if there are discontinuities in the system dynamics
        such as switching.

        Args:
            x(array-like): Current system state.

        Returns:
            tuple:
                * bool: `True` if reset is advised.
                * array-like: State to continue with.
        """
        # Default: no discontinuities, keep the state unchanged.
        return False, x

    def check_consistency(self, x):
        """
        Check whether the assumptions, made in the modelling process are
        violated.

        Args:
            x: Current system state

        Raises:
            :py:class:`ModelException` : If a violation is detected. This will
                stop the simulation process.
        """
        pass
class SolverException(SimulationException):
    """Raised when numerical integration of the model fails."""
    pass
class Solver(SimulationModule):
    """
    Base Class for solver implementations
    """

    def __init__(self, settings):
        assert isinstance(settings["modules"]["Model"], Model)
        self._model = settings["modules"]["Model"]
        # Output lags one step behind: calc_output returns the previously
        # integrated state and already computes the next one.
        self.next_output = None
        SimulationModule.__init__(self, settings)

    def calc_output(self, input_vector):
        # Forward the current system input before integrating.
        self.set_input(input_vector["system_input"])
        output = self.next_output
        self.next_output = self.integrate(input_vector["time"])
        try:
            # Let the model veto states that violate its assumptions.
            self._model.check_consistency(self.next_output)
        except ModelException as e:
            raise SolverException("Timestep Integration failed! "
                                  "Model raised: {0}".format(e))
        return output

    @abstractmethod
    def set_input(self, *args):
        # Forward the current system input to the underlying integrator.
        pass

    @abstractmethod
    def integrate(self, t):
        # Advance the integration up to time t and return the new state.
        pass

    @property
    @abstractmethod
    def t(self):
        # Current solver time.
        pass

    @property
    @abstractmethod
    def successful(self):
        # Whether the last integration step succeeded.
        pass
class ControllerException(SimulationException):
    """Raised when a controller cannot compute its output."""
    pass
class Controller(SimulationModule):
    """
    Base class for controllers.

    Args:
        settings (dict): Dictionary holding the config options for this module.
            It must contain the following keys:

            :input_order:
                The order of required derivatives from the trajectory generator.
            :input_type:
                Source for the feedback calculation and one of the following:
                `system_state` , `system_output` , `Observer` or `Sensor` .
    """
    # selectable input sources for controller
    input_sources = ["system_state", "system_output", "Observer", "Sensor"]

    def __init__(self, settings):
        SimulationModule.__init__(self, settings)
        assert ("input_order" in settings)
        assert ("input_type" in settings)
        assert (settings["input_type"] in self.input_sources)

    @property
    def input_order(self):
        # Number of trajectory derivatives this controller requires.
        return self._settings["input_order"]

    def calc_output(self, input_vector):
        # Pick the configured feedback source from the input vector; a
        # KeyError propagates if the source is configured but missing.
        input_values = next((input_vector[src] for src in self.input_sources
                             if src == self._settings["input_type"]), None)
        if input_values is None:
            raise ControllerException("Selected Input not available")
        # Trajectory and feedforward signals are optional.
        trajectory_values = input_vector.get("Trajectory", None)
        feedforward_values = input_vector.get("Feedforward", None)
        return self._control(input_vector["time"], trajectory_values,
                             feedforward_values, input_values)

    @abstractmethod
    def _control(self, time, trajectory_values=None, feedforward_values=None,
                 input_values=None, **kwargs):
        """
        Placeholder for control law calculations.

        For more sophisticated implementations overload :py:func:`calc_output` .

        Args:
            time (float): Current time.
            trajectory_values (array-like): Desired values from the trajectory
                generator.
            feedforward_values (array-like): Output of feedforward block.
            input_values (array-like): The input values selected by
                ``input_type`` .
            **kwargs: Placeholder for custom parameters.

        Returns:
            Array: Control output.
        """
        pass
class Observer(SimulationModule):
    """
    Base class for observers
    """

    def __init__(self, settings):
        SimulationModule.__init__(self, settings)

    def calc_output(self, input_vector):
        system_input = input_vector.get("system_input", None)
        # Prefer a mixed/preprocessed measurement if an ObserverMixer is
        # present, otherwise fall back to the raw system output.
        if "ObserverMixer" in input_vector:
            system_output = input_vector["ObserverMixer"]
        elif "system_output" in input_vector:
            system_output = input_vector["system_output"]
        else:
            raise SimulationException("No Observer input specified")

        return self._observe(input_vector["time"], system_input, system_output)

    @abstractmethod
    def _observe(self, time, system_input, system_output):
        """
        Placeholder for observer law.

        Args:
            time: Current time.
            system_input: Current system input.
            system_output: Current system output.

        Returns:
            Estimated system state
        """
        pass
class Feedforward(SimulationModule):
    """
    Base class for all feedforward implementations
    """

    def __init__(self, settings):
        # Keep a reference to the model for derived implementations.
        self._model = settings["modules"]["Model"]
        SimulationModule.__init__(self, settings)
        assert ("input_order" in settings)

    @property
    def input_order(self):
        # Number of trajectory derivatives this feedforward requires.
        return self._settings["input_order"]

    def calc_output(self, input_dict):
        return self._feedforward(input_dict["time"], input_dict["Trajectory"])

    @abstractmethod
    def _feedforward(self, time, trajectory_values):
        """
        Placeholder for feedforward calculations.

        Args:
            time (float): Current time.
            trajectory_values(array-like): Desired values from the trajectory
                generator.

        Returns:
            Array: Feedforward output. This signal can be added to the
            controllers output via the :py:class:`.ModelMixer` and is also
            directly passed to the controller.
        """
        pass
class TrajectoryException(SimulationException):
    """Raised when a trajectory generator cannot produce desired values."""
    pass
class Trajectory(SimulationModule):
    """
    Base class for all trajectory generators
    """

    def __init__(self, settings):
        # The trajectory must supply enough derivatives for whichever of
        # controller/feedforward requires the most.
        control_order = 0
        feedforward_order = 0
        if "Controller" in settings["modules"].keys():
            control_order = settings["modules"]["Controller"].input_order
        if "Feedforward" in settings["modules"].keys():
            feedforward_order = settings["modules"]["Feedforward"].input_order
        settings.update(differential_order=max([control_order,
                                                feedforward_order]))
        SimulationModule.__init__(self, settings)

    def calc_output(self, input_vector):
        desired = self._desired_values(input_vector["time"])
        return desired

    @abstractmethod
    def _desired_values(self, t):
        """
        Placeholder for calculations of desired values.

        Args:
            t (float): Time.

        Returns:
            Array: Trajectory output. This should always be a two-dimensional
            array holding the components in to 0th and their derivatives in
            the 1th axis.
        """
        pass
class MixerException(Exception):
    """Raised when signal mixing fails."""
    pass
class SignalMixer(SimulationModule):
    """
    Base class for all Signal mixing modules

    Settings must contain an ``input signals`` list naming the entries of
    the simulation input vector that are to be combined.
    """

    def __init__(self, settings):
        assert "input signals" in settings
        SimulationModule.__init__(self, settings)

    def calc_output(self, input_vector):
        # Collect the configured signals (in input_vector order) and let
        # the subclass combine them via _mix().
        signals = [value for signal, value in input_vector.items()
                   if signal in self._settings["input signals"]]
        return self._mix(signals)
class ModelMixer(SignalMixer):
    """Mixer combining signals (e.g. controller and feedforward output)
    into the system/model input."""
    pass
class ObserverMixer(SignalMixer):
    """Mixer preparing measurement signals for an :class:`Observer`."""
    pass
class Limiter(SimulationModule):
    """
    Base class for all limiter variants

    Settings must contain an ``input_signal`` key naming the entry of the
    simulation input vector that is to be limited.
    """

    def __init__(self, settings):
        assert "input_signal" in settings
        SimulationModule.__init__(self, settings)

    def calc_output(self, input_dict):
        return self._limit(input_dict[self._settings["input_signal"]])

    def _limit(self, values):
        """
        Placeholder for actual limit calculations.

        Args:
            values(array-like): Values to limit.

        Returns:
            Array: Limited output.
        """
        # Default implementation: pass the values through unchanged.
        return values
class Sensor(SimulationModule):
    """
    Base class for all sensor variants

    Settings must contain an ``input signal`` key naming the entry of the
    simulation input vector that is to be measured.
    """

    def __init__(self, settings):
        assert "input signal" in settings
        SimulationModule.__init__(self, settings)

    def calc_output(self, input_dict):
        return self._measure(input_dict[self._settings["input signal"]])

    def _measure(self, value):
        """
        Placeholder for measurement calculations.

        One may reorder or remove state elements or introduce measurement delays
        here.

        Args:
            value (array-like float): Values from the source selected by the
                ``input_signal`` property.

        Returns:
            array-like float: 'Measured' values.
        """
        # Default implementation: ideal sensor, pass through unchanged.
        return value
class Disturbance(SimulationModule):
    """
    Base class for all disturbance variants

    Settings must contain an ``input signal`` key naming the entry of the
    simulation input vector the disturbance may depend on.
    """

    def __init__(self, settings):
        assert "input signal" in settings
        SimulationModule.__init__(self, settings)

    def calc_output(self, input_dict):
        return self._disturb(input_dict[self._settings["input signal"]])

    @abstractmethod
    def _disturb(self, value):
        """
        Placeholder for disturbance calculations.

        If the noise is to be dependent on the measured signal use its `value`
        to create the noise.

        Args:
            value (array-like float): Values from the source selected by the
                ``input_signal`` property.

        Returns:
            array-like float: Noise that will be mixed with a signal later on.
        """
        pass
| cklb/PyMoskito | pymoskito/simulation_modules.py | Python | bsd-3-clause | 14,724 |
import json
import re
import pkg_resources
import requests
from bs4 import BeautifulSoup
import threading
import string
import random
import time
import socket
import socks
from core.alert import *
from core.targets import target_type
from core.targets import target_to_host
from core.load_modules import load_file_path
from lib.socks_resolver.engine import getaddrinfo
from core._time import now
from core.log import __log_into_file
from core.compatible import version
def extra_requirements_dict():
    """Extra per-module requirement lists for this engine (none)."""
    return dict()
def _parse_webpage(target, timeout_sec, language, retries, socks_proxy, scan_cmd, scan_id):
    """Fetch *target* and return a dict describing the page.

    The returned dict carries ``url``, ``headers``, ``response`` (body
    text), ``html`` (BeautifulSoup tree), ``scripts`` (src attributes)
    and ``metatags`` (lower-cased name -> content). Returns ``None`` when
    the request fails.
    """
    webpage = {}
    tries = 0
    # Route the request through a SOCKS proxy by monkey-patching the
    # socket module, if one was configured.
    if socks_proxy is not None:
        socks_version = socks.SOCKS5 if socks_proxy.startswith(
            'socks5://') else socks.SOCKS4
        socks_proxy = socks_proxy.rsplit('://')[1]
        if '@' in socks_proxy:
            # Proxy given as user:pass@host:port.
            socks_username = socks_proxy.rsplit(':')[0]
            socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
            socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
                                    int(socks_proxy.rsplit(':')[-1]), username=socks_username,
                                    password=socks_password)
            socket.socket = socks.socksocket
            socket.getaddrinfo = getaddrinfo
        else:
            # Proxy given as host:port without credentials.
            socks.set_default_proxy(socks_version, str(
                socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
            socket.socket = socks.socksocket
            socket.getaddrinfo = getaddrinfo
    try:
        if timeout_sec is not None:
            response = requests.get(target, timeout=timeout_sec)
        else:
            response = requests.get(target)
        webpage['url'] = response.url
        webpage['headers'] = response.headers
        webpage['response'] = response.text
        webpage['html'] = BeautifulSoup(response.text, 'html.parser')
        # src attributes of all external scripts.
        webpage['scripts'] = [script['src']
                              for script in webpage['html'].findAll('script', src=True)]
        # Lower-cased name -> content for all named meta tags.
        webpage['metatags'] = {meta['name'].lower(): meta['content']
                               for meta in webpage['html'].findAll('meta', attrs=dict(name=True, content=True))}
        return webpage
    except:
        # NOTE(review): despite the ``retries`` parameter this function
        # performs a single attempt -- there is no loop, so ``tries`` can
        # never exceed 1. Confirm whether retrying was intended.
        tries += 1
        if tries >= retries:
            info(messages(language, "no_response"))
        return
def _prepare_app(app):
    """Normalize a raw Wappalyzer fingerprint dict in place.

    Ensures list/dict shapes for the pattern fields, folds a bare ``meta``
    value into ``{'generator': ...}``, lower-cases header and meta names,
    and compiles every pattern with :func:`_prepare_pattern`.
    """
    # Pattern-list fields: default to [] and wrap scalar values.
    for key in ('url', 'html', 'script', 'implies'):
        value = app.setdefault(key, [])
        if not isinstance(value, list):
            app[key] = [value]

    # Mapping fields: default to {}.
    for key in ('headers', 'meta'):
        app.setdefault(key, {})

    # A bare meta value means a generator pattern.
    if not isinstance(app['meta'], dict):
        app['meta'] = {'generator': app['meta']}

    # Header/meta names are matched case-insensitively.
    for key in ('headers', 'meta'):
        app[key] = {name.lower(): pattern
                    for name, pattern in app[key].items()}

    # Compile all patterns once up front.
    for key in ('url', 'html', 'script'):
        app[key] = [_prepare_pattern(pattern) for pattern in app[key]]
    for key in ('headers', 'meta'):
        for name, pattern in app[key].items():
            app[key][name] = _prepare_pattern(pattern)
def _prepare_pattern(pattern):
regex, _, rest = pattern.partition('\\;')
try:
return re.compile(regex, re.I)
except re.error as e:
# regex that never matches:
# http://stackoverflow.com/a/1845097/413622
return re.compile(r'(?!x)x')
def _has_app(app, webpage):
try:
for regex in app['url']:
if regex.search(webpage['url']):
return True
for name, regex in app['headers'].items():
if name in webpage['headers']:
content = webpage['headers'][name]
if regex.search(content):
return True
for regex in app['script']:
for script in webpage['scripts']:
if regex.search(script):
return True
for name, regex in app['meta'].items():
if name in webpage['metatags']:
content = webpage['metatags'][name]
if regex.search(content):
return True
for regex in app['html']:
if regex.search(webpage['response']):
return True
except:
pass
def _get_implied_apps(detected_apps, apps1):
def __get_implied_apps(detect, apps):
_implied_apps = set()
for detected in detect:
try:
_implied_apps.update(set(apps[detected]['implies']))
except KeyError:
pass
return _implied_apps
implied_apps = __get_implied_apps(detected_apps, apps1)
all_implied_apps = set()
while not all_implied_apps.issuperset(implied_apps):
all_implied_apps.update(implied_apps)
implied_apps = __get_implied_apps(all_implied_apps, apps1)
return all_implied_apps
def analyze(target, timeout_sec, log_in_file, language,
            time_sleep, thread_tmp_filename, retries,
            socks_proxy, scan_id, scan_cmd):
    """Fingerprint *target* and log the detected frameworks per category.

    Fetches the page, matches it against the bundled Wappalyzer
    ``apps.json`` fingerprints, groups detections by category and writes
    one JSON event per category into *log_in_file*.

    Bug fixed: the JSON event and log calls used to sit *after* the
    category loop while referencing its loop variable, so only the last
    category was logged and an empty result raised ``NameError``.
    """
    webpage = _parse_webpage(
        target, timeout_sec, language, retries, socks_proxy, scan_cmd, scan_id)
    # Load the fingerprint catalog shipped with the module.
    obj = json.loads(pkg_resources.resource_string(__name__, "apps.json").decode()
                     if version() == 3 else pkg_resources.resource_string(__name__, "apps.json"))
    apps = obj['apps']
    # Direct matches against the fetched page.
    detected = []
    for app_name, app in apps.items():
        _prepare_app(app)
        if _has_app(app, webpage):
            detected.append(app_name)
    # Add transitively implied technologies.
    detected = set(detected).union(_get_implied_apps(detected, apps))
    # Map each detected app to its category name.
    category_wise = {}
    for app_name in detected:
        try:
            cats = apps[app_name]['cats']
            for cat in cats:
                category_wise[app_name] = obj['categories'][str(cat)]['name']
        except Exception:
            pass
    # Invert: category name -> list of app names.
    inv_map = {}
    for app_name, category in category_wise.items():
        inv_map.setdefault(category, []).append(app_name)
    for category, app_names in inv_map.items():
        info(messages(language, "category_framework").format(
            category, ', '.join(app_names)))
        data = json.dumps(
            {'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '', 'PORT': '', 'TYPE': 'wappalyzer_scan',
             'DESCRIPTION': category + ': ' + ', '.join(app_names), 'TIME': now(), 'CATEGORY': "scan",
             'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd})
        # Mark that at least one result was found and append the event.
        __log_into_file(thread_tmp_filename, 'w', '0', language)
        __log_into_file(log_in_file, 'a', data, language)
def start(target, users, passwds, ports, timeout_sec, thread_number, num, total, log_in_file, time_sleep, language,
          verbose_level, socks_proxy, retries, methods_args, scan_id, scan_cmd):  # Main function
    """Framework entry point: run the wappalyzer scan against *target*."""
    # NOTE(review): this condition is always True -- ``x != A or x != B``
    # cannot be False -- so the else branch below is unreachable.  The
    # intent was presumably ``target_type(target) in (...)``; confirm
    # before changing, since de-facto every target type is scanned today.
    if target_type(target) != 'SINGLE_IPv4' or target_type(target) != 'DOMAIN' or target_type(
            target) != 'HTTP' or target_type(target) != 'SINGLE_IPv6':
        threads = []
        # Sentinel file: worker rewrites '1' -> '0' when something is found.
        thread_tmp_filename = '{}/tmp/thread_tmp_'.format(load_file_path()) + ''.join(
            random.choice(string.ascii_letters + string.digits) for _ in range(20))
        __log_into_file(thread_tmp_filename, 'w', '1', language)
        trying = 0
        total_req = 8000
        # Ensure the target carries a scheme for requests.get().
        if target_type(target) != "HTTP":
            target = 'http://' + target
        # A single worker thread performs the whole analysis.
        t = threading.Thread(target=analyze,
                             args=(
                                 target, timeout_sec, log_in_file, language,
                                 time_sleep, thread_tmp_filename, retries,
                                 socks_proxy, scan_id, scan_cmd))
        threads.append(t)
        t.start()
        trying += 1
        if verbose_level > 3:
            info(messages(language, "trying_message").format(trying, total_req, num, total, target_to_host(target),
                                                             "", 'dir_scan'))
        # Throttle while the global thread pool is saturated.
        while 1:
            try:
                if threading.activeCount() >= thread_number:
                    time.sleep(0.01)
                else:
                    break
            except KeyboardInterrupt:
                break
        # wait for threads
        kill_switch = 0
        # Overall deadline derived from the per-request timeout.
        kill_time = int(
            timeout_sec / 0.1) if int(timeout_sec / 0.1) != 0 else 1
        while 1:
            time.sleep(0.1)
            kill_switch += 1
            try:
                if threading.activeCount() == 1 or kill_switch == kill_time:
                    break
            except KeyboardInterrupt:
                break
        # '1' still in the sentinel file means the worker found nothing.
        thread_write = int(open(thread_tmp_filename).read().rsplit()[0])
        if thread_write == 1:
            info(messages(language, "nothing_found").format(
                target, "wappalyzer_scan"))
            if verbose_level != 0:
                data = json.dumps({'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '', 'PORT': '', 'TYPE': 'wappalyzer_scan',
                                   'DESCRIPTION': messages(language, "not_found"), 'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scan_id,
                                   'SCAN_CMD': scan_cmd})
                __log_into_file(log_in_file, 'a', data, language)
        os.remove(thread_tmp_filename)
    else:
        warn(messages(language, "input_target_error").format(
            'wappalyzer_scan', target))
| Nettacker/Nettacker | lib/scan/wappalyzer/engine.py | Python | gpl-3.0 | 9,456 |
'''
The tests in this package are to ensure the proper resultant dtypes of
set operations.
'''
import itertools as it
import numpy as np
import pytest
from pandas.core.dtypes.common import is_dtype_equal
import pandas as pd
from pandas import Int64Index, RangeIndex
from pandas.tests.indexes.conftest import indices_list
import pandas.util.testing as tm
COMPATIBLE_INCONSISTENT_PAIRS = {
(Int64Index, RangeIndex): (tm.makeIntIndex, tm.makeRangeIndex)
}
@pytest.fixture(params=list(it.combinations(indices_list, 2)),
                ids=lambda x: type(x[0]).__name__ + type(x[1]).__name__)
def index_pair(request):
    """
    Create all combinations of 2 index types.

    Parametrized over every unordered pair from ``indices_list``; the test
    id is the concatenation of the two index type names.
    """
    return request.param
def test_union_same_types(indices):
# Union with a non-unique, non-monotonic index raises error
# Only needed for bool index factory
idx1 = indices.sort_values()
idx2 = indices.sort_values()
assert idx1.union(idx2).dtype == idx1.dtype
def test_union_different_types(index_pair):
    # GH 23525
    left, right = index_pair
    type_pair = tuple(sorted([type(left), type(right)], key=lambda x: str(x)))
    if type_pair in COMPATIBLE_INCONSISTENT_PAIRS:
        pytest.xfail('This test only considers non compatible indexes.')
    if any(isinstance(idx, pd.MultiIndex) for idx in index_pair):
        pytest.xfail('This test doesn\'t consider multiindixes.')
    if is_dtype_equal(left.dtype, right.dtype):
        pytest.xfail('This test only considers non matching dtypes.')
    # A union with a CategoricalIndex (even as dtype('O')) and a
    # non-CategoricalIndex can only be made if both indices are monotonic,
    # and a union with a non-unique, non-monotonic index raises an error
    # (relevant for the boolean index), so sort both sides first.
    left = left.sort_values()
    right = right.sort_values()
    # Mismatched dtypes always fall back to object dtype, in both directions.
    object_dtype = np.dtype('O')
    assert left.union(right).dtype == object_dtype
    assert right.union(left).dtype == object_dtype
@pytest.mark.parametrize('idx_fact1,idx_fact2',
                         COMPATIBLE_INCONSISTENT_PAIRS.values())
def test_compatible_inconsistent_pairs(idx_fact1, idx_fact2):
    # GH 23525
    # Build one small index from each factory; a union in either direction
    # must resolve to the dtype of one of the two operands.
    first = idx_fact1(10)
    second = idx_fact2(20)
    allowed = (first.dtype, second.dtype)
    assert first.union(second).dtype in allowed
    assert second.union(first).dtype in allowed
| cbertinato/pandas | pandas/tests/indexes/test_setops.py | Python | bsd-3-clause | 2,362 |
#!/usr/bin/python
import sys
import glob
from libSimProm import SimProm
################################################################################
# Fake Comport + Arduino simulation
class SimCom:
SimDevice = "/fake/SIMUDUINO"
device = SimDevice
description = "Arduino Serial Simulation"
isOpen = False
accumulator = ""
eeprom = None
def __init__( self ):
self.device = self.SimDevice
self.description = "Arduino Serial Simulation"
self.isOpen = False
self.flush()
self.eeprom = SimProm()
print "Serial simulation mode active."
####################
# fake port interface stuff
def close( self ):
self.isOpen = True
def open ( self ):
self.isOpen = False
def flush ( self ):
self.accumulator = ""
return
def flushInput ( self ):
self.flush()
return
def flushOutput ( self ):
self.flush()
return
####################
# stashed data for return values
stashedLine = "=0xFFFF\n"
def stash( self, val ):
self.stashedLine = "=0x{:04x}\n".format( int( val ))
####################
def handleLine( self ):
args = self.accumulator.strip().split( "," )
cmd = args[0]
##############################
if cmd == "ver":
self.stash( 0xEE )
return
if cmd == "cr":
index = args[1]
self.stash( self.eeprom.Get( index ))
return
if cmd == "cw":
index = args[1]
value = args[2]
self.stash( self.eeprom.Set( index, value ))
return
##############################
if cmd == "lp":
pattern = args[1]
print "SIM: LED pattern set for {}".format( pattern )
self.stash( pattern )
return
if cmd == "ls":
ledpin = args[1]
print "SIM: LED set for {}".format( ledpin )
self.stash( ledpin )
return
##############################
if cmd == "iwnr":
sz = int( args[1] )
addr = int( args[2] )
data = int( args[3] )
print "0x{:02x}.Write: 0x{:02x}".format( addr, data )
self.stash( data )
return
if cmd == "irnr":
sz = int( args[1] )
addr = int( args[2] )
print "0x{:02x}.Read".format( addr )
self.stash( 0x00 )
return
if cmd == "iw":
sz = int( args[1] )
addr = int( args[2] )
reg = int( args[3] )
data = int( args[4] )
print "0x{:02x}.Write: [0x{:02x}] = 0x{:02x}".format( addr, reg, data )
self.stash( data )
return
if cmd == "ir":
sz = int( args[1] )
addr = int( args[2] )
reg = int( args[3] )
print "0x{:02x}.Read: [0x{:02x}]".format( addr, reg )
return
##############################
print "{}: unknown option".format( cmd )
return
####################
# handle writes to us
def write ( self, txt ):
#sys.stdout.write( "SER WR:|" )
#sys.stdout.write( self.cleanText( txt ))
#sys.stdout.write( "|" )
#sys.stdout.flush()
# we're going to assume the newline is the end of a string.
self.accumulator = self.accumulator + txt
if( '\n' in self.accumulator or '\r' in self.accumulator ):
self.handleLine()
self.accumulator = ""
return
####################
# handle reads from us
def readLine( self ):
# do something based on "txt"
return self.stashedLine
def read( self, count ):
retval = '\n'
try:
retval = self.stashedLine[0]
self.stashedLine = self.stashedLine[1:]
except:
pass
return retval
| BleuLlama/LlamaPyArdy | Python/libs/libSimCom.py | Python | mit | 3,270 |
'''
Creates the plot of the predicted amount of waste for different mutation
rates and different numbers of active genes. To make the plot use:
python wasteplot.py
The graph will be saved to Probability.eps
NOTE: You CANNOT use pypy for this as pylab is current unsupported. Use
python 2.7 instead.
'''
from pylab import plot, show, legend, savefig, xlabel, ylabel
from util import linecycler
if __name__ == '__main__':
    # X axis: number of active genes.
    X = range(1, 1000)
    # NOTE(review): 0.008 appears twice in this list, and the fourth entry
    # breaks the otherwise 1-2-4-8 progression (0.0008 may have been
    # intended) -- confirm before relying on the plotted series.
    for m in [0.0001, 0.0002, 0.0004, 0.008, 0.001, 0.002, 0.004,
              0.008, 0.01, 0.02, 0.04, 0.08, 0.1, 0.2, 0.4, 0.8]:
        # Probability that none of x active genes mutates at rate m.
        Y = [(1 - m) ** x for x in X]
        plot(X, Y, label=str(m), linestyle=next(linecycler), linewidth=2.5)
    legend(loc='best', title='Mutation Rate')
    xlabel("Number of Active Genes")
    ylabel("Probability of No Active Gene Mutating")
    savefig("Probability.eps", dpi=300)
    show()
| brianwgoldman/ReducingWastedEvaluationsCGP | wasteplot.py | Python | bsd-2-clause | 883 |
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from .gigya import GigyaBaseIE
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
strip_or_none,
float_or_none,
int_or_none,
merge_dicts,
parse_iso8601,
str_or_none,
url_or_none,
)
class CanvasIE(InfoExtractor):
    # Extracts a single asset from the VRT "mediazone" API shared by the
    # canvas/een/ketnet/sporza/vrtvideo/vrtnieuws sites.
    _VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrt(?:video|nieuws)|sporza)/assets/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://mediazone.vrt.be/api/v1/ketnet/assets/md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
        'md5': '68993eda72ef62386a15ea2cf3c93107',
        'info_dict': {
            'id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
            'display_id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
            'ext': 'mp4',
            'title': 'Nachtwacht: De Greystook',
            'description': 'Nachtwacht: De Greystook',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 1468.04,
        },
        'expected_warnings': ['is not a supported codec', 'Unknown MIME type'],
    }, {
        'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
        'only_matching': True,
    }]
    # Maps the API's HLS target types to youtube-dl m3u8 protocols (the
    # AES variant is routed to the non-native m3u8 downloader).
    _HLS_ENTRY_PROTOCOLS_MAP = {
        'HLS': 'm3u8_native',
        'HLS_AES': 'm3u8',
    }
    _REST_API_BASE = 'https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/v1'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site_id, video_id = mobj.group('site_id'), mobj.group('id')

        # Old API endpoint, serves more formats but may fail for some videos
        data = self._download_json(
            'https://mediazone.vrt.be/api/v1/%s/assets/%s'
            % (site_id, video_id), video_id, 'Downloading asset JSON',
            'Unable to download asset JSON', fatal=False)

        # New API endpoint
        if not data:
            # The new endpoint needs a player token first, obtained with an
            # empty POST; 400 responses still carry a JSON error message.
            token = self._download_json(
                '%s/tokens' % self._REST_API_BASE, video_id,
                'Downloading token', data=b'',
                headers={'Content-Type': 'application/json'})['vrtPlayerToken']
            data = self._download_json(
                '%s/videos/%s' % (self._REST_API_BASE, video_id),
                video_id, 'Downloading video JSON', fatal=False, query={
                    'vrtPlayerToken': token,
                    'client': '%s@PROD' % site_id,
                }, expected_status=400)
            message = data.get('message')
            if message and not data.get('title'):
                if data.get('code') == 'AUTHENTICATION_REQUIRED':
                    self.raise_login_required(message)
                raise ExtractorError(message, expected=True)

        title = data['title']
        description = data.get('description')

        formats = []
        # Each target URL advertises one delivery protocol
        # (HLS/HDS/DASH/Smooth Streaming); anything else is passed through.
        for target in data['targetUrls']:
            format_url, format_type = url_or_none(target.get('url')), str_or_none(target.get('type'))
            if not format_url or not format_type:
                continue
            format_type = format_type.upper()
            if format_type in self._HLS_ENTRY_PROTOCOLS_MAP:
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', self._HLS_ENTRY_PROTOCOLS_MAP[format_type],
                    m3u8_id=format_type, fatal=False))
            elif format_type == 'HDS':
                formats.extend(self._extract_f4m_formats(
                    format_url, video_id, f4m_id=format_type, fatal=False))
            elif format_type == 'MPEG_DASH':
                formats.extend(self._extract_mpd_formats(
                    format_url, video_id, mpd_id=format_type, fatal=False))
            elif format_type == 'HSS':
                formats.extend(self._extract_ism_formats(
                    format_url, video_id, ism_id='mss', fatal=False))
            else:
                formats.append({
                    'format_id': format_type,
                    'url': format_url,
                })
        self._sort_formats(formats)

        subtitles = {}
        subtitle_urls = data.get('subtitleUrls')
        if isinstance(subtitle_urls, list):
            # Only 'CLOSED' subtitle tracks are kept; they are filed under
            # 'nl' (hard-coded -- presumably all VRT subtitles are Dutch).
            for subtitle in subtitle_urls:
                subtitle_url = subtitle.get('url')
                if subtitle_url and subtitle.get('type') == 'CLOSED':
                    subtitles.setdefault('nl', []).append({'url': subtitle_url})

        return {
            'id': video_id,
            'display_id': video_id,
            'title': title,
            'description': description,
            'formats': formats,
            'duration': float_or_none(data.get('duration'), 1000),
            'thumbnail': data.get('posterImageUrl'),
            'subtitles': subtitles,
        }
class CanvasEenIE(InfoExtractor):
    IE_DESC = 'canvas.be and een.be'
    # Scrapes the HTML page for the embedded mediazone video id and then
    # delegates the actual extraction to CanvasIE via a url_transparent
    # result.
    _VALID_URL = r'https?://(?:www\.)?(?P<site_id>canvas|een)\.be/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.canvas.be/video/de-afspraak/najaar-2015/de-afspraak-veilt-voor-de-warmste-week',
        'md5': 'ed66976748d12350b118455979cca293',
        'info_dict': {
            'id': 'mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
            'display_id': 'de-afspraak-veilt-voor-de-warmste-week',
            'ext': 'flv',
            'title': 'De afspraak veilt voor de Warmste Week',
            'description': 'md5:24cb860c320dc2be7358e0e5aa317ba6',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 49.02,
        },
        'expected_warnings': ['is not a supported codec'],
    }, {
        # with subtitles
        'url': 'http://www.canvas.be/video/panorama/2016/pieter-0167',
        'info_dict': {
            'id': 'mz-ast-5240ff21-2d30-4101-bba6-92b5ec67c625',
            'display_id': 'pieter-0167',
            'ext': 'mp4',
            'title': 'Pieter 0167',
            'description': 'md5:943cd30f48a5d29ba02c3a104dc4ec4e',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2553.08,
            'subtitles': {
                'nl': [{
                    'ext': 'vtt',
                }],
            },
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Pagina niet gevonden',
    }, {
        'url': 'https://www.een.be/thuis/emma-pakt-thilly-aan',
        'info_dict': {
            'id': 'md-ast-3a24ced2-64d7-44fb-b4ed-ed1aafbf90b8',
            'display_id': 'emma-pakt-thilly-aan',
            'ext': 'mp4',
            'title': 'Emma pakt Thilly aan',
            'description': 'md5:c5c9b572388a99b2690030afa3f3bad7',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 118.24,
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['is not a supported codec'],
    }, {
        'url': 'https://www.canvas.be/check-point/najaar-2016/de-politie-uw-vriend',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site_id, display_id = mobj.group('site_id'), mobj.group('id')

        webpage = self._download_webpage(url, display_id)

        # Prefer the page's own header element; fall back to the og:title
        # meta tag when it is absent.
        title = strip_or_none(self._search_regex(
            r'<h1[^>]+class="video__body__header__title"[^>]*>(.+?)</h1>',
            webpage, 'title', default=None) or self._og_search_title(
            webpage, default=None))

        video_id = self._html_search_regex(
            r'data-video=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id',
            group='id')

        return {
            '_type': 'url_transparent',
            'url': 'https://mediazone.vrt.be/api/v1/%s/assets/%s' % (site_id, video_id),
            'ie_key': CanvasIE.ie_key(),
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': self._og_search_description(webpage),
        }
class VrtNUIE(GigyaBaseIE):
    IE_DESC = 'VrtNU.be'
    # Extractor for the VRT NU platform.  Requires a Gigya-based login;
    # metadata is scraped from the page and the video id is resolved via a
    # per-show ".mssecurevideo.json" document, after which extraction is
    # delegated to CanvasIE.
    _VALID_URL = r'https?://(?:www\.)?vrt\.be/(?P<site_id>vrtnu)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        # Available via old API endpoint
        'url': 'https://www.vrt.be/vrtnu/a-z/postbus-x/1/postbus-x-s1a1/',
        'info_dict': {
            'id': 'pbs-pub-2e2d8c27-df26-45c9-9dc6-90c78153044d$vid-90c932b1-e21d-4fb8-99b1-db7b49cf74de',
            'ext': 'mp4',
            'title': 'De zwarte weduwe',
            'description': 'md5:db1227b0f318c849ba5eab1fef895ee4',
            'duration': 1457.04,
            'thumbnail': r're:^https?://.*\.jpg$',
            'season': 'Season 1',
            'season_number': 1,
            'episode_number': 1,
        },
        'skip': 'This video is only available for registered users',
        'params': {
            'username': '<snip>',
            'password': '<snip>',
        },
        'expected_warnings': ['is not a supported codec'],
    }, {
        # Only available via new API endpoint
        'url': 'https://www.vrt.be/vrtnu/a-z/kamp-waes/1/kamp-waes-s1a5/',
        'info_dict': {
            'id': 'pbs-pub-0763b56c-64fb-4d38-b95b-af60bf433c71$vid-ad36a73c-4735-4f1f-b2c0-a38e6e6aa7e1',
            'ext': 'mp4',
            'title': 'Aflevering 5',
            'description': 'Wie valt door de mand tijdens een missie?',
            'duration': 2967.06,
            'season': 'Season 1',
            'season_number': 1,
            'episode_number': 5,
        },
        'skip': 'This video is only available for registered users',
        'params': {
            'username': '<snip>',
            'password': '<snip>',
        },
        'expected_warnings': ['Unable to download asset JSON', 'is not a supported codec', 'Unknown MIME type'],
    }]
    _NETRC_MACHINE = 'vrtnu'
    # Public Gigya API key and context id used by the VRT NU login form.
    _APIKEY = '3_0Z2HujMtiWq_pkAjgnS2Md2E11a1AwZjYiBETtwNE-EoEHDINgtnvcAOpNgmrVGy'
    _CONTEXT_ID = 'R3595707040'

    def _real_initialize(self):
        self._login()

    def _login(self):
        # Credentials come from --username/--password or the 'vrtnu' netrc
        # machine; anonymous access is allowed (extraction then fails later
        # with raise_login_required for protected videos).
        username, password = self._get_login_info()
        if username is None:
            return

        auth_data = {
            'APIKey': self._APIKEY,
            'targetEnv': 'jssdk',
            'loginID': username,
            'password': password,
            'authMode': 'cookie',
        }

        auth_info = self._gigya_login(auth_data)

        # Sometimes authentication fails for no good reason, retry
        login_attempt = 1
        while login_attempt <= 3:
            try:
                # When requesting a token, no actual token is returned, but the
                # necessary cookies are set.
                self._request_webpage(
                    'https://token.vrt.be',
                    None, note='Requesting a token', errnote='Could not get a token',
                    headers={
                        'Content-Type': 'application/json',
                        'Referer': 'https://www.vrt.be/vrtnu/',
                    },
                    data=json.dumps({
                        'uid': auth_info['UID'],
                        'uidsig': auth_info['UIDSignature'],
                        'ts': auth_info['signatureTimestamp'],
                        'email': auth_info['profile']['email'],
                    }).encode('utf-8'))
            except ExtractorError as e:
                # Only HTTP 401 is retried; any other error is fatal.
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                    login_attempt += 1
                    self.report_warning('Authentication failed')
                    self._sleep(1, None, msg_template='Waiting for %(timeout)s seconds before trying again')
                else:
                    raise e
            else:
                break

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage, urlh = self._download_webpage_handle(url, display_id)

        info = self._search_json_ld(webpage, display_id, default={})

        # title is optional here since it may be extracted by extractor
        # that is delegated from here
        title = strip_or_none(self._html_search_regex(
            r'(?ms)<h1 class="content__heading">(.+?)</h1>',
            webpage, 'title', default=None))

        description = self._html_search_regex(
            r'(?ms)<div class="content__description">(.+?)</div>',
            webpage, 'description', default=None)

        season = self._html_search_regex(
            [r'''(?xms)<div\ class="tabs__tab\ tabs__tab--active">\s*
                    <span>seizoen\ (.+?)</span>\s*
                </div>''',
             r'<option value="seizoen (\d{1,3})" data-href="[^"]+?" selected>'],
            webpage, 'season', default=None)

        season_number = int_or_none(season)

        episode_number = int_or_none(self._html_search_regex(
            r'''(?xms)<div\ class="content__episode">\s*
                    <abbr\ title="aflevering">afl</abbr>\s*<span>(\d+)</span>
                </div>''',
            webpage, 'episode_number', default=None))

        release_date = parse_iso8601(self._html_search_regex(
            r'(?ms)<div class="content__broadcastdate">\s*<time\ datetime="(.+?)"',
            webpage, 'release_date', default=None))

        # If there's a ? or a # in the URL, remove them and everything after
        clean_url = urlh.geturl().split('?')[0].split('#')[0].strip('/')
        securevideo_url = clean_url + '.mssecurevideo.json'

        try:
            video = self._download_json(securevideo_url, display_id)
        except ExtractorError as e:
            # A 401 here means the video needs a (valid) login.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                self.raise_login_required()
            raise

        # We are dealing with a '../<show>.relevant' URL
        redirect_url = video.get('url')
        if redirect_url:
            return self.url_result(self._proto_relative_url(redirect_url, 'https:'))

        # There is only one entry, but with an unknown key, so just get
        # the first one
        video_id = list(video.values())[0].get('videoid')

        return merge_dicts(info, {
            '_type': 'url_transparent',
            'url': 'https://mediazone.vrt.be/api/v1/vrtvideo/assets/%s' % video_id,
            'ie_key': CanvasIE.ie_key(),
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'season': season,
            'season_number': season_number,
            'episode_number': episode_number,
            'release_date': release_date,
        })
| vinegret/youtube-dl | youtube_dl/extractor/canvas.py | Python | unlicense | 14,571 |
# -*- coding: utf-8 -*-
#
# django-filter documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 17 11:25:20 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
# NOTE: django-filter's doc sources use .txt rather than the more common .rst.
source_suffix = '.txt'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'django-filter'
copyright = u'2019, Alex Gaynor, Carlton Gibson and others.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1'
# The full version, including alpha/beta/rc tags.
release = '2.1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# NOTE: overridden with sphinx_rtd_theme for local builds at the bottom of
# this file.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'django-filterdoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'django-filter.tex', u'django-filter Documentation',
   u'Alex Gaynor and others.', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-filter', u'django-filter Documentation',
     [u'Alex Gaynor and others.'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'django-filter', u'django-filter Documentation',
   u'Alex Gaynor and others.', 'django-filter', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# see:
# https://github.com/snide/sphinx_rtd_theme#using-this-theme-locally-then-building-on-read-the-docs
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

# only import and set the theme if we're building docs locally
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| alex/django-filter | docs/conf.py | Python | bsd-3-clause | 8,216 |
from datetime import datetime
from sys import exit
from ..concurrency import WorkerPool
from ..utils.cmdline import count_items, get_target_nodes
from ..utils.table import ROW_SEPARATOR, render_table
from ..utils.text import (
blue,
bold,
cyan,
cyan_unless_zero,
error_summary,
format_duration,
green,
green_unless_zero,
mark_for_translation as _,
red,
red_unless_zero,
)
from ..utils.ui import io
def stats_summary(node_stats, total_duration):
    """Render a per-node verification summary table to stdout.

    NOTE: mutates ``node_stats`` in place, adding 'total' and 'health'
    keys to each node's stats dict.  Nodes are listed best-health first;
    a grand-total row is appended when more than one node was verified.
    """
    for node in node_stats.keys():
        node_stats[node]['total'] = sum([
            node_stats[node]['good'],
            node_stats[node]['bad'],
            node_stats[node]['unknown'],
        ])
        try:
            # Health = percentage of items in the 'good' state.
            node_stats[node]['health'] = \
                (node_stats[node]['good'] / float(node_stats[node]['total'])) * 100.0
        except ZeroDivisionError:
            node_stats[node]['health'] = 0

    totals = {
        'items': 0,
        'good': 0,
        'bad': 0,
        'unknown': 0,
    }

    node_ranking = []

    for node_name, stats in node_stats.items():
        totals['items'] += stats['total']
        totals['good'] += stats['good']
        totals['bad'] += stats['bad']
        totals['unknown'] += stats['unknown']
        # Tuple order matters: sorting below ranks by health first.
        node_ranking.append((
            stats['health'],
            node_name,
            stats['total'],
            stats['good'],
            stats['bad'],
            stats['unknown'],
            stats['duration'],
        ))

    node_ranking = sorted(node_ranking, reverse=True)

    try:
        totals['health'] = (totals['good'] / float(totals['items'])) * 100.0
    except ZeroDivisionError:
        totals['health'] = 0

    rows = [[
        bold(_("node")),
        _("items"),
        green(_("good")),
        red(_("bad")),
        cyan(_("unknown")),
        _("health"),
        _("duration"),
    ], ROW_SEPARATOR]

    for health, node_name, items, good, bad, unknown, duration in node_ranking:
        rows.append([
            node_name,
            str(items),
            green_unless_zero(good),
            red_unless_zero(bad),
            cyan_unless_zero(unknown),
            "{0:.1f}%".format(health),
            format_duration(duration),
        ])

    if len(node_ranking) > 1:
        # Append an aggregate row only when there is something to aggregate.
        rows.append(ROW_SEPARATOR)
        rows.append([
            bold(_("total ({} nodes)").format(len(node_stats.keys()))),
            str(totals['items']),
            green_unless_zero(totals['good']),
            red_unless_zero(totals['bad']),
            cyan_unless_zero(totals['unknown']),
            "{0:.1f}%".format(totals['health']),
            format_duration(total_duration),
        ])

    # Right-align every numeric column (column 0, the node name, stays left).
    alignments = {
        1: 'right',
        2: 'right',
        3: 'right',
        4: 'right',
        5: 'right',
        6: 'right',
        7: 'right',
    }

    for line in render_table(rows, alignments=alignments):
        io.stdout("{x} {line}".format(x=blue("i"), line=line))
def bw_verify(repo, args):
    """Entry point for the `bw verify` command.

    Verifies the targeted nodes concurrently via a WorkerPool, optionally
    prints a per-node summary, prints collected errors, and exits with
    status 1 if any node raised an exception.
    """
    errors = []
    node_stats = {}
    pending_nodes = get_target_nodes(repo, args['targets'])
    start_time = datetime.now()

    io.progress_set_total(count_items(pending_nodes))

    # The three closures below feed and drain the worker pool.
    def tasks_available():
        return bool(pending_nodes)

    def next_task():
        node = pending_nodes.pop()
        return {
            'target': node.verify,
            'task_id': node.name,
            'kwargs': {
                'autoonly_selector': args['autoonly'],
                'autoskip_selector': args['autoskip'],
                'show_all': args['show_all'],
                'show_diff': args['show_diff'],
                'workers': args['item_workers'],
            },
        }

    def handle_result(task_id, return_value, duration):
        # task_id is the node name; return_value is its stats dict.
        node_stats[task_id] = return_value

    def handle_exception(task_id, exception, traceback):
        msg = "{}: {}".format(
            task_id,
            exception,
        )
        io.stderr(traceback)
        io.stderr(repr(exception))
        io.stderr(msg)
        errors.append(msg)

    worker_pool = WorkerPool(
        tasks_available,
        next_task,
        handle_result=handle_result,
        handle_exception=handle_exception,
        pool_id="verify",
        workers=args['node_workers'],
    )
    worker_pool.run()

    if args['summary'] and node_stats:
        stats_summary(node_stats, datetime.now() - start_time)
    error_summary(errors)

    exit(1 if errors else 0)
| bundlewrap/bundlewrap | bundlewrap/cmdline/verify.py | Python | gpl-3.0 | 4,452 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2013 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert PHP localization files to Gettext PO localization files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/php2po.html
for examples and usage instructions.
"""
import logging
from translate.convert import convert
from translate.storage import php, po
logger = logging.getLogger(__name__)
class php2po:
    """Convert a .php file to a .po file for handling the translation."""

    def convertstore(self, inputstore, duplicatestyle="msgctxt"):
        """Convert a .php file to a .po file."""
        outputstore = po.pofile()
        outputheader = outputstore.header()
        outputheader.addnote("extracted from %s" % inputstore.filename,
                             "developer")
        for inputunit in inputstore.units:
            outputunit = self.convertunit(inputunit, "developer")
            if outputunit is not None:
                outputstore.addunit(outputunit)
        outputstore.removeduplicates(duplicatestyle)
        return outputstore

    def mergestore(self, templatestore, inputstore, blankmsgstr=False,
                   duplicatestyle="msgctxt"):
        """Convert two .php files (template + translation) to a .po file."""
        outputstore = po.pofile()
        outputheader = outputstore.header()
        outputheader.addnote("extracted from %s, %s" % (templatestore.filename,
                                                        inputstore.filename),
                             "developer")
        inputstore.makeindex()
        # Loop through the original file, looking at units one by one.
        for templateunit in templatestore.units:
            outputunit = self.convertunit(templateunit, "developer")
            # Try and find a translation of the same name.
            if templateunit.name in inputstore.locationindex:
                translatedinputunit = inputstore.locationindex[templateunit.name]
                # Need to check that this comment is not a copy of the
                # developer comments.
                translatedoutputunit = self.convertunit(translatedinputunit,
                                                        "translator")
            else:
                translatedoutputunit = None
            # If we have a valid po unit, get the translation and add it.
            if outputunit is not None:
                if translatedoutputunit is not None and not blankmsgstr:
                    outputunit.target = translatedoutputunit.source
                outputstore.addunit(outputunit)
            elif translatedoutputunit is not None:
                # BUGFIX: the module-level logger object was previously
                # *called* directly (logger(...)), which raises TypeError;
                # report the failure via Logger.error() instead.  Message
                # also corrected from "properties" to "php".
                logger.error("error converting original php definition %s",
                             templateunit.name)
        outputstore.removeduplicates(duplicatestyle)
        return outputstore

    def convertunit(self, inputunit, origin):
        """Convert a .php unit to a .po unit."""
        outputunit = po.pounit(encoding="UTF-8")
        outputunit.addnote(inputunit.getnotes(origin), origin)
        # NOTE(review): all locations are concatenated into one string;
        # confirm multiple locations are not meant to be added separately.
        outputunit.addlocation("".join(inputunit.getlocations()))
        outputunit.source = inputunit.source
        outputunit.target = ""
        return outputunit
def convertphp(inputfile, outputfile, templatefile, pot=False,
               duplicatestyle="msgctxt"):
    """Read inputfile using php, convert using php2po, write to outputfile.

    Returns 1 on success and 0 when the resulting store is empty (the
    convert framework uses this to skip writing empty outputs).
    """
    inputstore = php.phpfile(inputfile)
    convertor = php2po()
    if templatefile is None:
        # No template: straight extraction of every unit.
        outputstore = convertor.convertstore(inputstore,
                                             duplicatestyle=duplicatestyle)
    else:
        # With a template: merge translations into the template's layout,
        # leaving msgstrs blank when producing a POT.
        templatestore = php.phpfile(templatefile)
        outputstore = convertor.mergestore(templatestore, inputstore,
                                           blankmsgstr=pot,
                                           duplicatestyle=duplicatestyle)
    if outputstore.isempty():
        return 0
    outputfile.write(str(outputstore))
    return 1
def main(argv=None):
    """Command-line entry point: configure the converter framework and run."""
    # Map input extension (or input/template extension pair) to the output
    # extension and the conversion callable.
    formats = {
        "php": ("po", convertphp),
        ("php", "php"): ("po", convertphp),
        "html": ("po", convertphp),
        ("html", "html"): ("po", convertphp),
    }
    option_parser = convert.ConvertOptionParser(
        formats, usetemplates=True, usepots=True, description=__doc__)
    option_parser.add_duplicates_option()
    option_parser.passthrough.append("pot")
    option_parser.run(argv)
# Allow the converter to be invoked directly as a script.
if __name__ == '__main__':
    main()
| biswajitsahu/kuma | vendor/packages/translate/convert/php2po.py | Python | mpl-2.0 | 5,142 |
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Johannes Ring, 2009.
#
# First added: 2009-09-16
# Last changed: 2009-09-16
from subprocess import Popen, PIPE, STDOUT
# These are taken from http://ivory.idyll.org/blog/mar-07/replacing-commands-with-subprocess
__all__ = ['getoutput', 'getstatusoutput']
def getoutput(cmd):
    """Replacement for commands.getoutput which does not work on Windows.

    Runs `cmd` through the shell and returns its combined stdout/stderr.
    """
    pipe = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT, bufsize=-1)
    # communicate() both drains the output and waits for the child to exit.
    # The previous code called pipe.wait() first, which can deadlock once
    # the child fills the OS pipe buffer (see the subprocess docs warning).
    output, error = pipe.communicate()
    return output
def getstatusoutput(cmd, input=None, cwd=None, env=None):
    """Replacement for commands.getstatusoutput which does not work on Windows.

    Runs `cmd` through the shell and returns (exit_status, combined_output).
    """
    process = Popen(cmd, shell=True, cwd=cwd, env=env,
                    stdout=PIPE, stderr=STDOUT)
    output, error = process.communicate(input=input)
    # stderr is redirected into stdout, so the stderr channel must be empty.
    assert not error
    return process.returncode, output
| FEniCS/dolfin | site-packages/dolfin_utils/commands.py | Python | lgpl-3.0 | 1,591 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
# Root-to-leaf tag paths under which the tested <joint> element may appear.
tagLstRoot = [['library_kinematics_models', 'kinematics_model', 'technique_common', 'joint'], ['library_joints', 'joint']]
# Attribute name/value pair identifying the specific joint under test.
attrName = 'id'
attrVal = 'revolute_joint'
# Child elements whose values must be compared numerically, not textually.
numericNodeList = ['axis', 'min', 'max']
class SimpleJudgingObject:
    """Judge for the revolute-joint <limit> conformance test case.

    Badge results are cached in the status_* flags so that higher badges
    can short-circuit on the outcome of lower ones.
    """
    def __init__(self, _tagLstRoot, _attrName, _attrVal, _numericNodeList):
        self.tagListRoot = _tagLstRoot
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.numericNodeList = _numericNodeList
        # Per-badge outcomes, filled in by the Judge* methods below.
        self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
        self.__assistant = JudgeAssistant.JudgeAssistant()
    def JudgeKinematicsBaseline(self, context):
        """Baseline badge: steps ran cleanly and the joint data survived."""
        # No step should not crash
        self.__assistant.CheckCrashes(context)
        # Import/export/validate must exist and pass, while Render must only exist.
        self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])
        if (self.__assistant.GetResults() == False):
            self.status_baseline = False
            return False
        # check that the joint element and all children are preserved
        self.__assistant.SmartPreservation(context, self.tagListRoot, self.attrName, self.attrVal, self.numericNodeList)
        self.status_baseline = self.__assistant.GetResults()
        return self.status_baseline
    # To pass intermediate you need to pass basic, this object could also include additional
    # tests that were specific to the intermediate badge.
    def JudgeKinematicsSuperior(self, context):
        """Superior badge: currently identical to the baseline result."""
        self.status_superior = self.status_baseline
        return self.status_superior
    # To pass advanced you need to pass intermediate, this object could also include additional
    # tests that were specific to the advanced badge
    def JudgeKinematicsExemplary(self, context):
        """Exemplary badge: the joint must survive in its original location."""
        # if superior fails, no point in further checking
        if (self.status_superior == False):
            self.status_exemplary = self.status_superior
            return self.status_exemplary
        # for exemplary badge, element must be preserved in original location
        originalTagList = [self.tagListRoot[0]]
        self.__assistant.SmartPreservation(context, originalTagList, self.attrName, self.attrVal, self.numericNodeList)
        # NOTE(review): baseline uses GetResults() here while this uses
        # DeferJudgement(); confirm the asymmetry is intentional.
        self.status_exemplary = self.__assistant.DeferJudgement(context)
        return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLstRoot, attrName, attrVal, numericNodeList);
| KhronosGroup/COLLADA-CTS | StandardDataSets/1_5/collada/library_kinematics_model/kinematics_model/technique_common/joint/revolute/limit/limit.py | Python | mit | 4,505 |
# A driver for running 2D alignment using the FijiBento alignment project
# The input is a directory that contains image files (tiles), and the output is a 2D montage of these files
# Activates ComputeSIFTFeaturs -> MatchSIFTFeatures -> OptimizeMontageTransfrom
# and the result can then be rendered if needed
#
# requires:
# - java (executed from the command line)
# -
import sys
import os
import argparse
import json
import itertools
from bounding_box import BoundingBox
import time
from filter_tiles import filter_tiles
from create_sift_features_cv2 import create_sift_features
from create_surf_features_cv2 import create_surf_features
#from match_sift_features import match_sift_features
from match_sift_features_and_filter_cv2 import match_single_sift_features_and_filter
from json_concat import json_concat
from optimize_2d_mfovs import optimize_2d_mfovs
from utils import write_list_to_file
def load_tilespecs(tile_file):
    """Load a JSON tilespec list, accepting plain paths or file:// URLs."""
    local_path = tile_file.replace('file://', '')
    with open(local_path, 'r') as spec_fp:
        return json.load(spec_fp)
# Command line parser
parser = argparse.ArgumentParser(description='A driver that does a 2D affine alignment of images.')
parser.add_argument('tiles_fname', metavar='tiles_json', type=str,
                    help='a tile_spec file that contains all the images to be aligned in json format')
parser.add_argument('-w', '--workspace_dir', type=str,
                    help='a directory where the output files of the different stages will be kept (default: current directory)',
                    default='.')
parser.add_argument('-o', '--output_file_name', type=str,
                    help='the file that includes the output to be rendered in json format (default: output.json)',
                    default='output.json')
parser.add_argument('-c', '--conf_file_name', type=str,
                    help='the configuration file with the parameters for each step of the alignment process in json format (uses default parameters, if )',
                    default=None)
# NOTE(review): threads_num is parsed but never used below.
parser.add_argument('-t', '--threads_num', type=int,
                    help='the number of threads to use (default: 1)',
                    default=None)
args = parser.parse_args()
print args
# create a workspace directory if not found
if not os.path.exists(args.workspace_dir):
    os.makedirs(args.workspace_dir)
tiles_fname_prefix = os.path.splitext(os.path.basename(args.tiles_fname))[0]
# read tile spec and find the features for each tile
tilespecs = load_tilespecs(args.tiles_fname)
all_features = {}
all_matched_features = []
start_time = time.time()
# Stage 1: compute SIFT features per tile; cached on disk, so existing
# feature files are reused on re-runs.
for i, ts in enumerate(tilespecs):
    imgurl = ts["mipmapLevels"]["0"]["imageUrl"]
    tile_fname = os.path.basename(imgurl).split('.')[0]
    # create the features of these tiles
    features_json = os.path.join(args.workspace_dir, "{0}_sifts_{1}.hdf5".format(tiles_fname_prefix, tile_fname))
    if not os.path.exists(features_json):
        create_sift_features(args.tiles_fname, features_json, i, args.conf_file_name)
    all_features[imgurl] = features_json
print 'Features computation took {0:1.4f} seconds'.format(time.time() - start_time)
# read every pair of overlapping tiles, and match their sift features
# TODO: add all tiles to a kd-tree so it will be faster to find overlap between tiles
# iterate over the tiles, and for each tile, find intersecting tiles that overlap,
# and match their features
# Nested loop:
# for each tile_i in range[0..N):
# for each tile_j in range[tile_i..N)]
start_time = time.time()
# NOTE(review): `indices` is initialized but never populated or read.
indices = []
# Stage 2: match features for every pair of tiles whose bounding boxes overlap.
for pair in itertools.combinations(xrange(len(tilespecs)), 2):
    idx1 = pair[0]
    idx2 = pair[1]
    ts1 = tilespecs[idx1]
    ts2 = tilespecs[idx2]
    # if the two tiles intersect, match them
    bbox1 = BoundingBox.fromList(ts1["bbox"])
    bbox2 = BoundingBox.fromList(ts2["bbox"])
    if bbox1.overlap(bbox2):
        imageUrl1 = ts1["mipmapLevels"]["0"]["imageUrl"]
        imageUrl2 = ts2["mipmapLevels"]["0"]["imageUrl"]
        tile_fname1 = os.path.basename(imageUrl1).split('.')[0]
        tile_fname2 = os.path.basename(imageUrl2).split('.')[0]
        print "Matching features of tiles: {0} and {1}".format(imageUrl1, imageUrl2)
        index_pair = [idx1, idx2]
        match_json = os.path.join(args.workspace_dir, "{0}_sift_matches_{1}_{2}.json".format(tiles_fname_prefix, tile_fname1, tile_fname2))
        # match the features of overlapping tiles
        if not os.path.exists(match_json):
            match_single_sift_features_and_filter(args.tiles_fname, all_features[imageUrl1], all_features[imageUrl2], match_json, index_pair, conf_fname=args.conf_file_name)
        all_matched_features.append(match_json)
print 'features matching took {0:1.4f} seconds'.format(time.time() - start_time)
# Create a single file that lists all tilespecs and a single file that lists all pmcc matches (the os doesn't support a very long list)
matches_list_file = os.path.join(args.workspace_dir, "all_matched_sifts_files.txt")
write_list_to_file(matches_list_file, all_matched_features)
# optimize the 2d layer montage
# Stage 3: solve the 2D montage optimization, skipped if output already exists.
if not os.path.exists(args.output_file_name):
    print "Optimizing section in tilespec: {}".format(args.tiles_fname)
    start_time = time.time()
    optimize_2d_mfovs(args.tiles_fname, matches_list_file, args.output_file_name, args.conf_file_name)
    print '2D Optimization took {0:1.4f} seconds'.format(time.time() - start_time)
| Rhoana/rh_aligner | old/2d_align_affine_driver.py | Python | mit | 5,463 |
# coding=utf8
#
from django.conf.urls import patterns
from snaker.shome.views import index, problems
urlpatterns = patterns(
'',
(r'^$', index),
(r'^problems', problems),
) | seraphlnWu/snaker | snaker/shome/urls.py | Python | gpl-2.0 | 187 |
from traits.api import Int, List, Str, Float, TraitError, ListStr
import openpnm as op
from openpnm.utils import SettingsAttr, TypedList, TypedSet
import pytest
class SettingsTest:
    """Unit tests for openpnm.utils SettingsAttr and TypedSet behavior."""
    def setup_class(self): ...
    def test_standard_initialization(self):
        """A data-class template's docstring is carried onto the SettingsAttr."""
        class S1:
            r"""
            This is a docstring
            """
            a = 1
            b = 2
            d = TypedList(types=[str])
        sets1 = SettingsAttr(S1)
        assert "This is a docstring" in sets1.__doc__
    def test_inheritance_and_immutability(self):
        """Subclass settings inherit attrs but hold independent values/types."""
        class S2:
            r"""
            This is a docstring
            """
            a = 2
            b = 3
        class S3(S2):
            r"""
            Different docstring
            """
            b = 3
            c = 4
        sets2 = SettingsAttr(S2)
        sets3 = SettingsAttr(S3)
        sets3.b = 44
        assert sets3._attrs == ['a', 'b', 'c']
        assert sets2.b != sets3.b
        assert "Different docstring" in sets3.__doc__
        # Type is locked to the initial int, so a str assignment must fail.
        with pytest.raises(Exception):
            sets3.b = 'nope'
    def test_adding_new_attr_type_is_enforced(self):
        """An attribute added after init also has its type locked in."""
        class S4:
            r"""
            This is a docstring
            """
            a = 2
            b = 3.5
        sets4 = SettingsAttr(S4)
        sets4.c = 1.5
        with pytest.raises(Exception):
            sets4.c = "string"
    def test_update_from_dataclass(self):
        """_update() overwrites matching attrs from another data object."""
        class S7:
            r"""
            This is a docstring
            """
            a = 1
            b = 2
        class Data:
            a = 22
            c = 5.5
        sets7 = SettingsAttr(S7)
        assert sets7.a == 1
        sets7._update(Data())
        assert sets7.a == 22
    def test_initial_None(self):
        """A None initial value leaves the type open until first assignment."""
        class S8:
            r"""
            This is a docstring
            """
            a = 1
            b = None
        sets8 = SettingsAttr(S8)
        sets8.b = 2.2
        assert sets8.b == 2.2
        with pytest.raises(Exception):
            sets8.b = 'str'
    def test_typed_set_inferred_type_after_init(self):
        """TypedSet locks its element type from the first item added."""
        s = TypedSet()
        s.add(0)
        s2 = set((0, 2))
        assert s2.difference(s) == set([2])
        s.add(2)
        assert s2.difference(s) == set()
        s.add(2)
        assert len(s) == 2  # Ensure length stays the same
        with pytest.raises(Exception):
            s.add('1')
    def test_typed_set_given_multiple_types_during_init(self):
        """Explicitly allowed types both work; anything else is rejected."""
        s = TypedSet(types=[int, float])
        s.add(1)
        s.add(2.0)
        assert len(s) == 2
        with pytest.raises(Exception):
            s.add([2])
if __name__ == '__main__':
    # Manual runner: execute every test method when invoked outside pytest.
    t = SettingsTest()
    self = t
    t.setup_class()
    for item in t.__dir__():
        if not item.startswith('test'):
            continue
        print('running test: ' + item)
        getattr(t, item)()
| PMEAL/OpenPNM | tests/unit/utils/SettingsTest.py | Python | mit | 2,866 |
#!/usr/bin/env python
"""
Get metadata for the given file specified by its Logical File Name or for a list of files
contained in the specifed file
Usage:
dirac-dms-catalog-metadata <lfn | fileContainingLfns> [Catalog]
Example:
$ dirac-dms-catalog-metadata /formation/user/v/vhamar/Example.txt
FileName Size GUID Status Checksum
/formation/user/v/vhamar/Example.txt 34 EDE6DDA4-3344-3F39-A993-8349BA41EB23 1 eed20d47
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
from DIRAC import exit as DIRACExit
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
  """Print catalog metadata for one LFN, or for every LFN listed in a file.

  Positional arguments: <lfn | fileContainingLfns> [Catalog].
  """
  Script.parseCommandLine()
  from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
  import os
  args = Script.getPositionalArgs()
  if not len(args) >= 1:
    Script.showHelp(exitCode=1)
  else:
    inputFileName = args[0]
    catalogs = []
    if len(args) == 2:
      catalogs = [args[1]]
  # A local file argument is treated as a list of LFNs (one per line);
  # otherwise the argument itself is the single LFN to query.
  if os.path.exists(inputFileName):
    inputFile = open(inputFileName, 'r')
    string = inputFile.read()
    lfns = string.splitlines()
    inputFile.close()
  else:
    lfns = [inputFileName]
  res = FileCatalog(catalogs=catalogs).getFileMetadata(lfns)
  if not res['OK']:
    print("ERROR:", res['Message'])
    DIRACExit(-1)
  # Fixed-width header matching the per-file rows printed below.
  print('FileName'.ljust(100),
        'Size'.ljust(10),
        'GUID'.ljust(40),
        'Status'.ljust(8),
        'Checksum'.ljust(10))
  for lfn in sorted(res['Value']['Successful'].keys()):
    metadata = res['Value']['Successful'][lfn]
    # Missing metadata fields are rendered as empty columns.
    checksum = ''
    if 'Checksum' in metadata:
      checksum = str(metadata['Checksum'])
    size = ''
    if 'Size' in metadata:
      size = str(metadata['Size'])
    guid = ''
    if 'GUID' in metadata:
      guid = str(metadata['GUID'])
    status = ''
    if 'Status' in metadata:
      status = str(metadata['Status'])
    print('%s %s %s %s %s' % (lfn.ljust(100), size.ljust(10), guid.ljust(40), status.ljust(8), checksum.ljust(10)))
  # Failed lookups are reported after the successful table.
  for lfn in sorted(res['Value']['Failed'].keys()):
    message = res['Value']['Failed'][lfn]
    print(lfn, message)
if __name__ == "__main__":
  # Delegate to the DIRACScript-decorated entry point.
  main()
| yujikato/DIRAC | src/DIRAC/DataManagementSystem/scripts/dirac_dms_catalog_metadata.py | Python | gpl-3.0 | 2,306 |
from django.test import TestCase
from rea_people.models import (
Agent,
Organisation,
Person,
Epitome,
EpitomeCategory,
EpitomeInstance,
Skill,
Interest,
ProgrammingLanguage,
Rating,
OutofTen,
RatingInstance,
)
class SimpleTestCase(TestCase):
    """Smoke test proving the Django test runner is wired up correctly."""

    def test_addition(self):
        """Integer addition behaves as expected."""
        expected_total = 2
        self.assertEqual(1 + 1, expected_total)
| DarrenFrenkel/django-rea-people | rea_people/tests.py | Python | mit | 348 |
from __future__ import unicode_literals
import os
import unittest
import balanced
from billy_client import BillyAPI
from billy_client import Plan
@unittest.skipUnless(
    os.environ.get('BILLY_CLIENT_TEST_AGAINST_SERVER'),
    'Skip testing against server unless BILLY_CLIENT_TEST_AGAINST_SERVER is set',
)
class TestAgainstServer(unittest.TestCase):
    """End-to-end test driving a live Billy server and Balanced sandbox.

    Endpoints and credentials come from BILLY_TEST_* environment
    variables, with defaults pointing at a local test deployment.
    """
    def setUp(self):
        self.target_url = os.environ.get(
            'BILLY_TEST_URL',
            'http://127.0.0.1:6543')
        self.processor_key = os.environ.get(
            'BILLY_TEST_PROCESSOR_KEY',
            'ef13dce2093b11e388de026ba7d31e6f')
        self.marketplace_uri = os.environ.get(
            'BILLY_TEST_MARKETPLACE_URI',
            '/v1/marketplaces/TEST-MP6lD3dBpta7OAXJsN766qA')
        balanced.configure(self.processor_key)
    def make_one(self, api_key, endpoint=None):
        """Build a BillyAPI client against the configured test endpoint."""
        if endpoint is None:
            endpoint = self.target_url
        return BillyAPI(api_key, endpoint=endpoint)
    def test_basic_scenario(self):
        """Full lifecycle: subscribe a card, debit, cancel, refund."""
        api = self.make_one(None)
        marketplace = balanced.Marketplace.find(self.marketplace_uri)
        # create a card to charge
        card = marketplace.create_card(
            name='BILLY_INTERGRATION_TESTER',
            card_number='5105105105105100',
            expiration_month='12',
            expiration_year='2020',
            security_code='123',
        )
        company = api.create_company(processor_key=self.processor_key)
        api_key = company.api_key
        api = self.make_one(api_key)
        customer = company.create_customer()
        # Daily debit plan of 7788 cents.
        plan = company.create_plan(
            plan_type=Plan.TYPE_DEBIT,
            frequency=Plan.FREQ_DAILY,
            amount=7788,
        )
        subscription = plan.subscribe(
            customer_guid=customer.guid,
            funding_instrument_uri=card.uri,
            appears_on_statement_as='hello baby',
        )
        self.assertEqual(subscription.customer_guid, customer.guid)
        self.assertEqual(subscription.plan_guid, plan.guid)
        self.assertEqual(subscription.funding_instrument_uri, card.uri)
        self.assertEqual(subscription.appears_on_statement_as, 'hello baby')
        # check invoice
        invoices = list(subscription.list_invoices())
        self.assertEqual(len(invoices), 1)
        invoice = invoices[0]
        self.assertEqual(invoice.subscription_guid, subscription.guid)
        # check charge transaction
        transactions = list(subscription.list_transactions())
        self.assertEqual(len(transactions), 1)
        transaction = transactions[0]
        self.assertEqual(transaction.invoice_guid, invoice.guid)
        # check corresponding debit transaction in Balanced
        debit = balanced.Debit.find(transaction.processor_uri)
        self.assertEqual(debit.meta['billy.transaction_guid'], transaction.guid)
        self.assertEqual(debit.amount, 7788)
        self.assertEqual(debit.status, 'succeeded')
        self.assertEqual(debit.appears_on_statement_as, 'hello baby')
        # cancel the subscription
        subscription = subscription.cancel()
        self.assertEqual(subscription.canceled, True)
        # refund the invoice
        invoice = invoice.refund(amount=1234)
        # check the refund transaction
        transactions = list(subscription.list_transactions())
        self.assertEqual(len(transactions), 2)
        transaction = transactions[0]
        self.assertEqual(transaction.invoice_guid, invoice.guid)
        self.assertEqual(transaction.submit_status, 'done')
        self.assertEqual(transaction.transaction_type, 'refund')
        # check the refund transaction in Balanced
        refund = balanced.Refund.find(transaction.processor_uri)
        self.assertEqual(refund.meta['billy.transaction_guid'],
                         transaction.guid)
        self.assertEqual(refund.amount, 1234)
        self.assertEqual(refund.status, 'succeeded')
| victorlin/billy-client | billy_client/tests/test_against_server.py | Python | mit | 3,958 |
import pytest
import re
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selene import browser
from selene import config
from selene.common.none_object import NoneObject
from selene.support.conditions import have
from selene.support.jquery_style_selectors import s, ss
from tests.integration.helpers.givenpage import GivenPage
# Placeholder replaced with a real GivenPage in setup_module().
GIVEN_PAGE = NoneObject('GivenPage')  # type: GivenPage
# Remember the configured timeout so teardown_module() can restore it.
original_timeout = config.timeout
def setup_module(m):
    """Start a Firefox-driven browser and build the page helper once per module."""
    driver = webdriver.Firefox()
    browser.set_driver(driver)
    global GIVEN_PAGE
    GIVEN_PAGE = GivenPage(driver)
def teardown_module(m):
    """Restore the global timeout and shut the browser down."""
    config.timeout = original_timeout
    browser.driver().quit()
def exception_message(ex):
    """Normalize a pytest ExceptionInfo message into a list of stripped lines.

    The screenshot line contains a machine/run specific path, so it is
    replaced with a fixed placeholder to keep test assertions deterministic.
    """
    # Raw string: the original non-raw literal relied on invalid escape
    # sequences (\s, \d), which raise SyntaxWarning on modern Python.
    screenshot_re = re.compile(
        r'\s*screenshot: .*?/\.selene/screenshots/\d+?/screen_\d+\.png\s*')
    return [line.strip()
            if not screenshot_re.match(line)
            else 'screenshot: //.selene/screenshots/*/screen_*.png'
            for line in str(ex.value.msg).strip().splitlines()]
def test_selement_search_fails_with_message_when_explicitly_waits_for_condition():
    """should() with a mismatching condition reports the expected/actual pair."""
    GIVEN_PAGE.opened_with_body('''
    <label id='element'>Hello world!</label>
    ''')
    config.timeout = 0.1
    with pytest.raises(TimeoutException) as ex:
        s('#element').should(have.exact_text('Hello wor'))
    assert exception_message(ex) == \
        ['failed while waiting 0.1 seconds',
         'to assert ExactText',
         "for first_by('css selector', '#element')",
         '',
         'reason: ConditionMismatchException: condition did not match',
         "expected: Hello wor",
         "actual: Hello world!",
         'screenshot: //.selene/screenshots/*/screen_*.png']
def test_selement_search_fails_with_message_when_implicitly_waits_for_condition():
    """Clicking a hidden element reports the implicit visibility wait."""
    GIVEN_PAGE.opened_with_body('''
    <button id='hidden-button' style='display:none'>You can't click me, ha ha! :P</button>
    ''')
    config.timeout = 0.1
    with pytest.raises(TimeoutException) as ex:
        s('#hidden-button').click()
    assert exception_message(ex) == \
        ['failed while waiting 0.1 seconds',
         'to assert Visible',
         "for first_by('css selector', '#hidden-button')",
         '',
         'reason: ConditionMismatchException: condition did not match',
         'screenshot: //.selene/screenshots/*/screen_*.png']
def test_inner_selement_search_fails_with_message_when_implicitly_waits_for_condition_mismatch_on_inner_element():
    """Failure on the inner element names the full parent/child locator chain."""
    GIVEN_PAGE.opened_with_body('''
    <div id='container'>
        <button id='hidden-button' style='display:none'>You can't click me, ha ha! :P</button>
    </div>
    ''')
    config.timeout = 0.1
    with pytest.raises(TimeoutException) as ex:
        s('#container').element('#hidden-button').click()
    assert exception_message(ex) == \
        ['failed while waiting 0.1 seconds',
         'to assert Visible',
         "for first_by('css selector', '#container').find_by('css selector', '#hidden-button')",
         '',
         'reason: ConditionMismatchException: condition did not match',
         'screenshot: //.selene/screenshots/*/screen_*.png']
def test_inner_selement_search_fails_with_message_when_implicitly_waits_for_condition_mismatched_on_parent_element():
    """A hidden parent produces a nested TimeoutException as the reason."""
    GIVEN_PAGE.opened_with_body('''
    <div id='hidden-container' style='display:none'>
        <button id='button'>You still can't click me, ha ha! :P</button>
    </div>
    ''')
    config.timeout = 0.1
    with pytest.raises(TimeoutException) as ex:
        s('#hidden-container').element('#button').click()
    assert exception_message(ex) == \
        ['failed while waiting 0.1 seconds',
         'to assert Visible',
         "for first_by('css selector', '#hidden-container').find_by('css selector', '#button')",
         '',
         'reason: TimeoutException:',
         'failed while waiting 0.1 seconds',
         'to assert Visible',
         "for first_by('css selector', '#hidden-container')",
         '',
         'reason: ConditionMismatchException: condition did not match',
         'screenshot: //.selene/screenshots/*/screen_*.png']
def test_inner_selement_search_fails_with_message_when_implicitly_waits_for_condition_failed_on_parent_element():
    """A missing parent surfaces the underlying NoSuchElementException."""
    GIVEN_PAGE.opened_with_body('''
    <div>
        <button id='button'>Try to click me</button>
    </div>
    ''')
    config.timeout = 0.1
    with pytest.raises(TimeoutException) as ex:
        s('#not-existing').element('#button').click()
    assert exception_message(ex) == \
        ['failed while waiting 0.1 seconds',
         'to assert Visible',
         "for first_by('css selector', '#not-existing').find_by('css selector', '#button')",
         '',
         'reason: TimeoutException:',
         'failed while waiting 0.1 seconds',
         'to assert Visible',
         "for first_by('css selector', '#not-existing')",
         '',
         'reason: NoSuchElementException: Unable to locate element: {"method":"css selector","selector":"#not-existing"}',
         'screenshot: //.selene/screenshots/*/screen_*.png']
def test_indexed_selement_search_fails_with_message_when_implicitly_waits_for_condition_failed_on_collection():
    """Indexing past the collection size reports the SizeAtLeast condition."""
    GIVEN_PAGE.opened_with_body('''
    <div>
        <button id='button'>Try to click me</button>
    </div>
    ''')
    config.timeout = 0.1
    with pytest.raises(TimeoutException) as ex:
        ss('button')[1].click()
    assert exception_message(ex) == \
        ['failed while waiting 0.1 seconds',
         'to assert Visible',
         "for all_by('css selector', 'button')[1]",
         '',
         'reason: TimeoutException:',
         'failed while waiting 0.1 seconds',
         'to assert SizeAtLeast',
         "for all_by('css selector', 'button')",
         '',
         'reason: ConditionMismatchException: condition did not match',
         'expected: >= 2',
         'actual: 1',
         'screenshot: //.selene/screenshots/*/screen_*.png']
# todo: uncomment when refactored conditions implementation
# def test_selement_search_fails_with_message_when_explicitly_waits_for_not_condition():
# GIVEN_PAGE.opened_with_body('''
# <label id='element'>Hello world!</label>
# ''')
# config.timeout = 0.1
#
# s('#element').should_not(have.exact_text('Hello world!'))
# with pytest.raises(TimeoutException) as ex:
# s('#element').should_not(have.exact_text('Hello world!'))
#
# assert exception_message(ex) == \
# ['failed while waiting 0.1 seconds',
# 'to assert not ExactText',
# "for first_by('css selector', '#element')",
# '',
# 'reason: ConditionMismatchException: condition did not match',
# 'expected: not(Hello world!)',
# 'actual: Hello world!',
# 'screenshot: //.selene/screenshots/*/screen_*.png']
| SergeyPirogov/selene | tests/integration/error_messages_test.py | Python | mit | 6,833 |
import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
import gym_morph # customized environment for cart-pole
import matplotlib.pyplot as plt
import time
# Hyperparameters
RANDOM_NUMBER_SEED = 2
# ENVIRONMENT = "CartPole-v0"
# ENVIRONMENT = "CartPole-v1"
ENVIRONMENT1 = "morph-v0"
ENVIRONMENT2 = "morph-l1-v0"
MAX_EPISODES = 1000 # number of episodes before morphing (or after it)
HIDDEN_LAYER = True
HIDDEN_SIZE = 6
DISPLAY_WEIGHTS = False # Help debug weight update
gamma = 0.99 # Discount per step
alpha = 0.01 # Learning rate
RENDER = False # Render the cart-pole system
VIDEO_INTERVAL = 100 # Generate a video at this interval
CONSECUTIVE_TARGET = 50 # Including previous 50 rewards
# Seed everything (env, numpy, tensorflow) for reproducibility.
env = gym.make(ENVIRONMENT1)
env.seed(RANDOM_NUMBER_SEED)
np.random.seed(RANDOM_NUMBER_SEED)
tf.set_random_seed(RANDOM_NUMBER_SEED)
# Input and output sizes
input_size = env.observation_space.shape[0]
try:
    output_size = env.action_space.shape[0]
except AttributeError:
    # Discrete action spaces expose .n instead of .shape.
    output_size = env.action_space.n
# Tensorflow network setup
x = tf.placeholder(tf.float32, shape=(None, input_size))
y = tf.placeholder(tf.float32, shape=(None, 1))
expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
# Xavier (2010) weights initializer for uniform distribution:
# x = sqrt(6. / (in + out)); [-x, x]
w_init = tf.contrib.layers.xavier_initializer()
# Policy network: optional single hidden ELU layer, tanh output logits.
if HIDDEN_LAYER:
    hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
                               initializer=w_init)
    hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
    dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
                             initializer=w_init)
    dist_B = tf.Variable(tf.zeros(output_size))
    hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
    dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
else:
    dist_W = tf.get_variable("W1", shape=[input_size, output_size],
                             initializer=w_init)
    dist_B = tf.Variable(tf.zeros(output_size))
    dist = tf.tanh(tf.matmul(x, dist_W) + dist_B)
# Collapse the log-softmax to a single Bernoulli parameter and build the
# REINFORCE loss: -E[return] * log pi(action).
dist_soft = tf.nn.log_softmax(dist)
dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
pi = tf.contrib.distributions.Bernoulli(dist_in)
pi_sample = pi.sample()
log_pi = pi.log_prob(y)
optimizer = tf.train.RMSPropOptimizer(alpha)
train = optimizer.minimize(-1.0 * expected_returns * log_pi)
# saver = tf.train.Saver()
# Create and initialize a session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def run_episode(environment, ep, render=False):
    """Roll out one episode in `environment` with the current policy.

    Returns (raw_reward, discounted_reward, cumulative_reward, states,
    actions), where cumulative_reward[i] is the discounted return
    accumulated *before* step i (used as a per-step baseline).
    """
    raw_reward = 0
    discounted_reward = 0
    cumulative_reward = []
    discount = 1.0
    states = []
    actions = []
    obs = environment.reset()
    done = False
    while not done:
        states.append(obs)
        cumulative_reward.append(discounted_reward)
        if render and ((ep % VIDEO_INTERVAL) == 0):
            environment.render()
        action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
        actions.append(action)
        # Step the environment passed as a parameter; the original stepped
        # the global `env`, which only worked because every caller passed it.
        obs, reward, done, info = environment.step(action[0])
        raw_reward += reward
        if reward > 0:
            discounted_reward += reward * discount
        else:
            # Negative rewards are added undiscounted — presumably
            # intentional; TODO confirm.
            discounted_reward += reward
        discount *= gamma
    return raw_reward, discounted_reward, cumulative_reward, states, actions
def display_weights(session):
    """Print the current network weights and biases (debugging helper).

    Reads the module-level variable tensors via `session.run`.
    """
    # Dropped the spurious `global HIDDEN_LAYER`: the name is only read
    # here, and `global` is required only for assignment.
    if HIDDEN_LAYER:
        w1 = session.run(hidden_W)
        b1 = session.run(hidden_B)
        w2 = session.run(dist_W)
        b2 = session.run(dist_B)
        print(w1, b1, w2, b2)
    else:
        w1 = session.run(dist_W)
        b1 = session.run(dist_B)
        print(w1, b1)
# Phase 1: train on ENVIRONMENT1 for MAX_EPISODES episodes.
returns_before_morph = []
mean_returns_before_morph = []
for ep in range(MAX_EPISODES):
    raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
        run_episode(env, ep, RENDER)
    # Advantage: discounted return-to-go from each step.
    expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
    sess.run(train, feed_dict={x: ep_states, y: ep_actions,
                               expected_returns: expected_R})
    if DISPLAY_WEIGHTS:
        display_weights(sess)
    # saver.save(sess, "/home/yh/repo/sim_test/Feb/CartPoleData/model.ckpt")
    returns_before_morph.append(raw_G)
    # Running mean over the last CONSECUTIVE_TARGET episodes.
    running_returns = returns_before_morph[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
    mean_returns = np.mean(running_returns)
    mean_returns_before_morph.append(mean_returns)
    msg = "Episode: {}, Return: {}, Last {} returns mean: {}"
    msg = msg.format(ep+1, raw_G, CONSECUTIVE_TARGET, mean_returns)
    print(msg)
# Phase 2: swap in the morphed environment, keep the learned weights.
env.close()
env = gym.make(ENVIRONMENT2)
env.seed(RANDOM_NUMBER_SEED)
print("---------- MORPHING ----------")
time.sleep(3)
returns_after_morph = []
mean_returns_after_morph = []
for ep in range(MAX_EPISODES):
    # saver.restore(sess, "/home/yh/repo/sim_test/Feb/CartPoleData/model.ckpt")
    raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
        run_episode(env, ep, RENDER)
    expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
    sess.run(train, feed_dict={x: ep_states, y: ep_actions,
                               expected_returns: expected_R})
    if DISPLAY_WEIGHTS:
        display_weights(sess)
    # saver.save(sess, "/home/yh/repo/sim_test/Feb/CartPoleData/model.ckpt")
    returns_after_morph.append(raw_G)
    running_returns = returns_after_morph[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
    mean_returns = np.mean(running_returns)
    mean_returns_after_morph.append(mean_returns)
    msg = "Episode: {}, Return: {}, Last {} returns mean: {}"
    msg = msg.format(ep+MAX_EPISODES+1, raw_G, CONSECUTIVE_TARGET, mean_returns)
    print(msg)
# Plot
# plt.style.use('ggplot')
plt.style.use('dark_background')
# Instant returns
# returns_for_plot = np.concatenate((returns_before_morph, returns_after_morph), axis=0)
# Mean returns
returns_for_plot = np.concatenate((mean_returns_before_morph, mean_returns_after_morph), axis=0)
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.85)
ax.set_title("Cart-Pole with Morphing")
ax.set_xlabel("Episode")
ax.set_ylabel("Returns (Running Average)")
ax.plot(returns_for_plot)
# Vertical dashed line marks the morphing boundary between the phases.
ax.axvline(x=MAX_EPISODES, linestyle='--')
ax.set_ylim((0, 200))
ax.text(0, 25, "Parameters before morphing:\n \
    Pole length = 1.0 m \n \
    Pole mass = 0.1 kg \n \
    Cart mass = 1.0 kg \n \
    Force magnitude = 10 N", \
    bbox={'facecolor':(.118,.565,1.000, .3)})
ax.text(MAX_EPISODES*21.0/20.0, 25, "Parameters after morphing:\n \
    Pole length = 6.0 m \n \
    Pole mass = 0.1 kg \n \
    Cart mass = 1.0 kg \n \
    Force magnitude = 10 N", \
    bbox={'facecolor':(.118,.565,1.000, .3)})
ax.annotate('Morphing', xy=(MAX_EPISODES, 100), \
    xytext=(MAX_EPISODES*6.0/5.0, 125), \
    arrowprops=dict(facecolor=(.118,.565,1.000, .3)))
plt.show()
# plt.plot(returns_for_plot)
# plt.axvline(x=MAX_EPISODES, linestyle='--')
# plt.title("Cart-Pole with Morphing")
# plt.xlabel("Episode")
# plt.ylabel("Returns")
# plt.ylim((0, 200))
# plt.show()
| GitYiheng/reinforcement_learning_test | test01_cartpendulum/Feb/t8_cartpole_mc_plot.py | Python | mit | 7,022 |
#!/usr/bin/env python
# *-* coding:utf-8 *-*
"""
Date :
Author : Vianney Gremmel loutre.a@gmail.com
"""
from time import time
start = time()
def squareroot_fractions():
    """Generate (numerator, denominator) convergents of the continued
    fraction expansion of sqrt(2).

    Yields 1/1 first, then 3/2, 7/5, 17/12, 41/29, ... using the
    standard recurrence n' = n + 2d, d' = n + d.
    """
    num, den = 1, 1
    while True:
        yield num, den
        num, den = num + 2 * den, num + den
# True when the fraction's numerator has more decimal digits than its
# denominator (this appears to be Project Euler problem 57's criterion).
big_numerator = lambda n, d: len(str(n)) > len(str(d))
sf = squareroot_fractions()
# Count qualifying fractions among the first 1000 yielded convergents.
# NOTE: Python 2 code (print statement, xrange, generator .next()).
print sum(1 for _ in xrange(1000) if big_numerator(*sf.next()))
print time() - start
| vianney-g/python-exercices | eulerproject/pb0057.py | Python | gpl-2.0 | 472 |
def max_(lst):
    """Return the largest element of ``lst`` using recursion.

    Args:
        lst: a list of mutually comparable items.

    Returns:
        The maximum element, or ``None`` for an empty list.
    """
    if len(lst) == 0:
        return None
    if len(lst) == 1:
        return lst[0]
    else:
        # Fix: recurse on the tail ``lst[1:]``. The original recursed on
        # ``lst[:1]`` (the head alone), so elements after the first were
        # never examined and the function always returned lst[0].
        sub_max = max_(lst[1:])
        return lst[0] if lst[0] > sub_max else sub_max
| liangjisheng/Data-Struct | books/algorithmicGraphics/chapter4/04_recursive_max.py | Python | gpl-2.0 | 198 |
from src.matrix_spiral import matrix_spiral
# 3x3 square matrix and its expected clockwise spiral traversal.
matrix1 = [
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9]
]
result1 = [1, 2, 3, 6, 9, 8, 7, 4, 5]
# 5x4 rectangular matrix (more rows than columns) and its spiral.
matrix2 = [
    [1, 2, 3, 4],
    [4, 5, 6, 7],
    [7, 8, 9, 10],
    [11, 12, 13, 14],
    [15, 16, 17, 18]
]
result2 = [1, 2, 3, 4, 7, 10, 14, 18, 17, 16, 15, 11, 7, 4, 5, 6, 9, 13, 12, 8]
# Degenerate cases: rows with no columns, and an empty matrix.
matrix3 = [[], []]
result3 = []
matrix4 = []
result4 = []
def test_matrix_spiral1():
    """A 3x3 square matrix unwinds clockwise into result1."""
    traversal = matrix_spiral(matrix1)
    assert traversal == result1
def test_matrix_spiral2():
    """A 5x4 rectangular matrix unwinds clockwise into result2."""
    traversal = matrix_spiral(matrix2)
    assert traversal == result2
def test_matrix_spiral3():
    """Rows with no columns produce an empty traversal."""
    traversal = matrix_spiral(matrix3)
    assert traversal == result3
def test_matrix_spiral4():
    """An entirely empty matrix produces an empty traversal."""
    traversal = matrix_spiral(matrix4)
    assert traversal == result4
| tanyaweaver/code-katas | test/test_matrix_spiral.py | Python | mit | 692 |
#!/usr/bin/env python
import os
import sys
def _main(argv):
    """Point Django at the project's settings and run the requested
    management command (argv is sys.argv-style)."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webapp.settings")
    # Imported here, after DJANGO_SETTINGS_MODULE is set, exactly as the
    # stock manage.py does.
    from django.core.management import execute_from_command_line
    execute_from_command_line(argv)


if __name__ == "__main__":
    _main(sys.argv)
# Generated by Django 2.2.5 on 2019-09-17 15:23
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema changes for the adjudication Round model."""

    dependencies = [
        ('adjudication', '0003_auto_20190917_0644'),
    ]

    operations = [
        # New optional text column; existing rows default to 'missing_image'.
        migrations.AddField(
            model_name='round',
            name='image_id',
            field=models.CharField(blank=True, default='missing_image', max_length=255),
        ),
        # New nullable Postgres JSONB column (presumably registration
        # data for the round -- verify against the model definition).
        migrations.AddField(
            model_name='round',
            name='registration',
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
        ),
        # Existing integer columns gain defaults.
        migrations.AlterField(
            model_name='round',
            name='num',
            field=models.IntegerField(default=1),
        ),
        migrations.AlterField(
            model_name='round',
            name='spots',
            field=models.IntegerField(default=10),
        ),
    ]
| dbinetti/barberscore-django | project/apps/adjudication/migrations/0004_auto_20190917_0823.py | Python | bsd-2-clause | 965 |
# mozilla/prettyprinters.py --- infrastructure for SpiderMonkey's auto-loaded pretty-printers.
import gdb
import re
# Decorators for declaring pretty-printers.
#
# In each case, the decoratee should be a SpiderMonkey-style pretty-printer
# factory, taking both a gdb.Value instance and a TypeCache instance as
# arguments; see TypeCache, below.
# Check that |fn| hasn't been registered as a pretty-printer under some
# other name already. (The 'enabled' flags used by GDB's
# 'enable/disable/info pretty-printer' commands are simply stored as
# properties of the function objects themselves, so a single function
# object can't carry the 'enabled' flags for two different printers.)
def check_for_reused_pretty_printer(fn):
    """Raise RuntimeError if |fn| is already registered as a printer.

    GDB's 'enable/disable/info pretty-printer' commands keep their
    'enabled' flags directly on the printer function objects, so a
    single function cannot serve as two differently-named printers.
    """
    _missing = object()
    if getattr(fn, 'enabled', _missing) is not _missing:
        raise RuntimeError("pretty-printer function %r registered more than once" % fn)
# a dictionary mapping gdb.Type tags to pretty-printer functions.
printers_by_tag = {}
# A decorator: add the decoratee as a pretty-printer lookup function for types
# named |type_name|.
def pretty_printer(type_name):
    """Decorator: register the decorated factory as the pretty-printer
    lookup function for types whose tag is |type_name|."""
    def register(factory):
        check_for_reused_pretty_printer(factory)
        add_to_subprinter_list(factory, type_name)
        printers_by_tag[type_name] = factory
        return factory
    return register
# a dictionary mapping gdb.Type tags to pretty-printer functions for pointers to
# that type.
ptr_printers_by_tag = {}
# A decorator: add the decoratee as a pretty-printer lookup function for
# pointers to types named |type_name|.
def ptr_pretty_printer(type_name):
    """Decorator: register the decorated factory as the pretty-printer
    lookup function for pointers to the type named |type_name|."""
    def register(factory):
        check_for_reused_pretty_printer(factory)
        add_to_subprinter_list(factory, "ptr-to-" + type_name)
        ptr_printers_by_tag[type_name] = factory
        return factory
    return register
# a dictionary mapping gdb.Type tags to pretty-printer functions for
# references to that type.
ref_printers_by_tag = {}
# A decorator: add the decoratee as a pretty-printer lookup function for
# references to instances of types named |type_name|.
def ref_pretty_printer(type_name):
    """Decorator: register the decorated factory as the pretty-printer
    lookup function for references to the type named |type_name|."""
    def register(factory):
        check_for_reused_pretty_printer(factory)
        add_to_subprinter_list(factory, "ref-to-" + type_name)
        ref_printers_by_tag[type_name] = factory
        return factory
    return register
# a dictionary mapping the template name portion of gdb.Type tags to
# pretty-printer functions for instantiations of that template.
template_printers_by_tag = {}
# A decorator: add the decoratee as a pretty-printer lookup function for
# instantiations of templates named |template_name|.
def template_pretty_printer(template_name):
    """Decorator: register the decorated factory as the pretty-printer
    lookup function for instantiations of the template |template_name|."""
    def register(factory):
        check_for_reused_pretty_printer(factory)
        add_to_subprinter_list(factory, 'instantiations-of-' + template_name)
        template_printers_by_tag[template_name] = factory
        return factory
    return register
# A list of (REGEXP, PRINTER) pairs, such that if REGEXP (a RegexObject)
# matches the result of converting a gdb.Value's type to a string, then
# PRINTER is a pretty-printer lookup function that will probably like that
# value.
printers_by_regexp = []
# A decorator: add the decoratee as a pretty-printer factory for types
# that, when converted to a string, match |pattern|. Use |name| as the
# pretty-printer's name, when listing, enabling and disabling.
def pretty_printer_for_regexp(pattern, name):
    """Decorator: register the decorated factory for any value whose
    type, converted to a string, matches |pattern|. |name| labels the
    printer for listing, enabling and disabling."""
    regexp = re.compile(pattern)
    def register(factory):
        check_for_reused_pretty_printer(factory)
        add_to_subprinter_list(factory, name)
        printers_by_regexp.append((regexp, factory))
        return factory
    return register
# Forget all pretty-printer lookup functions defined in the module name
# |module_name|, if any exist. Use this at the top of each pretty-printer
# module like this:
#
# clear_module_printers(__name__)
def clear_module_printers(module_name):
    """Forget all pretty-printer lookup functions defined in the module
    named |module_name|, if any exist. Each removed printer is also
    dropped from the global subprinter list."""
    global printers_by_tag, ptr_printers_by_tag, ref_printers_by_tag
    global template_printers_by_tag, printers_by_regexp

    # Remove all pretty-printers defined in the module named |module_name|
    # from d.
    def clear_dictionary(d):
        # Walk the dictionary, building a list of keys whose entries we
        # should remove. (It's not safe to delete entries from a dictionary
        # while we're iterating over it.)
        to_delete = []
        for (k, v) in d.items():
            if v.__module__ == module_name:
                to_delete.append(k)
                remove_from_subprinter_list(v)
        for k in to_delete:
            del d[k]

    clear_dictionary(printers_by_tag)
    clear_dictionary(ptr_printers_by_tag)
    clear_dictionary(ref_printers_by_tag)
    clear_dictionary(template_printers_by_tag)

    # printers_by_regexp holds (regexp, function) pairs, so we must
    # inspect and deregister the function element of each pair.
    # (The original code called p.__module__ on the tuple itself, which
    # raises AttributeError, and passed the tuple to
    # remove_from_subprinter_list, which would never match.)
    new_list = []
    for (r, f) in printers_by_regexp:
        if f.__module__ == module_name:
            remove_from_subprinter_list(f)
        else:
            new_list.append((r, f))
    printers_by_regexp = new_list
# Our subprinters array. The 'subprinters' attributes of all lookup
# functions returned by lookup_for_objfile point to this array instance,
# which we mutate as subprinters are added and removed.
subprinters = []
# Set up the 'name' and 'enabled' attributes on |subprinter|, and add it to our
# list of all SpiderMonkey subprinters.
def add_to_subprinter_list(subprinter, name):
    """Name and enable |subprinter|, and record it in the global list.

    The 'name' and 'enabled' attributes set here are what GDB's
    'enable/disable/info pretty-printer' commands read and write.
    """
    subprinter.name = name
    subprinter.enabled = True
    subprinters.append(subprinter)
# Remove |subprinter| from our list of all SpiderMonkey subprinters.
def remove_from_subprinter_list(subprinter):
    """Remove |subprinter| from our list of all SpiderMonkey subprinters."""
    subprinters.remove(subprinter)
# An exception class meaning, "This objfile has no SpiderMonkey in it."
class NotSpiderMonkeyObjfileError(TypeError):
    """Raised by TypeCache when an objfile has no SpiderMonkey in it."""
    pass
# TypeCache: a cache for frequently used information about an objfile.
#
# When a new SpiderMonkey objfile is loaded, we construct an instance of
# this class for it. Then, whenever we construct a pretty-printer for some
# gdb.Value, we also pass, as a second argument, the TypeCache for the
# objfile to which that value's type belongs.
#
# if objfile doesn't seem to have SpiderMonkey code in it, the constructor
# raises NotSpiderMonkeyObjfileError.
#
# Pretty-printer modules may add attributes to this to hold their own
# cached values. Such attributes should be named mod_NAME, where the module
# is named mozilla.NAME; for example, mozilla.JSString should store its
# metadata in the TypeCache's mod_JSString attribute.
class TypeCache(object):
    """Per-objfile cache of frequently used gdb.Type lookups.

    Constructed once per SpiderMonkey objfile and passed to every
    pretty-printer factory alongside the value being printed. Raises
    NotSpiderMonkeyObjfileError if the objfile lacks SpiderMonkey types.
    """

    def __init__(self, objfile):
        self.objfile = objfile
        # Unfortunately, the Python interface doesn't allow us to specify
        # the objfile in whose scope lookups should occur. But simply
        # knowing that we need to lookup the types afresh is probably
        # enough.
        self.void_t = gdb.lookup_type('void')
        self.void_ptr_t = self.void_t.pointer()
        # These lookups only succeed when the objfile actually contains
        # SpiderMonkey; failing here is how non-SpiderMonkey objfiles are
        # detected and rejected.
        try:
            self.JSString_ptr_t = gdb.lookup_type('JSString').pointer()
            self.JSSymbol_ptr_t = gdb.lookup_type('JS::Symbol').pointer()
            self.JSObject_ptr_t = gdb.lookup_type('JSObject').pointer()
        except gdb.error:
            raise NotSpiderMonkeyObjfileError
        # Per-module metadata slots, filled in lazily by each
        # mozilla.NAME pretty-printer module (see comment above class).
        self.mod_GCCellPtr = None
        self.mod_Interpreter = None
        self.mod_JSObject = None
        self.mod_JSString = None
        self.mod_jsval = None
        self.mod_ExecutableAllocator = None
        self.mod_IonGraph = None
# Yield a series of all the types that |t| implements, by following typedefs
# and iterating over base classes. Specifically:
# - |t| itself is the first value yielded.
# - If we yield a typedef, we later yield its definition.
# - If we yield a type with base classes, we later yield those base classes.
# - If we yield a type with some base classes that are typedefs,
# we yield all the type's base classes before following the typedefs.
# (Actually, this never happens, because G++ doesn't preserve the typedefs in
# the DWARF.)
#
# This is a hokey attempt to order the implemented types by meaningfulness when
# pretty-printed. Perhaps it is entirely misguided, and we should actually
# collect all applicable pretty-printers, and then use some ordering on the
# pretty-printers themselves.
#
# We may yield a type more than once (say, if it appears more than once in the
# class hierarchy).
def implemented_types(t):
    """Yield |t| followed by every type it implements: typedef targets
    and base classes, roughly ordered from most to least derived.

    A type may be yielded more than once if it appears several times in
    the class hierarchy (see the comment block above).
    """
    # Yield all types that follow |t|.
    def followers(t):
        if t.code == gdb.TYPE_CODE_TYPEDEF:
            yield t.target()
            for t2 in followers(t.target()): yield t2
        elif t.code == gdb.TYPE_CODE_STRUCT:
            base_classes = []
            for f in t.fields():
                if f.is_base_class:
                    yield f.type
                    base_classes.append(f.type)
            # Emit all direct bases before recursing into any of them,
            # so siblings come out before grandparents.
            for b in base_classes:
                for t2 in followers(b): yield t2

    yield t
    for t2 in followers(t): yield t2
template_regexp = re.compile("([\w_:]+)<")
# Construct and return a pretty-printer lookup function for objfile, or
# return None if the objfile doesn't contain SpiderMonkey code
# (specifically, definitions for SpiderMonkey types).
def lookup_for_objfile(objfile):
    """Build and return the pretty-printer lookup function for |objfile|.

    Returns None when the objfile contains no SpiderMonkey types, so
    that GDB never consults us for values from unrelated objfiles.
    """
    # Create a type cache for this objfile.
    try:
        cache = TypeCache(objfile)
    except NotSpiderMonkeyObjfileError:
        if gdb.parameter("verbose"):
            gdb.write("objfile '%s' has no SpiderMonkey code; not registering pretty-printers\n"
                      % (objfile.filename,))
        return None

    # Return a pretty-printer for |value|, if we have one. This is the lookup
    # function object we place in each gdb.Objfile's pretty-printers list, so it
    # carries |name|, |enabled|, and |subprinters| attributes.
    def lookup(value):
        # If |table| has a pretty-printer for |tag|, apply it to |value|.
        def check_table(table, tag):
            if tag in table:
                f = table[tag]
                if f.enabled:
                    return f(value, cache)
            return None

        # Dispatch on the type's name: typedefs by their printed name,
        # structs by their tag; other kinds have no named table entry.
        def check_table_by_type_name(table, t):
            if t.code == gdb.TYPE_CODE_TYPEDEF:
                return check_table(table, str(t))
            elif t.code == gdb.TYPE_CODE_STRUCT and t.tag:
                return check_table(table, t.tag)
            else:
                return None

        # Try every type |value| implements, preferring pointer/reference
        # printers for their targets, then plain and template printers.
        for t in implemented_types(value.type):
            if t.code == gdb.TYPE_CODE_PTR:
                for t2 in implemented_types(t.target()):
                    p = check_table_by_type_name(ptr_printers_by_tag, t2)
                    if p: return p
            elif t.code == gdb.TYPE_CODE_REF:
                for t2 in implemented_types(t.target()):
                    p = check_table_by_type_name(ref_printers_by_tag, t2)
                    if p: return p
            else:
                p = check_table_by_type_name(printers_by_tag, t)
                if p: return p
                # Template instantiations are matched on the template
                # name alone (the portion of the tag before '<').
                if t.code == gdb.TYPE_CODE_STRUCT and t.tag:
                    m = template_regexp.match(t.tag)
                    if m:
                        p = check_table(template_printers_by_tag, m.group(1))
                        if p: return p

        # Failing that, look for a printer in printers_by_regexp. We have
        # to scan the whole list, so regexp printers should be used
        # sparingly.
        s = str(value.type)
        for (r, f) in printers_by_regexp:
            if f.enabled:
                m = r.match(s)
                if m:
                    p = f(value, cache)
                    if p: return p

        # No luck.
        return None

    # Give |lookup| the attributes expected of a pretty-printer with
    # subprinters, for enabling and disabling.
    lookup.name = "SpiderMonkey"
    lookup.enabled = True
    lookup.subprinters = subprinters
    return lookup
# A base class for pretty-printers for pointer values that handles null
# pointers, by declining to construct a pretty-printer for them at all.
# Derived classes may simply assume that self.value is non-null.
#
# To help share code, this class can also be used with reference types.
#
# This class provides the following methods, which subclasses are free to
# override:
#
# __init__(self, value, cache): Save value and cache as properties by those names
# on the instance.
#
# to_string(self): format the type's name and address, as GDB would, and then
# call a 'summary' method (which the subclass must define) to produce a
# description of the referent.
#
# Note that pretty-printers returning a 'string' display hint must not use
# this default 'to_string' method, as GDB will take everything it returns,
# including the type name and address, as string contents.
class Pointer(object):
    def __new__(cls, value, cache):
        # Don't try to provide pretty-printers for NULL pointers.
        if value.type.strip_typedefs().code == gdb.TYPE_CODE_PTR and value == 0:
            return None
        return super(Pointer, cls).__new__(cls)

    def __init__(self, value, cache):
        # value: the gdb.Value (pointer or reference) being printed.
        # cache: the TypeCache for the objfile |value| belongs to.
        self.value = value
        self.cache = cache

    def to_string(self):
        """Format as '(type) address summary', mimicking GDB's default
        pointer output; the summary comes from the subclass."""
        # See comment above.
        assert not hasattr(self, 'display_hint') or self.display_hint() != 'string'
        concrete_type = self.value.type.strip_typedefs()
        if concrete_type.code == gdb.TYPE_CODE_PTR:
            address = self.value.cast(self.cache.void_ptr_t)
        elif concrete_type.code == gdb.TYPE_CODE_REF:
            # References show their referent's address, marked with '@'.
            address = '@' + str(self.value.address.cast(self.cache.void_ptr_t))
        else:
            assert not "mozilla.prettyprinters.Pointer applied to bad value type"
        # A summary that dies with a memory error becomes the printed
        # description, instead of aborting the whole 'print' command.
        try:
            summary = self.summary()
        except gdb.MemoryError as r:
            summary = str(r)
        v = '(%s) %s %s' % (self.value.type, address, summary)
        return v

    def summary(self):
        """Describe the referent; subclasses must override."""
        raise NotImplementedError
field_enum_value = None
# Given |t|, a gdb.Type instance representing an enum type, return the
# numeric value of the enum value named |name|.
#
# Pre-2012-4-18 versions of GDB store the value of an enum member on the
# gdb.Field's 'bitpos' attribute; later versions store it on the 'enumval'
# attribute. This function retrieves the value from either.
def enum_value(t, name):
    """Return the numeric value of the enum member |name| of enum type |t|.

    Works with both pre- and post-2012-04-18 GDBs, which expose the
    value as the field's 'bitpos' and 'enumval' attributes respectively
    (see comment block above); the accessor is detected once and cached
    in the module-level field_enum_value.
    """
    global field_enum_value
    f = t[name]
    # Monkey-patching is a-okay in polyfills! Just because.
    if not field_enum_value:
        if hasattr(f, 'enumval'):
            field_enum_value = lambda f: f.enumval
        else:
            field_enum_value = lambda f: f.bitpos
    return field_enum_value(f)
| Yukarumya/Yukarum-Redfoxes | js/src/gdb/mozilla/prettyprinters.py | Python | mpl-2.0 | 14,426 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
class LiveStatusConstraints:
""" Represent the constraints applied on a livestatus request """
def __init__(self, filter_func, without_filter):
self.filter_func = filter_func
self.without_filter = without_filter
| baloo/shinken | shinken/modules/livestatus_broker/livestatus_constraints.py | Python | agpl-3.0 | 1,143 |
from flask import Flask
app = Flask(__name__)
app.config.from_object('CoreCatalog.default_settings')
import CoreCatalog.views
| CORE-POS/CoreCatalog | CoreCatalog/__init__.py | Python | apache-2.0 | 127 |
#=====================================================================================================================================
#Copyright
#=====================================================================================================================================
#Copyright (C) 2014 Alexander Blaessle, Patrick Mueller and the Friedrich Miescher Laboratory of the Max Planck Society
#This software is distributed under the terms of the GNU General Public License.
#This file is part of PyFRAP.
#PyFRAP is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#===========================================================================================================================================================================
#Module Description
#===========================================================================================================================================================================
#ROI module for PyFRAP toolbox, including following ROI objects:
#(1) ROI
#(2) radialROI
#(3) sliceROI
#(4) radialSliceROI
#(5) squareROI
#(6) squareSliceROI
#(7) rectangleROI
#(8) rectangleSliceROI
#(9) polyROI
#(10) polySliceROI
#(11) customROI
#===========================================================================================================================================================================
#Improting necessary modules
#===========================================================================================================================================================================
#Numpy/Scipy
import numpy as np
#PyFRAP Modules
from pyfrp.modules import pyfrp_misc_module
from pyfrp.modules import pyfrp_idx_module
from pyfrp.modules import pyfrp_plot_module
from pyfrp.modules import pyfrp_img_module
from pyfrp.modules import pyfrp_integration_module
from pyfrp.modules import pyfrp_fit_module
from pyfrp.modules import pyfrp_gmsh_geometry
from pyfrp.modules import pyfrp_gmsh_module
from pyfrp.modules import pyfrp_openscad_module
from pyfrp.modules.pyfrp_term_module import *
#Plotting
import matplotlib.pyplot as plt
import matplotlib.patches as ptc
#Time
import time
#Copy
import copy
#OS
import os
import shutil
#Solid/Opescad
import solid
import solid.utils
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Main ROI class
class ROI(object):
def __init__(self,embryo,name,Id,zmin='-inf',zmax='inf',color='b'):
#Name/Id
self.name=name
self.Id=Id
self.embryo=embryo
self.color=color
#zExtend
self.zmin=pyfrp_misc_module.translateNPFloat(zmin)
self.zmax=pyfrp_misc_module.translateNPFloat(zmax)
#Idxs from data analysis/simulation
self.imgIdxX=[]
self.imgIdxY=[]
self.extImgIdxX=[]
self.extImgIdxY=[]
self.meshIdx=[]
#Mask
self.imgMask=None
self.extMask=None
#Number of extended pixels
self.numExt=None
#Result Dataseries
self.dataVec=[]
self.simVec=[]
self.dataVecPinned=[]
self.simVecPinned=[]
#Rim concentration
self.useForRim=False
def setId(self,Id):
"""Sets Id of ROI.
Args:
Id (int): New Id.
Returns:
int: New Id.
"""
self.Id=Id
return self.Id
def setName(self,n):
"""Sets name of ROI.
Args:
n (str): New name.
Returns:
str: New name.
"""
self.name=n
return self.name
def setZExtend(self,zmin,zmax):
"""Sets extend in z-direction.
Args:
zmin (float): Minimum z-coordinate.
zmax (float): Maximum z-coordinate.
Returns:
list: New z-extend given by ``[zmin,zmax]``.
"""
self.zmin=zmin
self.zmax=zmax
return [self.zmin,self.zmax]
def getZExtend(self):
"""Returns extend in z-direction.
Returns:
list: Z-extend given by ``[zmin,zmax]``.
"""
return [self.zmin,self.zmax]
def getRealZExend(self):
r"""Returns real extend in z-direction.
Real extend returns
.. math:: z_{\mathrm{min}}=\mathrm{max} (z_{\mathrm{min,ROI}},z_{\mathrm{min,geometry}})
and
.. math:: z_{\mathrm{max}}=\mathrm{min} (z_{\mathrm{max,ROI}},z_{\mathrm{max,geometry}})
Returns:
list: Z-extend given by ``[zmin,zmax]``.
"""
zminGeo,zmaxGeo=self.embryo.geometry.getZExtend()
zmin=max([zminGeo,self.zmin])
zmax=min([zmaxGeo,self.zmax])
return [zmin,zmax]
def getOpenscadZExtend(self):
"""Returns extend in z-direction suitable for rendering
the ROI via openscad.
If either ``zmin`` or ``zmax`` is infinity, then uses
:py:func:getRealZExend to return more meaningful extend.
Returns:
list: Z-extend given by ``[zmin,zmax]``.
"""
if np.inf==abs(self.zmin):
zminReal,zmaxReal=self.getRealZExend()
zmin=zminReal
else:
zmin=self.zmin
if np.inf==abs(self.zmax):
zminReal,zmaxReal=self.getRealZExend()
zmax=zmaxReal
else:
zmax=self.zmax
return zmin,zmax
def getId(self):
"""Returns Id of ROI.
Returns:
int: Id.
"""
return self.Id
def getName(self):
"""Returns name of ROI.
Returns:
str: Current name.
"""
return self.name
def getImgIdx(self):
"""Returns image indices of ROI.
Returns:
tuple: Tuple containing:
* imgIdxX (list): Image indices in x-direction.
* imgIdxY (list): Image indices in y-direction.
"""
return self.imgIdxX,self.imgIdxY
def getExtImgIdx(self):
"""Returns extended image indices of ROI.
Returns:
tuple: Tuple containing:
* extImgIdxX (list): Extended image indices in x-direction.
* extImgIdxY (list): Extended image indices in y-direction.
"""
return self.extImgIdxX,self.extImgIdxY
def getMeshIdx(self):
"""Returns mesh indices of ROI.
Returns:
list: Mesh indices of ROI.
"""
return self.meshIdx
def getMeshDensity(self):
r"""Returns average mesh density inside ROI.
Mesh density is defined by
.. math:: \rho=N/V,
where :math:`N` is the number of mesh nodes inside ROI
and :math:`V` is the volume of ROI, see also
:py:func:`getVolume`.
Returns:
float: Mesh density.
"""
volume=self.getVolume()
return len(self.meshIdx)/volume
def getVolume(self):
r"""Returns volume of ROI.
Since ROIs only behave linearly in z-direction, volume
is given by
.. math:: V = A * h,
where :math:`h` is ROI height (see :py:func:`getROIHeight`) and
:math:`A` is ROI area (see :py:func:`getArea`).
Returns:
float: ROI volume.
"""
area=self.getArea()
height=self.getROIHeight()
return area*height
def getROIHeight(self):
"""Returns height of ROI.
Returns:
float: Height of ROI.
"""
if np.isfinite(self.zmax):
zMax=self.zmax
else:
dump,zMax=self.getMeshIdxZExtend()
if np.isfinite(self.zmin):
zMin=self.zmin
else:
zMin,dump=self.getMeshIdxZExtend()
return abs(zMax-zMin)
def getArea(self):
"""Returns area of ROI.
Area is computed as area covered by
``imgMask + extMask``
Returns:
float: Area of ROI.
"""
if self.imgMask==None:
self.computeImgMask()
if self.extMask==None:
self.computeExtMask()
if self.extMask==None:
return self.imgMask.sum()
return self.imgMask.sum()+self.extMask.sum()
def getMeshIdxZExtend(self):
"""Returns extend of ROI's ``meshIdx`` in z-coordinate.
Returns:
tuple: Tuple containing:
* (float): Minimum z-coordinate.
* (float): Maximum z-coordinate.
"""
mesh=self.embryo.simulation.mesh.mesh
z=np.asarray(mesh.z)[self.meshIdx]
return min(z) , max(z)
def getMeshIdxYExtend(self):
"""Returns extend of ROI's ``meshIdx`` in y-coordinate.
Returns:
tuple: Tuple containing:
* (float): Minimum y-coordinate.
* (float): Maximum y-coordinate.
"""
mesh=self.embryo.simulation.mesh
y=np.asarray(mesh.getCellCenters[1])[self.meshIdx]
return min(y) , max(y)
def getMeshIdxXExtend(self):
"""Returns extend of ROI's ``meshIdx`` in x-coordinate.
Returns:
tuple: Tuple containing:
* (float): Minimum x-coordinate.
* (float): Maximum x-coordinate.
"""
mesh=self.embryo.simulation.mesh
x=np.asarray(mesh.getCellCenters[0])[self.meshIdx]
return min(x) , max(x)
def getMeshIdxExtend(self):
"""Returns extend of ROI's ``meshIdx``.
Returns:
tuple: Tuple containing:
* (float): Minimum x-coordinate.
* (float): Maximum x-coordinate.
* (float): Minimum y-coordinate.
* (float): Maximum y-coordinate.
* (float): Minimum z-coordinate.
* (float): Maximum z-coordinate.
"""
xmin,xmax=self.getMeshIdxXExtend()
ymin,ymax=self.getMeshIdxYExtend()
zmin,zmax=self.getMeshIdxZExtend()
return xmin,xmax,ymin,ymax,zmin,zmax
def getType(self):
"""Returns type of ROI, splitting off all module names etc. .
Returns:
str: Type of ROI.
"""
typ=str(type(self))
before,typ,after=typ.split("'")
typ=typ.replace('pyfrp_ROI.','')
typ=typ.replace('ROI','')
typ=typ.replace('pyfrp.subclasses.','')
return typ
def setColor(self,color):
"""Sets color of ROI.
Color can be either ``str``, ``float`` or ``tuple``. See also:
http://matplotlib.org/api/colors_api.html
Args:
color (str): New color.
Returns:
str: New color.
"""
self.color=color
return self.color
def setUseForRim(self,b):
"""Marks the ROI to be used for rim calculation.
Args:
b (bool): True if ROI should be used, False else.
Returns:
bool: Current flag value.
"""
if self.numExt>0 and b==True:
printWarning('Be careful, region '+self.name+' is set for rim calculation but has indices outside of image.')
self.useForRim=b
return self.useForRim
def getUseForRim(self):
"""Returns if the ROI is used for rim calculation.
Returns:
bool: Current flag value.
"""
return self.useForRim
def getColor(self):
"""Returns color of ROI.
"""
return self.color
def emptyIdxs(self):
"""Flushes all indices, inserting empty lists for
all of them.
Returns:
tuple: Tuple containing:
* imgIdxX (list): Image indices in x-direction.
* imgIdxY (list): Image indices in y-direction.
* meshIdx (list): Mesh indices.
"""
self.imgIdxX=[]
self.imgIdxY=[]
self.meshIdx=[]
return self.getAllIdxs()
def copyIdxs(self,r):
"""Copies indices of other ROI and inserts them into ROI.
Args:
r (pyfrp.subclasses.pyfrp_ROI.ROI): ROI to take indices from.
Returns:
tuple: Tuple containing:
* imgIdxX (list): Image indices in x-direction.
* imgIdxY (list): Image indices in y-direction.
* meshIdx (list): Mesh indices.
"""
self.imgIdxX=r.imgIdxX
self.imgIdxY=r.imgIdxY
self.extImgIdxX=r.extImgIdxX
self.extImgIdxY=r.extImgIdxY
self.meshIdx=r.meshIdx
return self.getAllIdxs()
def getAllIdxs(self):
"""Returns all index arrays of ROI.
Returns:
tuple: Tuple containing:
* imgIdxX (list): Image indices in x-direction.
* imgIdxY (list): Image indices in y-direction.
* meshIdx (list): Mesh indices.
"""
return self.imgIdxX,self.imgIdxY,self.meshIdx
def getImgMask(self):
"""Returns image mask of ROI.
Returns:
numpy.ndarray: Image mask.
"""
return self.imgMask
def getExtMask(self):
"""Returns extended mask of ROI.
Returns:
numpy.ndarray: Extended mask.
"""
return self.extMask
def computeNumExt(self):
"""Computes number of extended pixels of ROI.
Returns:
int: Number of extended pixels.
"""
self.numExt=len(self.extImgIdxX)
return self.numExt
def getNumExt(self):
"""Returns number of extended pixels of ROI.
Returns:
int: Number of extended pixels.
"""
return self.numExt
def setDataVec(self,vec):
"""Sets data vector of ROI.
Args:
vec (numpy.ndarray): New data vector.
Returns:
numpy.ndarray: New data vector.
"""
self.dataVec=vec
return self.dataVec
def getDataVec(self):
"""Returns current data vector of ROI.
Returns:
numpy.ndarray: Current data vector.
"""
return self.dataVec
def setSimVec(self,vec):
"""Sets simulation vector of ROI.
Args:
vec (numpy.ndarray): New simulation vector.
Returns:
numpy.ndarray: New simulation vector.
"""
self.simVec=vec
return self.simVec
def getSimVec(self):
"""Returns current simulation vector of ROI.
Returns:
numpy.ndarray: Current simulation vector.
"""
return self.simVec
def computeImgMask(self):
"""Computes image mask of ROI.
Image mask is a ``dataResPx * dataResPx`` array with the value
``1`` for pixels inside ROI and ``0`` elsewhere.
Returns:
numpy.ndarray: Image mask.
"""
vals=np.zeros((self.embryo.dataResPx,self.embryo.dataResPx))
self.imgMask=pyfrp_idx_module.ind2mask(vals,self.imgIdxX,self.imgIdxY,1)
return self.imgMask
def computeExtMask(self):
"""Computes mask of extended pixels of ROI.
Mask is a 2D array with the value
``1`` for pixels inside ROI and ``0`` elsewhere.
.. note:: Returns ``None,None,None`` if there are no extended pixels.
Also returns coordinate arrays, since offset of extended mask is not ``[0,0]``.
See also http://docs.scipy.org/doc/numpy/reference/generated/numpy.meshgrid.html .
Returns:
tuple: Tuple containing:
* mX (numpy.ndarray): Coordinate array corresponding to pixels of extended mask.
* mY (numpy.ndarray): Coordinate array corresponding to pixels of extended mask.
* extMask (numpy.ndarray): Extended mask.
"""
if len(self.extImgIdxX)==0:
return None,None,None
minX=min(self.extImgIdxX)
maxX=max(self.extImgIdxX)
minY=min(self.extImgIdxY)
maxY=max(self.extImgIdxY)
X=np.arange(minX,maxX+1)
Y=np.arange(minY,maxY+1)
mX,mY=np.meshgrid(X,Y)
vals=np.zeros((len(X),len(Y)))
idXtemp=np.asarray(self.extImgIdxX)+abs(minX)
idYtemp=np.asarray(self.extImgIdxY)+abs(minY)
idXtemp=idXtemp.astype('int')
idYtemp=idYtemp.astype('int')
self.extMask=pyfrp_idx_module.ind2mask(vals,idXtemp,idYtemp,1)
return mX,mY,self.extMask
def showImgIdx(self,ax=None):
if ax==None:
fig,axes = pyfrp_plot_module.makeSubplot([1,1],titles=["imgIdx"],sup=self.name+" imgIdx")
ax=axes[0]
self.computeImgMask()
ax.imshow(self.imgMask)
plt.draw()
return ax
def showExtImgIdx(self,ax=None):
if ax==None:
fig,axes = pyfrp_plot_module.makeSubplot([1,1],titles=["extImgIdx"],sup=self.name+" imgIdx")
ax=axes[0]
mX,mY,self.extMask=self.computeExtMask()
if mX!=None:
ax.contourf(mX,mY,self.extMask)
plt.draw()
return ax
def showMeshIdx(self,ax=None):
x,y,z=self.embryo.simulation.mesh.getCellCenters()
if ax==None:
fig,axes = pyfrp_plot_module.makeSubplot([1,1],titles=["MeshIdx"],sup=self.name+" MeshIdx",proj=['3d'])
ax=axes[0]
#Somehow need to convert to np array since slicing does not work for fipy variables
x=np.asarray(x)[self.meshIdx]
y=np.asarray(y)[self.meshIdx]
z=np.asarray(z)[self.meshIdx]
ax.scatter(x,y,z,c=self.color)
plt.draw()
return ax
def showMeshIdx2D(self,ax=None):
x,y,z=self.embryo.simulation.mesh.getCellCenters()
if ax==None:
fig,axes = pyfrp_plot_module.makeSubplot([1,1],titles=["MeshIdx"],sup=self.name+" MeshIdx")
ax=axes[0]
#Somehow need to convert to np array since slicing does not work for fipy variables
x=np.asarray(x)[self.meshIdx]
y=np.asarray(y)[self.meshIdx]
ax.scatter(x,y,c=self.color)
plt.draw()
return ax
def computeIdxs(self,matchMesh=False,debug=False):
"""Computes image and mesh indices of ROI.
Will do this by:
* Compute image indices.
* Match image indices with master ROI.
* Compute external indices.
* Compute mesh indices.
* Match mesh indices with the ones of master ROI.
.. note:: If no master ROI is defined, will not do anything.
.. note:: If master ROI has not been indexed yet, will first index it, then continue.
.. note:: Will skip mesh index computation if there is no mesh generated yet.
Keyword Args:
matchMesh (bool): Match mesh indices with master ROI.
debug (bool): Print out debugging messages.
Return:
tuple: Tuple containing:
* imgIdxX (list): Image indices in x direction.
* imgIdxY (list): Image indices in y direction.
* meshIdx (list): Mesh indices.
"""
if self.embryo.getMasterROIIdx()==None:
printWarning("No Master ROI has been defined yet. Will not continue compute ROI indices.")
return self.getAllIdxs()
else:
masterROI=self.embryo.getMasterROI()
if self!=masterROI:
if len(masterROI.imgIdxX)==0:
printWarning("Idxs of Master ROI have not been computed. Will compute them first.")
masterROI.computeIdxs(debug=debug)
startIdx=time.clock()
if type(self) is not customROI:
self.computeImgIdx(debug=debug)
self.matchImgIdx(masterROI)
self.computeExtIdx(debug=debug)
if self.embryo.simulation!=None:
if self.embryo.simulation.mesh.mesh==None:
printWarning("Mesh has not been generated, will not compute meshIdxs")
else:
self.computeMeshIdx(self.embryo.simulation.mesh)
if matchMesh:
if self!=masterROI:
self.matchMeshIdx(masterROI)
else:
printWarning("Simulation object does not exist yet, hence won't index for mesh.")
else:
self.updateIdxs()
self.matchImgIdx(masterROI)
if self.embryo.simulation!=None:
if self.embryo.simulation.mesh.mesh!=None:
self.matchMeshIdx(masterROI)
if debug:
print 'Compute Idxs: ', startIdx-time.clock()
return self.getAllIdxs()
def computeExtIdx(self,debug=False):
	
	"""Computes indices of external pixels.
	
	Does this by comparing extended pixels of ``self`` with the one of the master ROI.
	
	Keyword Args:
		debug (bool): Print out debugging messages.
	
	Return:
		tuple: Tuple containing:
		
			* extImgIdxX (list): External image indices in x direction.
			* extImgIdxY (list): External image indices in y direction.
	
	"""
	
	masterROI=self.embryo.getMasterROI()
	
	self.extImgIdxX,self.extImgIdxY=pyfrp_idx_module.getCommonExtendedPixels([self,masterROI],self.embryo.dataResPx,debug=debug)
	
	# Keep cached pixel count in sync with the new index lists
	self.computeNumExt()
	
	return self.extImgIdxX,self.extImgIdxY
def matchImgIdx(self,r):
	
	"""Matches image indices of ``self`` with the ones of ROI ``r``.
	
	Does this by generating masks of both ROIs and multiplicating them.
	
	Args:
		r (pyfrp.subclasses.pyfrp_ROI.ROI): ROI to match with.
	
	Return:
		tuple: Tuple containing:
		
			* imgIdxX (list): Matched image indices in x direction.
			* imgIdxY (list): Matched image indices in y direction.
	
	"""
	
	# Intersection of the two ROIs = elementwise product of their binary masks
	self.imgMask=self.computeImgMask()*r.computeImgMask()
	
	self.imgIdxX,self.imgIdxY=pyfrp_idx_module.mask2ind(self.imgMask,self.embryo.dataResPx)
	
	return self.imgIdxX,self.imgIdxY
def matchMeshIdx(self,r,matchZ=False):
	
	"""Matches mesh indices of ROI with the ones of a different ROI.
	
	Keeps only those of ``self``'s mesh nodes whose x/y coordinates lie
	inside ROI ``r`` (via ``r.checkXYInside``).
	
	Args:
		r (pyfrp.subclasses.pyfrp_ROI.ROI): ROI to match with.
	
	Keyword Args:
		matchZ (bool): Also match with respect to z.
			NOTE(review): this flag is currently unused in the body — matching
			is x/y only regardless of its value. TODO confirm intended behavior.
	
	Returns:
		numpy.ndarray: Matched list of mesh indices.
	
	"""
	
	x,y,z=self.embryo.simulation.mesh.getCellCenters()
	
	# fipy variables do not support fancy indexing -> convert to numpy first
	x=np.asarray(x)[self.meshIdx]
	y=np.asarray(y)[self.meshIdx]
	
	ins=r.checkXYInside(x,y)
	
	self.meshIdx=np.asarray(self.meshIdx)
	self.meshIdx=self.meshIdx[np.where(ins)[0]]
	
	return self.meshIdx
def checkZInside(self,z):
	
	"""Checks if z coordinate is within the ROI's z-range.
	
	Boundaries count as inside (closed interval ``[zmin,zmax]``).
	
	Args:
		z (float): z-coordinate.
	
	Returns:
		bool: True if inside.
	
	"""
	
	return (self.zmin<=z) and (z<=self.zmax)
def showIdxs(self,axes=None):
	
	"""Shows all three types of ROI indices.
	
	Shows:
	
		* Mesh indices by calling :py:func:`pyfrp.subclasses.pyfrp_ROI.ROI.showMeshIdx`
		* Image indices by calling :py:func:`pyfrp.subclasses.pyfrp_ROI.ROI.showImgIdx`
		* Extended indices by calling :py:func:`pyfrp.subclasses.pyfrp_ROI.ROI.showExtImgIdx`
	
	Keyword Args:
		axes (list): List of axes to plot in (at least ``len(axes)=3``).
	
	Returns:
		list: List of ``matplotlib.axes``.
	
	"""
	
	# Remember whether we created the axes ourselves; only then set the title below
	createdAxes=(axes==None)
	
	if createdAxes:
		fig,axes = pyfrp_plot_module.makeSubplot([1,3],titles=["MeshIdx","imgIdx","extImgIdx"],sup=self.name+" Idx",proj=['3d',None,None])
	
	self.showMeshIdx(axes[0])
	self.showImgIdx(axes[1])
	self.showExtImgIdx(axes[2])
	
	if createdAxes:
		axes[0].set_title(self.name+" Idx")
	
	return axes
def checkSymmetry(self,debug=False):
	
	"""Checks whether the ROI's image indices form a symmetric pattern.
	
	Builds a binary image of resolution ``embryo.dataResPx`` with ones at the
	ROI's pixel indices and passes it to ``pyfrp_img_module.symmetryTest``.
	
	Keyword Args:
		debug (bool): Print out debugging messages.
	
	Returns:
		bool: Result of the symmetry test.
	
	"""
	
	img=np.zeros((self.embryo.dataResPx,self.embryo.dataResPx))
	img[self.imgIdxX,self.imgIdxY]=1
	return pyfrp_img_module.symmetryTest(img,debug=debug)
def idxs2Quad(self,debug=False):
	
	"""Reduces the ROI's image indices to the first quadrant.
	
	Only proceeds if the indices pass :py:func:`checkSymmetry`; otherwise the
	indices are left unchanged.
	
	Keyword Args:
		debug (bool): Print out debugging messages.
	
	Returns:
		tuple: All indices, as returned by ``getAllIdxs``.
	
	"""
	
	if not self.checkSymmetry():
		printWarning('Cannot reduce region '+self.name+' to quadrant. Indices are not symmetric.')
		return self.getAllIdxs()
	
	# NOTE(review): imgIdx2Quad is called unqualified here — presumably imported
	# at module level; verify it resolves (elsewhere idx helpers are called via
	# pyfrp_idx_module).
	self.imgIdxX,self.imgIdxY=imgIdx2Quad(self.imgIdxX,self.imgIdxY,self.embryo.dataResPx,debug=debug)
	
	if 'Quad' not in self.embryo.geometry.typ:
		printWarning('Will not update mesh indices, geometry is not set to quad.')
	else:
		# NOTE(review): computeMeshIdx is called without a mesh argument here,
		# while other call sites pass self.embryo.simulation.mesh — confirm signature.
		self.computeMeshIdx()
	
	return self.getAllIdxs()
def idxs2Full(self):
	
	"""Restores the ROI's indices for the full dataset by recomputing them.
	
	Counterpart of :py:func:`idxs2Quad`; simply delegates to :py:func:`computeIdxs`.
	
	Returns:
		tuple: All indices, as returned by ``computeIdxs``.
	
	"""
	
	return self.computeIdxs()
def resetDataVec(self):
	
	"""Resets data vector to an empty list.
	
	Returns:
		pyfrp.subclasses.pyfrp_ROI.ROI: Updated ROI object for chaining.
	
	"""
	
	emptyVec=[]
	self.setDataVec(emptyVec)
	
	return self
def resetSimVec(self):
	
	"""Resets simulation vector to an empty list.
	
	Returns:
		pyfrp.subclasses.pyfrp_ROI.ROI: Updated ROI object for chaining.
	
	"""
	
	emptyVec=[]
	self.setSimVec(emptyVec)
	
	return self
def plotData(self,ax=None,color=None,linewidth=1,legend=True,linestyle='-',label=None,legLoc=-1):
	
	"""Plot data vector of ROI.
	
	If no color is specified, will use color specified in ``ROI.color``.
	
	Keyword Args:
		ax (matplotlib.axes): Matplotlib axes used for plotting. If not specified, will generate new one.
		color (str): Color of plot.
		linestyle (str): Linestyle of plot.
		linewidth (float): Linewidth of plot.
		legend (bool): Show legend.
		label (str): Label of plot. Defaults to ``name + ' data'``.
		legLoc (int): Location of legend.
	
	Returns:
		matplotlib.axes: Axes used for plotting.
	
	"""
	
	if color==None:
		color=self.color
	
	# Bugfix: label used to default to "... simulated" (copy/paste from plotSim)
	# and was then ignored — plotTS always received a hard-coded label. The
	# label kwarg is now honored and defaults to the data label.
	if label==None:
		label=self.name + ' data'
	
	ax = pyfrp_plot_module.plotTS(self.embryo.tvecData,self.dataVec,ax=ax,linewidth=linewidth,color=color,label=label,
		title="Data",sup=self.name+" data",linestyle=linestyle,legend=legend,legLoc=legLoc)
	
	return ax
def plotDataPinned(self,ax=None,color=None,linewidth=1,legend=True,linestyle='-',label=None,legLoc=-1):
	
	"""Plot pinned data vector of ROI.
	
	If no color is specified, will use color specified in ``ROI.color``.
	
	Keyword Args:
		ax (matplotlib.axes): Matplotlib axes used for plotting. If not specified, will generate new one.
		color (str): Color of plot.
		linestyle (str): Linestyle of plot.
		linewidth (float): Linewidth of plot.
		legend (bool): Show legend.
		label (str): Label of plot. Defaults to ``name + ' data'``.
		legLoc (int): Location of legend.
	
	Returns:
		matplotlib.axes: Axes used for plotting.
	
	"""
	
	if color==None:
		color=self.color
	
	# Bugfix: label used to default to "... simulated" and was then ignored —
	# plotTS always received a hard-coded label. The label kwarg is now honored.
	if label==None:
		label=self.name + ' data'
	
	ax = pyfrp_plot_module.plotTS(self.embryo.tvecData,self.dataVecPinned,ax=ax,linewidth=linewidth,color=color,label=label,
		title="Data Pinned",sup=self.name+" data",linestyle=linestyle,legend=legend,legLoc=legLoc)
	
	return ax
def plotSim(self,ax=None,color=None,linewidth=1,legend=True,linestyle='--',label=None,legLoc=-1):
	
	"""Plot simulation vector of ROI.
	
	If no color is specified, will use color specified in ``ROI.color``.
	
	Keyword Args:
		ax (matplotlib.axes): Matplotlib axes used for plotting. If not specified, will generate new one.
		color (str): Color of plot.
		linestyle (str): Linestyle of plot.
		linewidth (float): Linewidth of plot.
		legend (bool): Show legend.
		label (str): Label of plot. Defaults to ``name + ' simulated'``.
		legLoc (int): Location of legend.
	
	Returns:
		matplotlib.axes: Axes used for plotting.
	
	"""
	
	# Fill in defaults from ROI attributes
	color = self.color if color==None else color
	label = (self.name + ' simulated') if label==None else label
	
	ax = pyfrp_plot_module.plotTS(self.embryo.simulation.tvecSim,self.simVec,ax=ax,linewidth=linewidth,color=color,
		label=label,title="Simulation",sup=self.name+" simulation",linestyle=linestyle,legend=legend,legLoc=legLoc)
	
	return ax
def plotSimPinned(self,ax=None,color=None,linewidth=1,legend=True,linestyle='--',label=None,legLoc=-1):
	
	"""Plot pinned simulation vector of ROI.
	
	If no color is specified, will use color specified in ``ROI.color``.
	
	Keyword Args:
		ax (matplotlib.axes): Matplotlib axes used for plotting. If not specified, will generate new one.
		color (str): Color of plot.
		linestyle (str): Linestyle of plot.
		linewidth (float): Linewidth of plot.
		legend (bool): Show legend.
		label (str): Label of plot. Defaults to ``name + ' simulated'``.
		legLoc (int): Location of legend.
	
	Returns:
		matplotlib.axes: Axes used for plotting.
	
	"""
	
	if color==None:
		color=self.color
	if label==None:
		label=self.name + ' simulated'
	
	# Bugfix: label kwarg was computed but ignored; plotTS received the
	# hard-coded string self.name + ' ' + ' simulated' (with a double space).
	ax = pyfrp_plot_module.plotTS(self.embryo.simulation.tvecSim,self.simVecPinned,ax=ax,linewidth=linewidth,color=color,
		label=label,title="Simulation Pinned",sup=self.name+" simulation",linestyle=linestyle,legend=legend,legLoc=legLoc)
	
	return ax
def findIncluded(self):
	
	"""Returns list of :py:class:`pyfrp.subclasses.pyfrp_ROI.customROI` objects
	in which ROI is included.
	
	Returns:
		list: List of customROIs.
	
	"""
	
	# Only customROIs can contain other ROIs, so filter on exact type first
	return [r for r in self.embryo.ROIs if type(r) is customROI and r.roiIncluded(self)]
def isMaster(self):
	
	"""Returns if ROI is masterROI.
	
	Returns:
		bool: True if masterROI.
	
	"""
	
	master=self.embryo.getMasterROI()
	return self==master
def getMaxExtendPlane(self):
	
	"""Returns in which plane ("xy","xz","yz") the ROI has the biggest extend.
	
	Returns:
		str: Plane with largest extend.
	
	"""
	
	# Extend along each coordinate axis
	xmin,xmax,ymin,ymax,zmin,zmax = self.getExtend()
	extends=[abs(xmax-xmin),abs(ymax-ymin),abs(zmax-zmin)]
	
	# Indices of the two largest extends define the plane
	mExts,indExts=pyfrp_misc_module.getIdxOfNLargest(extends,2)
	
	plane=""
	for axisIdx,axisLabel in enumerate(["x","y","z"]):
		if axisIdx in indExts:
			plane=plane+axisLabel
	
	if len(plane)!=2:
		printWarning("Something went wrong finding plane. Plane is " + plane)
	
	return plane
def getPlaneMidCoordinate(self):
	
	"""Returns midpoint of extend orthogonal to plane of maximum extension.
	
	Returns:
		float: Midpoint (``None`` if the plane is unrecognized).
	
	"""
	
	plane=self.getMaxExtendPlane()
	xmin,xmax,ymin,ymax,zmin,zmax = self.getExtend()
	
	# Midpoint along the axis orthogonal to the plane of maximum extension
	mids={'xy':(zmax+zmin)/2.,'xz':(ymax+ymin)/2.,'yz':(xmax+xmin)/2.}
	
	return mids.get(plane)
def getOrthogonal2Plane(self):
	
	"""Returns orthogonal direction to plane of maximum extension.
	
	See also :py:func:`pyfrp.subclasses.pyfrp_ROI.ROI.getPlaneMidCoordinate` and
	:py:func:`pyfrp.subclasses.pyfrp_ROI.ROI.getMaxExtendPlane` .
	
	Returns:
		str: Direction (``None`` if the plane is unrecognized).
	
	"""
	
	# The axis missing from the plane string is its orthogonal direction
	orthogonals={'xy':'z','xz':'y','yz':'x'}
	
	return orthogonals.get(self.getMaxExtendPlane())
def getExtend(self):
	
	"""Returns x-/y-/z-extend of ROI.
	
	Returns:
		tuple: Tuple containing:
		
			* xmin (float): Minimum x-coordinate.
			* xmax (float): Maximum x-coordinate.
			* ymin (float): Minimum y-coordinate.
			* ymax (float): Maximum y-coordinate.
			* zmin (float): Minimum z-coordinate.
			* zmax (float): Maximum z-coordinate.
	
	"""
	
	xExt,yExt = self.computeXYExtend()
	zmin,zmax = self.getZExtend()
	
	# Flatten [xmin,xmax],[ymin,ymax],zmin,zmax into one tuple
	return xExt[0],xExt[1],yExt[0],yExt[1],zmin,zmax
def plotSolutionVariable(self,phi,ax=None,vmin=None,vmax=None,nlevels=25,colorbar=True,plane='xy',zs=None,zdir=None,mask=True,nPts=1000,mode='normal',title="Solution Variable",
	typ='contour'):
	
	"""Plots simulation solution variable over all indices of ROI as 2D contour plot.
	
	.. note:: If no ``ax`` is given, will create new one.
	
	``plane`` variable controls in which plane the solution variable is supposed to be plotted.
	Acceptable input variables are ``"xy","xz","yz"``. See also
	:py:func:`pyfrp.subclasses.pyfrp_ROI.ROI.getMaxExtendPlane`.
	
	See also http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.tricontourf .
	
	.. warning:: ``matplotlib.pyplot.tricontourf`` has problems when ``phi`` only is in a single level of contour plot.
		To avoid this, we currently add some noise in this case just to make it plottable. This is not the most elegant
		solution.
	
	You can find a more detailed explanation in the documentation of :py:func:`pyfrp.modules.pyfrp_plot_module.plotSolutionVariable`.
	
	Args:
		phi (fipy.CellVariable): Solution variable.
	
	Keyword Args:
		ax (matplotlib.axes): Axes used for plotting.
		vmin (float): Minimum value displayed in contour plot.
		vmax (float): Maximum value displayed in contour plot.
		nlevels (int): Number of contour levels.
		colorbar (bool): Display color bar.
		plane (str): Plane in which solution variable is supposed to be plotted.
		zs (float): In case of a 3D plot, height in direction zdir where to put contour.
		zdir (str): Orthogonal direction to plane.
		mask (bool): Mask nodes that are further apart than the node-distance threshold.
		nPts (int): Number of points used for interpolating (only if ``mode=normal``).
		mode (str): Which contour function to use.
		title (str): Title of plot.
		typ (str): Type of plot.
	
	Returns:
		matplotlib.axes: Axes used for plotting.
	
	"""
	
	#Get values of phi if necessary
	if hasattr(phi,'value'):
		val=np.asarray(phi.value)
	else:
		val=np.asarray(phi)
	
	#Get x/y/z/values coordinates in ROI
	x,y,z=self.embryo.simulation.mesh.getCellCenters()
	x=x[self.meshIdx]
	y=y[self.meshIdx]
	z=z[self.meshIdx]
	val=val[self.meshIdx]
	
	#Finding distance threshold later used for masking nodes.
	dmaxX,dmaxY,dmaxZ=self.getMaxNodeDistance()
	# Safety factor on top of the maximum observed node distance
	dFact=1.5
	
	# Select the two coordinates spanning the requested plane and the
	# corresponding distance threshold
	if plane=='xy':
		X=x
		Y=y
		D=dFact*max([dmaxX,dmaxY])
	elif plane=='xz':
		X=x
		Y=z
		D=dFact*max([dmaxX,dmaxZ])
	elif plane=='yz':
		X=y
		Y=z
		D=dFact*max([dmaxY,dmaxZ])
	else:
		printError("Don't understand plane="+plane+". Will not plot.")
		return None
	
	# D=None disables masking downstream
	if not mask:
		D=None
	
	# NOTE(review): plotSolutionVariable is called with x,y below, not the
	# plane-selected X,Y computed above — confirm whether X/Y should be passed
	# for plane!='xy'.
	ax=pyfrp_plot_module.plotSolutionVariable(x,y,val,ax=ax,vmin=vmin,vmax=vmax,nlevels=nlevels,colorbar=colorbar,
		plane=plane,zs=zs,zdir=zdir,sup=self.name,dThresh=D,nPts=nPts,mode=mode,title=title,typ=typ)
	
	return ax
def getSimConc(self,phi,append=True):
	
	"""Computes the simulation concentration over ROI.
	
	Args:
		phi (fipy.CellVariable): Solution variable.
	
	Keyword Args:
		append (bool): Append result to simulation vector.
	
	Returns:
		float: Simulation concentration over ROI.
	
	"""
	
	# Volume-weighted average concentration over all mesh cells in the ROI
	volumes=self.embryo.simulation.mesh.mesh.getCellVolumes()
	conc=pyfrp_integration_module.getAvgConc(phi,volumes,self.meshIdx)
	
	if append:
		self.simVec.append(conc)
	
	return conc
def pinAllTS(self,bkgdVal=None,normVal=None,bkgdValSim=None,normValSim=None,debug=False):
	
	"""Pins both data and simulation timeseries of ROI.
	
	See also :py:func:`pyfrp.modules.pyfrp_fit_module.pinConc`.
	
	Keyword Args:
		bkgdVal (float): Use this background value instead of newly computing it.
		normVal (float): Use this norming value instead of newly computing it.
		bkgdValSim (float): Use this background value for simulation timeseries instead of newly computing it.
		normValSim (float): Use this norming value for simulation timeseries instead of newly computing it.
		debug (bool): Print debugging messages.
	
	Returns:
		tuple: Tuple containing:
		
			* dataVecPinned (numpy.ndarray): Pinned data vector.
			* simVecPinned (numpy.ndarray): Pinned simulation vector.
	
	"""
	
	# Simulation pinning values fall back to the data ones if not given explicitly
	if bkgdValSim==None:
		bkgdValSim=bkgdVal
	if normValSim==None:
		normValSim=normVal
	
	self.dataVecPinned = self.pinDataTS(bkgdVal=bkgdVal,normVal=normVal,debug=debug)
	self.simVecPinned = self.pinSimTS(bkgdVal=bkgdValSim,normVal=normValSim,debug=debug)
	
	return self.dataVecPinned,self.simVecPinned
def pinDataTS(self,bkgdVal=None,normVal=None,debug=False):
	
	"""Pins data timeseries of ROI.
	
	See also :py:func:`pyfrp.modules.pyfrp_fit_module.pinConc`.
	
	Keyword Args:
		bkgdVal (float): Use this background value instead of newly computing it.
		normVal (float): Use this norming value instead of newly computing it.
		debug (bool): Print debugging messages.
	
	Returns:
		numpy.ndarray: Pinned data vector.
	
	"""
	
	# Compute defaults only when at least one of the values is missing
	if bkgdVal==None or normVal==None:
		bkgdDefault,normDefault = self.embryo.computePinVals(debug=debug)
		if bkgdVal==None:
			bkgdVal=bkgdDefault
		if normVal==None:
			normVal=normDefault
	
	self.dataVecPinned=pyfrp_fit_module.pinConc(self.dataVec,bkgdVal,normVal,axes=None,debug=debug,tvec=self.embryo.tvecData,color=self.color)
	
	return self.dataVecPinned
def pinSimTS(self,bkgdVal=None,normVal=None,debug=False):
	
	"""Pins simulation timeseries of ROI.
	
	See also :py:func:`pyfrp.modules.pyfrp_fit_module.pinConc`.
	
	Keyword Args:
		bkgdVal (float): Use this background value instead of newly computing it.
		normVal (float): Use this norming value instead of newly computing it.
		debug (bool): Print debugging messages.
	
	Returns:
		numpy.ndarray: Pinned simulation vector.
	
	"""
	
	# Compute defaults only when at least one of the values is missing
	if bkgdVal==None or normVal==None:
		bkgdDefault,normDefault = self.embryo.computePinVals(debug=debug)
		if bkgdVal==None:
			bkgdVal=bkgdDefault
		if normVal==None:
			normVal=normDefault
	
	self.simVecPinned=pyfrp_fit_module.pinConc(self.simVec,bkgdVal,normVal,axes=None,debug=debug,tvec=self.embryo.simulation.tvecSim,color=self.color)
	
	return self.simVecPinned
def getFittedVec(self,fit):
	
	"""Returns fitted simulation vector of ROI of given fit.
	
	.. note:: To avoid crashes, function returns empty list
		if ROI is in ``ROIsFitted`` but has not been fitted yet.
		Also inserts an empty list at the respective index.
	
	Args:
		fit (pyfrp.subclasses.pyfrp_fit): Fit object.
	
	Returns:
		numpy.ndarray: Fitted simulation vector.
	
	"""
	
	idx=fit.ROIsFitted.index(self)
	
	try:
		return fit.fittedVecs[idx]
	except IndexError:
		# ROI is registered for fitting but has no result yet; insert a
		# placeholder so indices stay aligned. (Temporary workaround.)
		fit.fittedVecs.insert(idx,[])
		return []
def getdataVecFitted(self,fit):
	
	"""Returns fitted data vector of ROI of given fit.
	
	.. note:: To avoid crashes, function returns empty list
		if ROI is in ``ROIsFitted`` but has not been fitted yet.
		Also inserts an empty list at the respective index.
	
	Args:
		fit (pyfrp.subclasses.pyfrp_fit): Fit object.
	
	Returns:
		numpy.ndarray: Fitted data vector.
	
	"""
	
	idx=fit.ROIsFitted.index(self)
	
	try:
		return fit.dataVecsFitted[idx]
	except IndexError:
		# ROI is registered for fitting but has no result yet; insert a
		# placeholder so indices stay aligned.
		fit.dataVecsFitted.insert(idx,[])
		return []
def plotFit(self,fit,ax=None,color=None,linewidth=1,legend=True,title=None,linestyles=['-','-.'],show=True):
	
	"""Plot fit for ROI.
	
	Plots both the fitted simulation vector (``linestyles[1]``) and the fitted
	data vector (``linestyles[0]``) of this ROI for the given fit.
	
	If no color is specified, will use color specified in ``ROI.color``.
	
	Args:
		fit (pyfrp.subclasses.pyfrp_fit): Fit object to plot.
	
	Keyword Args:
		ax (matplotlib.axes): Matplotlib axes used for plotting. If not specified, will generate new one.
		color (str): Color of plot.
		title (str): Plot title. Defaults to ``"Fit "+fit.name``.
		linestyles (list): Linestyles of data and simulation.
		linewidth (float): Linewidth of plot.
		legend (bool): Show legend.
		show (bool): Show figure.
	
	Returns:
		matplotlib.axes: Axes used for plotting.
	
	"""
	
	if color==None:
		color=self.color
	if title==None:
		title="Fit "+fit.name
	
	# Fitted simulation result first, then the (scaled) data on the same axes
	ax = pyfrp_plot_module.plotTS(fit.tvecFit,self.getFittedVec(fit),ax=ax,linewidth=linewidth,color=color,
		label=self.name + ' ' + fit.name,title=title,sup=self.name+" fitted",linestyle=linestyles[1],legend=legend,show=show)
	
	ax = pyfrp_plot_module.plotTS(fit.tvecFit,self.getdataVecFitted(fit),ax=ax,linewidth=linewidth,color=color,
		label=self.name + ' ' + fit.name,title=title,sup=self.name+" fitted",linestyle=linestyles[0],legend=legend,show=show)
	
	return ax
def isAnalyzed(self):
	
	"""Checks if ROI has been analyzed.
	
	The ROI counts as analyzed once its data vector has one entry per
	data timepoint.
	
	Returns:
		bool: True if ROI has been analyzed.
	
	"""
	
	nTimepoints=len(self.embryo.tvecData)
	nDatapoints=len(self.dataVec)
	
	return nTimepoints==nDatapoints
def isSimulated(self):
	
	"""Checks if ROI has been simulated.
	
	The ROI counts as simulated once its simulation vector has one entry per
	simulation timepoint. Without a simulation object, returns False.
	
	Returns:
		bool: True if ROI has been simulated.
	
	"""
	
	# Guard clause: no simulation object means nothing was simulated
	if self.embryo.simulation==None:
		return False
	
	return len(self.embryo.simulation.tvecSim)==len(self.simVec)
def isFitted(self):
	
	"""Checks if ROI has been fitted in ALL fits of embryo.
	
	Returns:
		bool: True if ROI has been fitted.
	
	"""
	
	# Deliberately no early return: getFittedVec may insert placeholder
	# entries per fit, so every fit is visited.
	fitted=False
	for fit in self.embryo.fits:
		if self in fit.ROIsFitted:
			if len(self.getFittedVec(fit))>0:
				fitted=True
	
	return fitted
def getInterpolationError(self):
	
	"""Prints out interpolation error for the volume of this ROI.
	
	Interpolation error is defined as:
	
	``dataVec[0]/simVec[0]``,
	
	That is, by how much does the first simulation value defer from first data
	value.
	
	Returns:
		float: Interpolation error.
	
	"""
	
	# Guard clause: both timeseries must exist before comparing first values
	if not (self.isSimulated() and self.isAnalyzed()):
		printWarning("ROI is either not simulated or analyzed. Cannot return interpolation error.")
		return 0.
	
	try:
		return self.dataVec[0]/self.simVec[0]
	except ZeroDivisionError:
		printWarning("Dividing by zero. Going to return infinity.")
		return np.inf
def getEncapsulatingBox(self):
	
	"""Returns encapsulating box of ROI.
	
	That is, a box defined by ``[xmin,xmax],[ymin,ymax],[zmin,zmax]``
	in which ROI lies fully within.
	
	Returns:
		tuple: Tuple containing:
		
			* xExtend (list): List describing extend in x-direction (``[xmin,xmax]``).
			* yExtend (list): List describing extend in y-direction (``[ymin,ymax]``).
			* zExtend (list): List describing extend in z-direction (``[zmin,zmax]``).
	
	"""
	
	xExt,yExt=self.computeXYExtend()
	zExt=self.getZExtend()
	
	return xExt,yExt,zExt
def refineInMeshByField(self,factor=3.,addZ=15.,findIdxs=True,debug=False,run=True,fnOut=None):
	
	"""Refines mesh inside ROI by adding box field to mesh file.
	
	The mesh size inside the box is computed by ``mesh.volSizePx/factor``. To ensure
	that there are enough original nodes inside ROI that then allow refinement from,
	``addZ`` pixels is added in z-direction both below and above the ROI.
	
	See also :py:func:`pyfrp.subclasses.pyfrp_mesh.mesh.addBoxField`.
	
	Keyword Args:
		factor (float): Refinement factor.
		addZ (float): Number of pixels added above and below ROI for box field.
		findIdxs (bool): Find mesh indices of ROI after refinement.
		run (bool): Run Gmsh to generate new mesh after refinement.
		debug (bool): Print debugging messages.
		fnOut (str): Path to output geo file.
	
	Returns:
		str: Path to new .geo file.
	
	"""
	
	# Box field covers the ROI's bounding box, padded in z by addZ on both sides
	xExtend,yExtend,zExtend=self.getEncapsulatingBox()
	zExtend=[zExtend[0]-addZ,zExtend[1]+addZ]
	
	if debug:
		print "Adding Box Field for ROI " + self.name
		print "Mesh Nodes in ROI before: ", len(self.meshIdx)
	
	fnOut=self.embryo.simulation.mesh.addBoxField(self.embryo.simulation.mesh.volSizePx/factor,xExtend,yExtend,zExtend,comment=self.name+" field",run=run,fnOut=fnOut)
	
	# Re-index against the refined mesh so meshIdx stays valid
	if findIdxs:
		self.computeMeshIdx(self.embryo.simulation.mesh)
	
	if debug and findIdxs:
		print "Mesh Nodes in ROI after: ", len(self.meshIdx)
	
	return fnOut
def adaptRefineInMeshByField(self,nNodesReq,factor=3.,addZ=15.,zIncrement=1.,fIncrement=1.,nNodesMax='inf',debug=False,ROIReq=None,fnOut=None):
	
	"""Refines mesh inside ROI adaptively until a given number of nodes inside ROI
	is reached.
	
	Does this by:
	
		* Refining through :py:func:`refineInMeshByField`.
		* Computing mesh indices via :py:func:`computeMeshIdx`.
		* If number of nodes did not change, increase ``addZ``, else increase ``factor``.
		* Check if desired number of nodes is reached or not, if not, repeat.
	
	.. note:: If the new number of nodes in the ROI exceeds ``nNodesMax``, will revert the last step
		and perform the other operation, e.g. increasing ``addZ`` instead of ``factor`` and vice versa.
	
	.. note:: If ``ROIReq`` is given, will try to refine in ``self`` such that ``ROIReq`` has at least ``nNodesReq``
		mesh nodes. If it is not given, ``nNodesReq`` refers to the nodes in ``self``.
	
	Args:
		nNodesReq (int): Desired number of nodes inside ROI.
	
	Keyword Args:
		factor (float): Refinement factor.
		addZ (float): Number of pixels added above and below ROI for box field.
		zIncrement (float): Number of pixels addZ is increased per adaptive step.
		fIncrement (float): Stepsize of refinement factor.
		nNodesMax (float): Maximum number of nodes allowed in ROI.
		debug (bool): Print debugging messages.
		ROIReq (pyfrp.subclasses.pyfrp_ROI.ROI): The ROI object that is referred to with nNodesReq.
		fnOut (str): Path to output geo file.
	
	Returns:
		int: Final number of nodes in ROI.
	
	"""
	
	#Convert nNodesMax if necessary ('inf' string -> numpy infinity)
	nNodesMax=pyfrp_misc_module.translateNPFloat(nNodesMax)
	
	#Get current node numbers
	if ROIReq==None:
		self.computeMeshIdx(self.embryo.simulation.mesh)
		nNodes=len(self.meshIdx)
		nNodesAll=self.embryo.simulation.mesh.getNNodes()
	else:
		ROIReq.computeMeshIdx(ROIReq.embryo.simulation.mesh)
		nNodes=len(ROIReq.meshIdx)
		nNodesROIReq=len(ROIReq.meshIdx)
		nNodesAll=ROIReq.embryo.simulation.mesh.getNNodes()
	
	nNodesROI=len(self.meshIdx)
	
	#Init flags (mode: 0 = last step changed addZ, 1 = last step changed factor)
	mode=0
	i=0
	
	#As long as requirement isn't met, refine
	while nNodes<nNodesReq:
		
		self.refineInMeshByField(factor=factor,addZ=addZ,findIdxs=True,debug=False,run=True,fnOut=fnOut)
		
		#Compute updated idxs
		nNodesAllNew=self.embryo.simulation.mesh.getNNodes()
		if ROIReq==None:
			nNodesNew=len(self.meshIdx)
		else:
			ROIReq.computeMeshIdx(ROIReq.embryo.simulation.mesh)
			nNodesNew=len(ROIReq.meshIdx)
			nNodesROIReqNew=len(ROIReq.meshIdx)
		
		nNodesROINew=len(self.meshIdx)
		
		#Print out current status
		if debug:
			print "Iteration ", i, ". "
			print "Current parameters: addZ = ", addZ, " factor = ", factor
			print "Total mesh nodes: ", nNodesAllNew
			print "Mesh Nodes in ROI before refinement: " , nNodesROI, " and after ", nNodesROINew, "."
			if ROIReq!=None:
				print "Mesh Nodes in ROIReq before refinement: " , nNodesROIReq, " and after ", nNodesROIReqNew, "."
		
		#Check if nNodes requirement is met
		if nNodesNew<nNodesReq:
			# Unchanged total node count means the box field had no effect:
			# grow the box (addZ); otherwise refine harder (factor).
			if nNodesAllNew==nNodesAll:
				if debug:
					print "nNodesAll did not change, will increase addZ by ",zIncrement,". \n"
				addZ=addZ+zIncrement
				mode=0
			else:
				if debug:
					print "nNodes not large enough yet, will increase factor by ",fIncrement,". \n"
				factor=factor+fIncrement
				mode=1
		
		#Check if maximum number of nodes was exceeded
		elif nNodesNew>nNodesMax:
			if debug:
				print "Number of nodes exceeded maximum allowed number", nNodesMax, "."
			
			# Revert the last adjustment and try the other knob instead
			if mode==0:
				if debug:
					print "Previously tried to increase addZ. Will try old addZ, but increase factor by " ,fIncrement,". \n"
				addZ=addZ-zIncrement
				factor=factor+fIncrement
				mode=1
			elif mode==1:
				if debug:
					print "Previously tried to increase factor. Will try old factor, but increase addZ by " ,zIncrement,". \n"
				addZ=addZ+zIncrement
				factor=factor-fIncrement
				mode=0
		
		i=i+1
		
		#Update old counter
		nNodes=nNodesNew
		nNodesAll=nNodesAllNew
		nNodesROI=nNodesROINew
		if ROIReq!=None:
			nNodesROIReq=nNodesROIReqNew
	
	return nNodes
def printDetails(self):
	
	"""Prints out all attributes of ROI object.
	
	Uses the module-level ``printAllObjAttr`` helper for the attribute dump.
	"""
	
	print "ROI ", self.name, " details:"
	printAllObjAttr(self)
	print
def plotSimConcProfile(self,phi,ax=None,direction='x',mode='normal',nbins=20,color=None,label=None,legend=False):
	
	"""Plots concentration profile of solution variable in
	single direction.
	
	``mode`` can be either ``"normal"`` or ``"hist"``. If ``mode="hist"``, will plot a histogram with ``nbins`` bins using
	:py:func:`pyfrp.modules.pyfrp_misc_module.simpleHist`.
	
	.. note:: ``direction`` sets in which direction the profile should be plotted. if ``direction="r"``, then function
		will plot a radial profile and uses ``self.embryo.geometry.center`` as center if ROI does not have a center,
		else uses center of ROI.
	
	.. note:: Will create axes if not given via ``ax``.
	
	Example:
	
	Grab ROI:
	
	>>> sl=emb.getROIByName("Slice")
	
	Make some plot:
	
	>>> fig,axes=pyfrp_plot_module.makeSubplot([2,2])
	
	Plot some concentration profiles:
	
	>>> ax=sl.plotSimConcProfile(emb.simulation.IC,mode='hist',color='g',label='direction = x',nbins=100,ax=axes[0],legend=False)
	>>> ax=sl.plotSimConcProfile(emb.simulation.IC,mode='hist',direction='y',color='r',label='direction = y',nbins=100,ax=axes[1],legend=False)
	>>> ax=sl.plotSimConcProfile(emb.simulation.IC,mode='hist',direction='r',color='b',nbins=100,label='direction = r',ax=axes[2],legend=False)
	>>> ax=sl.plotSimConcProfile(emb.simulation.IC,mode='normal',direction='r',color='b',label='direction = r',ax=axes[3],legend=False)
	
	.. image:: ../imgs/pyfrp_ROI/plotSimConcProfile.png
	
	Args:
		phi (fipy.CellVariable): Solution variable
	
	Keyword Args:
		ax (matplotlib.axes): Axes to be plotted in.
		direction (str): Direction to be plotted (x/y/z/r).
		color (str): Color of plot.
		legend (bool): Show legend.
		label (str): Label of plot.
		nbins (int): Number of bins of histogram.
		mode (str): Either ``normal`` or ``hist``.
	
	Returns:
		matplotlib.axes: Matplotlib axes used for plotting.
	
	"""
	
	if color==None:
		color=self.color
	if label==None:
		label=self.name
	
	if ax==None:
		fig,axes = pyfrp_plot_module.makeSubplot([1,1],titles=["Concentration profile"],sup=self.name+" phi")
		ax=axes[0]
	
	# Pick the abscissa from the mesh cell centers according to direction
	if direction=='x':
		x=self.embryo.simulation.mesh.getCellCenters()[0]
	elif direction=='y':
		x=self.embryo.simulation.mesh.getCellCenters()[1]
	elif direction=='z':
		x=self.embryo.simulation.mesh.getCellCenters()[2]
	elif direction=='r':
		# Radial distance from ROI center if defined, otherwise geometry center
		if hasattr(self,'center'):
			center=self.center
		else:
			center=self.embryo.geometry.center
		x=np.sqrt((self.embryo.simulation.mesh.getCellCenters()[0]-center[0])**2+(self.embryo.simulation.mesh.getCellCenters()[1]-center[1])**2)
	else:
		printError('Direction '+ direction+ 'unknown. Will not plot.')
		return ax
	
	# Restrict to mesh nodes inside ROI (convert from fipy variable first)
	x=np.asarray(x)[self.meshIdx]
	
	if hasattr(phi,'value'):
		v=np.asarray(phi.value)[self.meshIdx]
	else:
		v=np.asarray(phi)[self.meshIdx]
	
	# Sort values along the abscissa so the profile is monotonic in x
	vSorted,xSorted=pyfrp_misc_module.sortListsWithKey(v,x)
	
	if mode=='hist':
		xSorted,vSorted=pyfrp_misc_module.simpleHist(xSorted,vSorted,bins=nbins)
	
	pyfrp_plot_module.plotTS(xSorted,vSorted,color=color,label=label,legend=legend,ax=ax)
	
	ax.set_xlabel(direction)
	ax.set_ylabel("Concentration")
	
	pyfrp_plot_module.redraw(ax)
	
	return ax
def getCopy(self):
	
	"""Returns a copy of the ROI object.
	
	Uses ``copy.copy`` to generate a shallow copy of the object, see also
	https://docs.python.org/2/library/copy.html .
	
	``copy.deepcopy`` would also generate copies of referenced objects,
	including ``ROI.embryo``, which is not desired here.
	
	Returns:
		pyfrp.subclasses.pyfrp_ROI.ROI: Copied ROI.
	
	"""
	
	roiCopy=copy.copy(self)
	return roiCopy
def getNMeshNodes(self):
	
	"""Returns number of mesh nodes currently indexed inside ROI.
	
	Returns:
		int: Number of nodes.
	
	"""
	
	return len(self.meshIdx)
def getNImgPxs(self):
	
	"""Returns number of image pixels currently indexed inside ROI.
	
	Returns:
		int: Number of indices.
	
	"""
	
	return len(self.imgIdxX)
def getMaxNodeDistance(self):
	
	"""Returns maximum node distance in x/y/z direction
	for all nodes in ROI.
	
	Returns:
		tuple: Tuple containing:
		
			* dmaxX (float): Maximum distance in x-direction
			* dmaxY (float): Maximum distance in y-direction
			* dmaxZ (float): Maximum distance in z-direction
	
	"""
	
	dvecs=self.embryo.simulation.mesh.mesh.cellDistanceVectors
	
	# Restrict each component to nodes in this ROI, then take the maximum
	dmaxX=max(dvecs[0][self.meshIdx])
	dmaxY=max(dvecs[1][self.meshIdx])
	dmaxZ=max(dvecs[2][self.meshIdx])
	
	return dmaxX,dmaxY,dmaxZ
def genGmshDomain(self,volSizePx=20.,genLoops=True,genSurfaces=True,genVol=True,minID=None):
	
	"""Translates ROI into gmsh domain object.
	
	This object can then be used to write ROIs to ``.geo`` files.
	
	.. note:: If ``minID==None``, will grab maximum ID via :py:func:`pyfrp.subclasses.pyfrp_geometry.geometry.getMaxGeoID` and add 1.
	
	Keyword Args:
		volSizePx (float): Mesh size of vertices.
		genLoops (bool): Generate line loops.
		genSurfaces (bool): Generate surfaces.
		genVol (bool): Generate surface loop and corresponding volume.
		minID (int): Id at which geo IDs should start.
	
	Returns:
		pyfrp.modules.pyfrp_gmsh_geometry.domain: Domain object.
	
	"""
	
	if minID==None:
		minID=self.embryo.geometry.getMaxGeoID()+1
	
	# Base class fallback: returns an empty domain and warns the user.
	dom=pyfrp_gmsh_geometry.domain()
	
	printWarning("This ROI type does not have genGmshDomain right now. This might change in further versions.")
	
	return dom
def writeToGeoFile(self,fn=None,volSizePx=20.,genLoops=True,genSurfaces=True,genVol=True,minID=None):
	
	"""Writes ROI to geo file.
	
	.. note:: If ``fn`` is not given, will save .geo file of ROI in same folder as the geometry file of the embryo with the following path:
		``path/to/embryos/geo/file/nameOfEmbryo_nameOfROI.geo`` .
	
	See also :py:func:`pyfrp.subclasses.pyfrp_ROI.polySliceROI.genGmshDomain`.
	
	Keyword Args:
		fn (str): Output filename.
		volSizePx (float): Mesh size of vertices.
		genLoops (bool): Generate line loops.
		genSurfaces (bool): Generate surfaces.
		genVol (bool): Generate surface loop and corresponding volume.
		minID (int): Id at which geo IDs should start.
	
	Returns:
		str: Path to geo file.
	
	"""
	
	if fn==None:
		folder=os.path.dirname(self.embryo.geometry.fnGeo)
		fn=pyfrp_misc_module.slashToFn(folder)+self.embryo.name+"_"+self.name+".geo"
		# Spaces in names break downstream gmsh calls
		fn=fn.replace(" ","_")
	
	# NOTE(review): str(self.getType) stringifies the bound method itself
	# (prints "<bound method ...>"); likely intended self.getType() — confirm.
	printWarning("ROI of type "+str(self.getType)+" does not have writeToGeoFile right now. This might change in further versions.")
	
	return fn
def genMeshFile(self,fn=None,volSizePx=20.,debug=False,minID=None):
	
	"""Generates a gmsh mesh file (.msh) for the ROI.
	
	Writes the ROI to a .geo file first (see :py:func:`writeToGeoFile`), then
	runs Gmsh on it to produce the matching .msh file.
	
	.. note:: If ``fn`` is not given, will save .msh file of ROI in same folder as the geometry file of the embryo with the following path:
		``path/to/embryos/geo/file/nameOfEmbryo_nameOfROI.msh`` .
	
	See also :py:func:`pyfrp.subclasses.pyfrp_ROI.polySliceROI.writeToGeoFile`.
	
	Keyword Args:
		fn (str): Output filename (extension is forced to ``.geo`` for the intermediate file).
		volSizePx (float): Mesh size of vertices.
		debug (bool): Print debugging messages of the Gmsh run.
		minID (int): Id at which geo IDs should start.
	
	Returns:
		str: Path to mesh file.
	
	"""
	
	#Check filename: intermediate file must carry the .geo extension
	if fn!=None:
		if not fn.endswith(".geo"):
			fn,ext=os.path.splitext(fn)
			fn=fn+".geo"
	
	#Make geo file
	fn=self.writeToGeoFile(fn=fn,volSizePx=volSizePx,minID=minID)
	
	# Mesh file lives next to the geo file, same basename
	fnMsh=fn.replace(".geo",".msh")
	
	#Run gmsh
	pyfrp_gmsh_module.runGmsh(fn,fnOut=fnMsh,debug=debug,volSizeMax=volSizePx)
	
	return fnMsh
def genAsOpenscadInGeometry(self):

    """Generates intersection between ROI and geometry as solid python object.

    The intersection is built with solid python's ``*`` operator applied to
    the openscad representations of the geometry and of this ROI.

    See also :py:func:`pyfrp.subclasses.pyfrp_geometry.geometry.genAsOpenscad` and
    :py:func:`pyfrp.subclasses.pyfrp_ROI.ROI.genAsOpenscad`.

    Returns:
        solid.solidpython.cylinder: Solid python object.

    """

    # Intersection of geometry and ROI via solid python's * operator.
    return self.embryo.geometry.genAsOpenscad()*self.genAsOpenscad()
def render2Openscad(self,fn=None,segments=48):

    """Generates .scad file for the ROI.

    .. note:: If ``fn`` is not given, will save .scad file of ROI in same folder as the geometry file of the embryo with the following path:

        ``path/to/embryos/geo/file/nameOfEmbryo_nameOfROI.scad``.

    Keyword Args:
        fn (str): Output filename.
        segments (int): Number of segments used for convex hull of surface.

    Returns:
        str: Output filename.

    """

    if fn is None:
        # Default path: next to the embryo's geometry file, spaces replaced.
        geoFolder=os.path.dirname(self.embryo.geometry.fnGeo)
        fn=(pyfrp_misc_module.slashToFn(geoFolder)+self.embryo.name+"_"+self.name+".scad").replace(" ","_")

    header='$fn = %s;' % segments
    solid.scad_render_to_file(self.genAsOpenscad(),filepath=fn,file_header=header,include_orig_code=False)

    return fn
def render2OpenscadInGeometry(self,fn=None,segments=48):

    """Generates .scad file for the intersection between ROI and geometry.

    .. note:: If ``fn`` is not given, will save .scad file of ROI in same folder as the geometry file of the embryo with the following path:

        ``path/to/embryos/geo/file/nameOfEmbryo_nameOfROI.scad``.

    Keyword Args:
        fn (str): Output filename.
        segments (int): Number of segments used for convex hull of surface.

    Returns:
        str: Output filename.

    """

    if fn is None:
        # Default path: next to the embryo's geometry file, spaces replaced.
        geoFolder=os.path.dirname(self.embryo.geometry.fnGeo)
        fn=(pyfrp_misc_module.slashToFn(geoFolder)+self.embryo.name+"_"+self.name+".scad").replace(" ","_")

    header='$fn = %s;' % segments
    solid.scad_render_to_file(self.genAsOpenscadInGeometry(),filepath=fn,file_header=header,include_orig_code=False)

    return fn
def render2Stl(self,fn=None,segments=48):

    """Generates .stl file for the ROI.

    Will do this by:

        * Generating openscad object via :py:func:`genAsOpenscad`.
        * Rendering this to scad file via :py:func:`render2Openscad`.
        * Calling :py:func:`pyfrp.modules.pyfrp_openscad_module.runOpenscad`.

    .. note:: If ``fn`` is not given, will save .stl file of ROI in same folder as the geometry file of the embryo with the following path:

        ``path/to/embryos/geo/file/nameOfEmbryo_nameOfROI.stl``.

    Keyword Args:
        fn (str): Output filename.
        segments (int): Number of segments used for convex hull of surface.

    Returns:
        str: Output filename.

    """

    if fn==None:
        folder=os.path.dirname(self.embryo.geometry.fnGeo)

        # Fix: the default filename must carry the .stl extension. It was
        # previously built with ".scad", so the .replace(".stl",".scad")
        # below was a no-op and the STL output overwrote the SCAD file.
        fn=pyfrp_misc_module.slashToFn(folder)+self.embryo.name+"_"+self.name+".stl"
        fn=fn.replace(" ","_")

    # Render the scad file alongside the stl target, then convert.
    fnStl=fn
    fnScad=fnStl.replace(".stl",".scad")

    self.render2Openscad(fn=fnScad,segments=segments)
    pyfrp_openscad_module.runOpenscad(fnScad,fnOut=fnStl)

    return fnStl
def render2StlInGeometry(self,fn=None,segments=48):

    """Generates .stl file for the intersection between ROI and geometry.

    Will do this by:

        * Generating openscad object via :py:func:`genAsOpenscadInGeometry`.
        * Rendering this to scad file via :py:func:`render2OpenscadInGeometry`.
        * Calling :py:func:`pyfrp.modules.pyfrp_openscad_module.runOpenscad`.

    .. note:: If ``fn`` is not given, will save .stl file of ROI in same folder as the geometry file of the embryo with the following path:

        ``path/to/embryos/geo/file/nameOfEmbryo_nameOfROI.stl``.

    Keyword Args:
        fn (str): Output filename.
        segments (int): Number of segments used for convex hull of surface.

    Returns:
        str: Output filename.

    """

    if fn==None:
        folder=os.path.dirname(self.embryo.geometry.fnGeo)

        # Fix: the default filename must carry the .stl extension. It was
        # previously built with ".scad", so the .replace(".stl",".scad")
        # below was a no-op and the STL output overwrote the SCAD file.
        fn=pyfrp_misc_module.slashToFn(folder)+self.embryo.name+"_"+self.name+".stl"
        fn=fn.replace(" ","_")

    # Render the scad file alongside the stl target, then convert.
    fnStl=fn
    fnScad=fnStl.replace(".stl",".scad")

    self.render2OpenscadInGeometry(fn=fnScad,segments=segments)
    pyfrp_openscad_module.runOpenscad(fnScad,fnOut=fnStl)

    return fnStl
def addBoundaryLayerAtSurfaces(self,fnOut=None,segments=48,simplify=True,iterations=3,triangIterations=2,
        fixSurfaces=True,debug=False,volSizePx=None,volSizeLayer=10.,thickness=15.,cleanUp=True,
        approxBySpline=True,angleThresh=0.95,faces='all',onlyAbs=True):

    """Adds boundary layer around ROI to the mesh.

    Does this by:

        * Generating a stl file describing ROI, see also :py:func:`pyfrp.subclasses.pyfrp_ROI.ROI.render2StlInGeometry`.
        * Read in stl file as new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.domain` via
          :py:func:`pyfrp.modules.pyfrp_gmsh_IO_module.readStlFile`.
        * Simplify new geometry via :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.simplifySurfaces`.
        * Extracting selected surfaces via :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.gmshElement.extract`.
        * If selected, surface boundaries are approximated into splines.
        * Reading in geometry's .geo file via :py:func:`pyfrp.sublcasses.pyfrp_geometry.geometry.readGeoFile`.
        * Merging ROI geometry into main geometry via :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.merge`.
        * Adding a boundary layer mesh via :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addBoundaryLayerField`.
        * Adding all surfaces of ROI's domain to boundary layer, see
          :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.boundaryLayerField.addFaceListByID`.
        * Writing new .geo file, setting it as ``fnGeo`` and re-running :py:func:`genMesh`.
        * Clean up .stl and .scad files that are not needed anymore.

    .. note:: ``volSizeLayer`` only allows a single definition of mesh size in layer. Note that the
        :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.boundaryLayerField` class allows different mesh sizes
        normal and along surfaces. For more information, see its documentation.

    .. note:: If no ``fnOut`` is given, will save a new .geo file in same folder as original ``fnGeo`` with subfix:
        ``fnGeo_roiName_BL.geo``.

    .. note:: :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.simplifySurfaces` is not a simple procedure,
        we recommend reading its documentation.

    If ``volSizePx`` is given, will overwrite mesh's ``volSizePx`` and set it globally at all nodes.

    Keyword Args:
        fnOut (str): Path to new .geo file.
        segments (int): Number of segments used for convex hull of surface.
        simplify (bool): Simplify surfaces of stl file.
        iterations (int): Number of iterations used for simplification.
        triangIterations (int): Number of iterations used for subdivision of surfaces.
        fixSurfaces (bool): Allow fixing of surfaces, making sure they are coherent with Gmsh requirements.
        debug (bool): Print debugging messages.
        volSizePx (float): Global mesh density.
        volSizeLayer (float): Boundary layer mesh size.
        thickness (float): Thickness of boundary layer.
        cleanUp (bool): Clean up temporary files when finished.
        approxBySpline (bool): Approximate curvatures by spline.
        angleThresh (float): Threshold angle under which loops are summarized.
        faces (list): List of faces.
        onlyAbs (bool): Take absolute value of faces into account.

    Returns:
        str: Path to new .geo file.

    """

    # Fix: the previous signature only declared (fn, segments) although the
    # call below forwards the full keyword set, so every invocation raised a
    # NameError. All keywords are now declared with defaults.
    # NOTE(review): defaults are meant to mirror
    # mesh.addBoundaryLayerAroundROI — confirm against that method.

    # Fix: the hasattr check was inverted; we must bail out when the embryo
    # does NOT have a simulation (and hence no mesh) yet.
    if not hasattr(self.embryo,'simulation'):
        printError("addBoundaryLayerAtSurfaces: Embryo does not have simulation yet.")
        return ""

    return self.embryo.simulation.mesh.addBoundaryLayerAroundROI(self,fnOut=fnOut,segments=segments,simplify=simplify,iterations=iterations,triangIterations=triangIterations,
            fixSurfaces=fixSurfaces,debug=debug,volSizePx=volSizePx,volSizeLayer=volSizeLayer,thickness=thickness,cleanUp=cleanUp,
            approxBySpline=approxBySpline,angleThresh=angleThresh,faces=faces,onlyAbs=onlyAbs)
class radialROI(ROI):

    """Radial (circular) ROI class.

    Inherits from :py:class:`ROI`.

    Main attributes are:

        * ``radius``: Radius of ROI.
        * ``center``: Center of ROI.

    """

    def __init__(self,embryo,name,Id,center,radius,color='b'):

        """Creates new radial ROI.

        Args:
            embryo (pyfrp.subclasses.pyfrp_embryo.embryo): Embryo ROI belongs to.
            name (str): Name of ROI.
            Id (int): Id of ROI.
            center (list): Center of ROI.
            radius (float): Radius of ROI.

        Keyword Args:
            color (str): Color of ROI.

        """

        ROI.__init__(self,embryo,name,Id,color=color)

        self.radius=radius
        self.center=center

    def setRadius(self,r):

        """Sets radius of ROI.

        Args:
            r (float): New radius

        Returns:
            float: New radius.

        """

        self.radius=r
        return self.radius

    def getRadius(self):

        """Returns current radius of ROI.

        Returns:
            float: Current radius.

        """

        return self.radius

    def setCenter(self,c):

        """Sets center of ROI.

        Args:
            c (list): New center.

        Returns:
            list: New center.

        """

        self.center=c
        return self.center

    def getCenter(self):

        """Returns current center of ROI.

        Returns:
            list: Current center.

        """

        return self.center

    def computeImgIdx(self,debug=False):

        """Computes image indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getCircleIdxImg`.

        Keyword Args:
            debug (bool): Print debugging messages.

        Returns:
            tuple: Tuple containing:

                * imgIdxX (list): Image indices in x-direction.
                * imgIdxY (list): Image indices in y-direction.

        """

        [self.imgIdxX,self.imgIdxY]=pyfrp_idx_module.getCircleIdxImg(self.center,self.radius,self.embryo.dataResPx,debug=debug)
        return self.imgIdxX,self.imgIdxY

    def computeMeshIdx(self,mesh):

        """Computes mesh indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getCircleIdxMesh`.

        Args:
            mesh (fipy.GmshImporter3D): Fipy mesh object.

        Returns:
            list: Newly computed mesh indices.

        """

        # Fix: removed a stray third argument ``radius`` (an undefined name)
        # that made this call raise a NameError; matches the call signature
        # used in radialSliceROI.computeMeshIdx.
        self.meshIdx=pyfrp_idx_module.getCircleIdxMesh(self.center,self.radius,mesh,zmin=self.zmin,zmax=self.zmax)

        return self.meshIdx

    def showBoundary(self,color=None,linewidth=3,ax=None):

        """Shows ROI in a 2D plot.

        If no color is specified, will use color specified in ``ROI.color``.

        Keyword Args:
            ax (matplotlib.axes): Matplotlib axes used for plotting. If not specified, will generate new one.
            color (str): Color of plot.
            linewidth (float): Linewidth of plot.

        Returns:
            matplotlib.axes: Axes used for plotting.

        """

        if color==None:
            color=self.color

        if ax==None:
            fig,axes = pyfrp_plot_module.makeSubplot([1,1],titles=["boundary"],sup=self.name+" boundary")
            ax = axes[0]

            # Blank (NaN) background image so the new axes get image extent.
            img=np.nan*np.ones((self.embryo.dataResPx,self.embryo.dataResPx))
            ax.imshow(img)

        patch = ptc.Circle(self.center,self.radius,fill=False,linewidth=linewidth,color=color)
        ax.add_patch(patch)

        return ax

    def center2Mid(self):

        """Moves center of ROI to center of image.

        Returns:
            list: New center.

        """

        if np.mod(self.embryo.dataResPx,2)==0:
            return self.setCenter([self.embryo.dataResPx/2+0.5,self.embryo.dataResPx/2+0.5])
        else:
            return self.setCenter([self.embryo.dataResPx/2,self.embryo.dataResPx/2])

    def makeReducable(self,auto=False,debug=False):

        """Tries to make ROI reducable by centering it in the image.

        In interactive mode the user is asked to confirm the new center; if
        the resulting embryo is not symmetric, the old center is restored.

        Keyword Args:
            auto (bool): Skip the interactive confirmation.
            debug (bool): Print debugging messages.

        Returns:
            bool: True if ROI was made reducable.

        """

        oldCenter=self.getCenter()
        self.center2Mid()

        if not auto:
            a=raw_input("Change center of region "+ self.name + " from " + str(oldCenter) + ' to ' + str(self.getCenter()) + ' ? [Y/N]')

            if a=='N':
                self.setCenter(oldCenter)
                return False
            elif a=='Y':
                pass

            if not self.checkSymmetry():
                printWarning('Cannot make region '+self.name+' reducable.')
                self.setCenter(oldCenter)
                return False
            return True

        # NOTE(review): in auto mode the centering is kept but False is
        # returned without a symmetry check — preserved from the original,
        # confirm this is intended.
        return False

    def checkCentered(self):

        """Checks whether ROI is centered in the image.

        Returns:
            bool: True if centered.

        """

        if np.mod(self.embryo.dataResPx,2)==0:
            return bool((self.center[0]==self.embryo.dataResPx/2+0.5) and (self.center[1]==self.embryo.dataResPx/2+0.5))
        else:
            return bool((self.center[0]==self.embryo.dataResPx/2.) and (self.center[1]==self.embryo.dataResPx/2.))

    def checkXYInside(self,x,y):

        """Checks if coordinates are inside ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.checkInsideCircle`.

        Args:
            x (np.ndarray): Array of x-coordinates.
            y (np.ndarray): Array of y-coordinates.

        Returns:
            np.ndarray: Array of booleans with corresponding to [x,y].

        """

        return pyfrp_idx_module.checkInsideCircle(x,y,self.center,self.radius)

    def computeXYExtend(self):

        """Computes extend of ROI in x/y direction.

        Returns:
            tuple: Tuple containing:

                * xExtend (list): List containing minimum/maximum x-coordinate (``[xmin,xmax]``).
                * yExtend (list): List containing minimum/maximum y-coordinate (``[ymin,ymax]``).

        """

        self.xExtend=[self.center[0]-self.radius,self.center[0]+self.radius]
        self.yExtend=[self.center[1]-self.radius,self.center[1]+self.radius]
        return self.xExtend, self.yExtend

    def getCenterOfMass(self):

        """Returns center of mass of ROI.

        For a radial ROI, this is equivalent to the ``center``.

        Returns:
            list: Center of mass.

        """

        return self.center

    def genAsOpenscad(self):

        """Generates ROI as solid python object.

        Useful if ROI is used to be passed to openscad.

        .. note:: Will grab extent of geometry to find bounds in z-direction.

        Returns:
            solid.solidpython.cylinder: Solid python object.

        """

        try:
            ext=self.embryo.geometry.getZExtend()
        except AttributeError:
            printError("genAsOpenscad: Cannot grab extend from geometry of type " + self.embryo.geometry.typ)

            # Fix: bail out explicitly instead of falling through and raising
            # a NameError on the undefined ``ext`` below.
            return None

        openScadROI=solid.translate([self.center[0],self.center[1],min(ext)])(solid.cylinder(r=self.radius,h=abs(max(ext)-min(ext))))

        return openScadROI
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#slice ROI class
class sliceROI(ROI):

    """Slice ROI class.

    Inherits from :py:class:`ROI`.

    Covers the whole image in x/y-direction and restricts the ROI to a
    z-range defined via ``height``, ``width`` and ``sliceBottom``.

    Main attributes are:

        * ``height``: z-position of the slice.
        * ``width``: Thickness of the slice.
        * ``sliceBottom``: If ``True``, ``height`` marks the bottom of the slice, otherwise its center.

    """

    def __init__(self,embryo,name,Id,height,width,sliceBottom,color='b'):

        """Creates new slice ROI.

        Args:
            embryo (pyfrp.subclasses.pyfrp_embryo.embryo): Embryo ROI belongs to.
            name (str): Name of ROI.
            Id (int): Id of ROI.
            height (float): z-position of slice.
            width (float): Thickness of slice.
            sliceBottom (bool): If True, height is interpreted as bottom of slice.

        Keyword Args:
            color (str): Color of ROI.

        """

        ROI.__init__(self,embryo,name,Id,color=color)

        self.height=height
        self.width=width
        self.sliceBottom=sliceBottom

        # Derive zmin/zmax from height/width/sliceBottom right away.
        self.computeZExtend()

    def computeZExtend(self):

        """Computes z-extend of slice from ``height``, ``width`` and ``sliceBottom``.

        If ``sliceBottom==True``, the slice covers ``[height,height+width]``,
        otherwise it is centered around ``height``,
        i.e. ``[height-width/2,height+width/2]``.

        Returns:
            tuple: Tuple containing zmin and zmax.

        """

        if self.sliceBottom:
            self.setZExtend(self.height,self.height+self.width)
        else:
            self.setZExtend(self.height-0.5*self.width,self.height+0.5*self.width)

        return self.zmin,self.zmax

    def setHeight(self,h):

        """Sets height of slice and updates z-extend.

        Args:
            h (float): New height.

        Returns:
            float: New height.

        """

        self.height=h
        self.computeZExtend()
        return self.height

    def getHeight(self):

        """Returns height of slice.

        Returns:
            float: Current height.

        """

        return self.height

    def setSliceBottom(self,s):

        """Sets sliceBottom flag and updates z-extend.

        Args:
            s (bool): New flag value.

        Returns:
            bool: New flag value.

        """

        self.sliceBottom=s
        self.computeZExtend()
        return self.sliceBottom

    def getSliceBottom(self):

        """Returns sliceBottom flag.

        Returns:
            bool: Current flag value.

        """

        return self.sliceBottom

    def setWidth(self,w):

        """Sets width of slice and updates z-extend.

        Args:
            w (float): New width.

        Returns:
            float: New width.

        """

        self.width=w
        self.computeZExtend()
        return self.width

    def getWidth(self):

        """Returns width of slice.

        Returns:
            float: Current width.

        """

        return self.width

    def computeImgIdx(self,debug=False):

        """Computes image indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getAllIdxImg`.

        Keyword Args:
            debug (bool): Print debugging messages.

        Returns:
            tuple: Tuple containing:

                * imgIdxX (list): Image indices in x-direction.
                * imgIdxY (list): Image indices in y-direction.

        """

        # A slice covers the whole image in x/y, so take all indices.
        [self.imgIdxX,self.imgIdxY]=pyfrp_idx_module.getAllIdxImg(self.embryo.dataResPx,debug=debug)
        return self.imgIdxX,self.imgIdxY

    def computeMeshIdx(self,mesh):

        """Computes mesh indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getSliceIdxMesh`.

        Args:
            mesh (fipy.GmshImporter3D): Fipy mesh object.

        Returns:
            list: Newly computed mesh indices.

        """

        x,y,z=mesh.getCellCenters()

        # Only the z-coordinate matters for a slice.
        self.meshIdx=pyfrp_idx_module.getSliceIdxMesh(z,self.zmin,self.zmax)

        return self.meshIdx

    def checkXYInside(self,x,y):

        """Checks if coordinates are inside ROI.

        Only returns ``True``, since ``sliceROI`` is not limited in
        x/y-direction.

        Args:
            x (np.ndarray): Array of x-coordinates.
            y (np.ndarray): Array of y-coordinates.

        Returns:
            np.ndarray: Array of booleans with corresponding to [x,y], all ``True``.

        """

        return np.ones(np.asarray(x).shape).astype(bool)

    def computeXYExtend(self):

        """Computes extend of ROI in x/y direction.

        .. note:: Since sliceROI theoretically is not having any limits in x/y-direction,
            function returns limits given by input image, that is, ``[0,embryo.dataResPx]``.

        Returns:
            tuple: Tuple containing:

                * xExtend (list): List containing minimum/maximum x-coordinate (``[xmin,xmax]``).
                * yExtend (list): List containing minimum/maximum y-coordinate (``[ymin,ymax]``).

        """

        self.xExtend=[1,self.embryo.dataResPx]
        self.yExtend=[1,self.embryo.dataResPx]
        return self.xExtend, self.yExtend

    def genAsOpenscad(self):

        """Generates ROI as solid python object.

        Useful if ROI is used to be passed to openscad.

        .. note:: Will grab extent of geometry to find bounds in x/y-direction.

        Returns:
            solid.solidpython.cube: Solid python object.

        """

        try:
            ext=self.embryo.geometry.getXYExtend()
        except AttributeError:
            printError("genAsOpenscad: Cannot grab extend from geometry of type " + self.embryo.geometry.typ)
            # NOTE(review): if this error path is taken, ``ext`` is undefined
            # below and a NameError follows — confirm intended behavior.

        z=self.getOpenscadZExtend()
        zmin,zmax=min(z),max(z)

        openScadROI=solid.translate([ext[0],ext[2],zmin])(solid.cube([ext[1]-ext[0],ext[3]-ext[2],abs(zmax-zmin)]))

        return openScadROI
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Radial and slice ROI class
class radialSliceROI(sliceROI,radialROI):

    """Radial slice ROI class.

    Inherits from :py:class:`sliceROI` and :py:class:`radialROI`: a circle
    in x/y-direction restricted to the z-range of a slice.

    """

    def __init__(self,embryo,name,Id,center,radius,height,width,sliceBottom,color='b'):

        """Creates new radial slice ROI.

        Args:
            embryo (pyfrp.subclasses.pyfrp_embryo.embryo): Embryo ROI belongs to.
            name (str): Name of ROI.
            Id (int): Id of ROI.
            center (list): Center of ROI.
            radius (float): Radius of ROI.
            height (float): z-position of slice.
            width (float): Thickness of slice.
            sliceBottom (bool): If True, height is interpreted as bottom of slice.

        Keyword Args:
            color (str): Color of ROI.

        """

        radialROI.__init__(self,embryo,name,Id,center,radius,color=color)
        sliceROI.__init__(self,embryo,name,Id,height,width,sliceBottom,color=color)

    def computeImgIdx(self,debug=False):

        """Computes image indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getCircleIdxImg`.

        Keyword Args:
            debug (bool): Print debugging messages.

        Returns:
            tuple: Tuple containing:

                * imgIdxX (list): Image indices in x-direction.
                * imgIdxY (list): Image indices in y-direction.

        """

        [self.imgIdxX,self.imgIdxY]=pyfrp_idx_module.getCircleIdxImg(self.center,self.radius,self.embryo.dataResPx,debug=debug)
        return self.imgIdxX,self.imgIdxY

    def computeMeshIdx(self,mesh):

        """Computes mesh indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getCircleIdxMesh`.

        Args:
            mesh (fipy.GmshImporter3D): Fipy mesh object.

        Returns:
            list: Newly computed mesh indices.

        """

        self.meshIdx=pyfrp_idx_module.getCircleIdxMesh(self.center,self.radius,mesh,zmin=self.zmin,zmax=self.zmax)

        return self.meshIdx

    def checkXYInside(self,x,y):

        """Checks if coordinates are inside ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.checkInsideCircle`.

        Args:
            x (np.ndarray): Array of x-coordinates.
            y (np.ndarray): Array of y-coordinates.

        Returns:
            np.ndarray: Array of booleans with corresponding to [x,y].

        """

        return pyfrp_idx_module.checkInsideCircle(x,y,self.center,self.radius)

    def computeXYExtend(self):

        """Computes extend of ROI in x/y direction.

        Returns:
            tuple: Tuple containing:

                * xExtend (list): List containing minimum/maximum x-coordinate (``[xmin,xmax]``).
                * yExtend (list): List containing minimum/maximum y-coordinate (``[ymin,ymax]``).

        """

        self.xExtend=[self.center[0]-self.radius,self.center[0]+self.radius]
        self.yExtend=[self.center[1]-self.radius,self.center[1]+self.radius]
        return self.xExtend, self.yExtend

    def genGmshDomain(self,volSizePx=20.,genLoops=True,genSurfaces=True,genVol=True,minID=None):

        """Translates ROI into gmsh domain object.

        This object can then be used to write ROIs to ``.geo`` files.

        See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addCylinderByParameters`.

        .. note:: If ``minID==None``, will grab maximum ID via :py:func:`pyfrp.subclasses.pyfrp_geometry.geometry.getMaxGeoID` and add 1.

        Keyword Args:
            volSizePx (float): Mesh size of vertices.
            genLoops (bool): Generate line loops.
            genSurfaces (bool): Generate surfaces.
            genVol (bool): Generate surface loop and corresponding volume.
            minID (int): Id at which geo IDs should start.

        Returns:
            pyfrp.modules.pyfrp_gmsh_geometry.domain: Domain object.

        """

        d=pyfrp_gmsh_geometry.domain()
        d.addCylinderByParameters([self.center[0],self.center[1]],self.radius,self.height,self.width,volSizePx,
            plane="z",genLoops=genLoops,genSurfaces=genSurfaces,genVol=genVol)

        # Shift all geo IDs so they do not collide with the embryo geometry.
        if minID==None:
            minID=self.embryo.geometry.getMaxGeoID()+1
        d.incrementAllIDs(minID)

        return d

    def writeToGeoFile(self,fn=None,volSizePx=20.,genLoops=True,genSurfaces=True,genVol=True,minID=None):

        """Writes ROI to geo file.

        .. note:: If ``fn`` is not given, will save .geo file of ROI in same folder as the geometry file of the embryo with the following path:

            ``path/to/embryos/geo/file/nameOfEmbryo_nameOfROI.geo`` .

        See also :py:func:`pyfrp.subclasses.pyfrp_ROI.radialSliceROI.genGmshDomain`.

        Keyword Args:
            fn (str): Output filename.
            volSizePx (float): Mesh size of vertices.
            genLoops (bool): Generate line loops.
            genSurfaces (bool): Generate surfaces.
            genVol (bool): Generate surface loop and corresponding volume.
            minID (int): Id at which geo IDs should start.

        Returns:
            str: Path to geo file.

        """

        if fn==None:
            folder=os.path.dirname(self.embryo.geometry.fnGeo)
            fn=pyfrp_misc_module.slashToFn(folder)+self.embryo.name+"_"+self.name+".geo"
            fn=fn.replace(" ","_")

        d=self.genGmshDomain(volSizePx=volSizePx,genLoops=genLoops,genSurfaces=genSurfaces,genVol=genVol,minID=minID)
        d.writeToFile(fn)

        return fn

    def genAsOpenscad(self,allowInf=False):

        """Generates ROI as solid python object.

        Useful if ROI is used to be passed to openscad.

        Keyword Args:
            allowInf (bool): Allow infinity in bounds of z-direction.
                NOTE(review): currently unused in the body — confirm whether it
                should be forwarded to ``getOpenscadZExtend``.

        Returns:
            solid.solidpython.cylinder: Solid python object.

        """

        z=self.getOpenscadZExtend()
        zmin,zmax=min(z),max(z)

        openScadROI=solid.translate([self.center[0],self.center[1],zmin])(solid.cylinder(r=self.radius,h=abs(zmax-zmin)))

        return openScadROI
#def plotIn3D(self,domain=None,ax=None):
###NOTE need this function here!!!
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Square ROI class
class squareROI(ROI):

    """Square ROI class.

    Inherits from :py:class:`ROI`.

    Main attributes are:

        * ``offset``: Offset (lower-left corner) of ROI.
        * ``sidelength``: Sidelength of ROI.

    """

    def __init__(self,embryo,name,Id,offset,sidelength,color='b'):

        """Creates new square ROI.

        Args:
            embryo (pyfrp.subclasses.pyfrp_embryo.embryo): Embryo ROI belongs to.
            name (str): Name of ROI.
            Id (int): Id of ROI.
            offset (list): Offset (lower-left corner) of ROI.
            sidelength (float): Sidelength of ROI.

        Keyword Args:
            color (str): Color of ROI.

        """

        ROI.__init__(self,embryo,name,Id,color=color)

        self.sidelength=sidelength
        self.offset=offset

    def setSideLength(self,s):

        """Sets sidelength of ROI.

        Args:
            s (float): New sidelength.

        Returns:
            float: New sidelength.

        """

        self.sidelength=s
        return self.sidelength

    def getSideLength(self):

        """Returns current sidelength of ROI.

        Returns:
            float: Current sidelength.

        """

        return self.sidelength

    def setOffset(self,c):

        """Sets offset of ROI.

        Args:
            c (list): New offset.

        Returns:
            list: New offset.

        """

        self.offset=c
        return self.offset

    def getOffset(self):

        """Returns current offset of ROI.

        Returns:
            list: Current offset.

        """

        return self.offset

    def computeImgIdx(self,debug=False):

        """Computes image indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getSquareIdxImg`.

        Keyword Args:
            debug (bool): Print debugging messages.

        Returns:
            tuple: Tuple containing:

                * imgIdxX (list): Image indices in x-direction.
                * imgIdxY (list): Image indices in y-direction.

        """

        [self.imgIdxX,self.imgIdxY]=pyfrp_idx_module.getSquareIdxImg(self.offset,self.sidelength,self.embryo.dataResPx,debug=debug)
        return self.imgIdxX,self.imgIdxY

    def computeMeshIdx(self,mesh):

        """Computes mesh indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getSquareIdxMesh`.

        Args:
            mesh (fipy.GmshImporter3D): Fipy mesh object.

        Returns:
            list: Newly computed mesh indices.

        """

        self.meshIdx=pyfrp_idx_module.getSquareIdxMesh(self.sidelength,self.offset,mesh,zmin=self.zmin,zmax=self.zmax)

        return self.meshIdx

    def showBoundary(self,color=None,linewidth=3,ax=None):

        """Shows ROI in a 2D plot.

        If no color is specified, will use color specified in ``ROI.color``.

        Keyword Args:
            ax (matplotlib.axes): Matplotlib axes used for plotting. If not specified, will generate new one.
            color (str): Color of plot.
            linewidth (float): Linewidth of plot.

        Returns:
            matplotlib.axes: Axes used for plotting.

        """

        if color==None:
            color=self.color

        if ax==None:
            fig,axes = pyfrp_plot_module.makeSubplot([1,1],titles=["boundary"],sup=self.name+" boundary")
            ax = axes[0]

            # Blank (NaN) background image so the new axes get image extent.
            img=np.nan*np.ones((self.embryo.dataResPx,self.embryo.dataResPx))
            ax.imshow(img)

        patch = ptc.Rectangle(self.offset,self.sidelength,self.sidelength,fill=False,linewidth=linewidth,color=color)
        ax.add_patch(patch)

        return ax

    def centerOffset(self):

        """Shifts offset such that the square is centered in the image.

        Returns:
            list: New offset.

        """

        if np.mod(self.embryo.dataResPx,2)==0:
            return self.setOffset([self.embryo.dataResPx/2+0.5-self.sidelength/2,self.embryo.dataResPx/2+0.5-self.sidelength/2])
        else:
            return self.setOffset([self.embryo.dataResPx/2-self.sidelength/2,self.embryo.dataResPx/2-self.sidelength/2])

    def makeReducable(self,auto=False,debug=False):

        """Tries to make ROI reducable by centering it in the image.

        In interactive mode the user is asked to confirm the new offset; if
        the resulting embryo is not symmetric, the old offset is restored.

        Keyword Args:
            auto (bool): Skip the interactive confirmation.
            debug (bool): Print debugging messages.

        Returns:
            bool: True if ROI was made reducable.

        """

        oldOffset=self.getOffset()
        self.centerOffset()

        if not auto:
            a=raw_input("Change offset of region "+ self.name + " from " + str(oldOffset) + ' to ' + str(self.getOffset()) + ' ? [Y/N]')

            if a=='N':
                self.setOffset(oldOffset)
                return False
            elif a=='Y':
                pass

            if not self.checkSymmetry():
                printWarning('Cannot make region '+self.name+' reducable.')

                # Fix: restore the saved offset; the previous code referenced
                # the undefined name ``oldCenter`` (NameError).
                self.setOffset(oldOffset)
                return False
            return True

        # NOTE(review): in auto mode the centering is kept but False is
        # returned without a symmetry check — preserved from the original,
        # confirm this is intended.
        return False

    def checkXYInside(self,x,y):

        """Checks if coordinates are inside ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.checkInsideSquare`.

        Args:
            x (np.ndarray): Array of x-coordinates.
            y (np.ndarray): Array of y-coordinates.

        Returns:
            np.ndarray: Array of booleans with corresponding to [x,y].

        """

        return pyfrp_idx_module.checkInsideSquare(x,y,self.offset,self.sidelength)

    def computeXYExtend(self):

        """Computes extend of ROI in x/y direction.

        Returns:
            tuple: Tuple containing:

                * xExtend (list): List containing minimum/maximum x-coordinate (``[xmin,xmax]``).
                * yExtend (list): List containing minimum/maximum y-coordinate (``[ymin,ymax]``).

        """

        self.xExtend=[self.offset[0],self.offset[0]+self.sidelength]
        self.yExtend=[self.offset[1],self.offset[1]+self.sidelength]
        return self.xExtend, self.yExtend

    def getCorners(self):

        """Returns corners of square in counter-clockwise order, starting with offset.

        Returns:
            list: List of 2D coordinates of corners.

        """

        corner1=np.asarray(self.offset)
        corner2=np.asarray([self.offset[0]+self.sidelength,self.offset[1]])
        corner3=np.asarray([self.offset[0]+self.sidelength,self.offset[1]+self.sidelength])
        corner4=np.asarray([self.offset[0],self.offset[1]+self.sidelength])

        return [corner1,corner2,corner3,corner4]

    def getCenterOfMass(self):

        r"""Computes center of mass of ROI.

        The center of mass is computed by

        .. math:: c = \frac{1}{N} \sum\limits_{i=1}^{N} x_i ,

        where :math:`c` is the center of mass, :math:`N` the number of corners and :math:`x_i` is the
        coordinate of corner :math:`i` .

        Returns:
            numpy.ndarray: Center of mass.

        """

        corners=self.getCorners()

        # Fix: variable was misspelled (``cornes``), raising a NameError.
        CoM=corners[0]
        for i in range(1,len(corners)):
            CoM=CoM+corners[i]

        CoM=CoM/len(corners)

        return CoM

    def genAsOpenscad(self):

        """Generates ROI as solid python object.

        Useful if ROI is used to be passed to openscad.

        .. note:: Will grab extent of geometry to find bounds in z-direction.

        Returns:
            solid.solidpython.cube: Solid python object.

        """

        try:
            ext=self.embryo.geometry.getZExtend()
        except AttributeError:
            printError("genAsOpenscad: Cannot grab extend from geometry of type " + self.embryo.geometry.typ)

            # Fix: bail out explicitly instead of falling through and raising
            # a NameError on the undefined ``ext`` below.
            return None

        openScadROI=solid.translate([self.offset[0],self.offset[1],min(ext)])(solid.cube([self.sidelength,self.sidelength,abs(max(ext)-min(ext))]))

        return openScadROI
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Square and slice ROI class
class squareSliceROI(squareROI,sliceROI):

    """Square slice ROI class.

    Inherits from :py:class:`squareROI` and :py:class:`sliceROI`: a square
    in x/y-direction restricted to the z-range of a slice.

    """

    def __init__(self,embryo,name,Id,offset,sidelength,height,width,sliceBottom,color='b'):

        """Creates new square slice ROI.

        Args:
            embryo (pyfrp.subclasses.pyfrp_embryo.embryo): Embryo ROI belongs to.
            name (str): Name of ROI.
            Id (int): Id of ROI.
            offset (list): Offset (lower-left corner) of ROI.
            sidelength (float): Sidelength of ROI.
            height (float): z-position of slice.
            width (float): Thickness of slice.
            sliceBottom (bool): If True, height is interpreted as bottom of slice.

        Keyword Args:
            color (str): Color of ROI.

        """

        squareROI.__init__(self,embryo,name,Id,offset,sidelength,color=color)
        sliceROI.__init__(self,embryo,name,Id,height,width,sliceBottom,color=color)

    def computeImgIdx(self,debug=False):

        """Computes image indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getSquareIdxImg`.

        Keyword Args:
            debug (bool): Print debugging messages.

        Returns:
            tuple: Tuple containing:

                * imgIdxX (list): Image indices in x-direction.
                * imgIdxY (list): Image indices in y-direction.

        """

        [self.imgIdxX,self.imgIdxY]=pyfrp_idx_module.getSquareIdxImg(self.offset,self.sidelength,self.embryo.dataResPx,debug=debug)
        return self.imgIdxX,self.imgIdxY

    def computeMeshIdx(self,mesh):

        """Computes mesh indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getSquareIdxMesh`.

        Args:
            mesh (fipy.GmshImporter3D): Fipy mesh object.

        Returns:
            list: Newly computed mesh indices.

        """

        self.meshIdx=pyfrp_idx_module.getSquareIdxMesh(self.sidelength,self.offset,mesh,zmin=self.zmin,zmax=self.zmax)

        return self.meshIdx

    def checkXYInside(self,x,y):

        """Checks if coordinates are inside ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.checkInsideSquare`.

        Args:
            x (np.ndarray): Array of x-coordinates.
            y (np.ndarray): Array of y-coordinates.

        Returns:
            np.ndarray: Array of booleans with corresponding to [x,y].

        """

        return pyfrp_idx_module.checkInsideSquare(x,y,self.offset,self.sidelength)

    def computeXYExtend(self):

        """Computes extend of ROI in x/y direction.

        Returns:
            tuple: Tuple containing:

                * xExtend (list): List containing minimum/maximum x-coordinate (``[xmin,xmax]``).
                * yExtend (list): List containing minimum/maximum y-coordinate (``[ymin,ymax]``).

        """

        self.xExtend=[self.offset[0],self.offset[0]+self.sidelength]
        self.yExtend=[self.offset[1],self.offset[1]+self.sidelength]
        return self.xExtend, self.yExtend

    def genGmshDomain(self,volSizePx=20.,genLoops=True,genSurfaces=True,genVol=True,minID=None):

        """Translates ROI into gmsh domain object.

        This object can then be used to write ROIs to ``.geo`` files.

        See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addCuboidByParameters`.

        .. note:: If ``minID==None``, will grab maximum ID via :py:func:`pyfrp.subclasses.pyfrp_geometry.geometry.getMaxGeoID` and add 1.

        Keyword Args:
            volSizePx (float): Mesh size of vertices.
            genLoops (bool): Generate line loops.
            genSurfaces (bool): Generate surfaces.
            genVol (bool): Generate surface loop and corresponding volume.
            minID (int): Id at which geo IDs should start.

        Returns:
            pyfrp.modules.pyfrp_gmsh_geometry.domain: Domain object.

        """

        d=pyfrp_gmsh_geometry.domain()
        d.addCuboidByParameters([self.offset[0],self.offset[1],self.height],self.sidelength,self.sidelength,self.width,volSizePx,
            plane="z",genLoops=genLoops,genSurfaces=genSurfaces,genVol=genVol)

        # Shift all geo IDs so they do not collide with the embryo geometry.
        if minID==None:
            minID=self.embryo.geometry.getMaxGeoID()+1
        d.incrementAllIDs(minID)

        return d

    def writeToGeoFile(self,fn=None,volSizePx=20.,genLoops=True,genSurfaces=True,genVol=True,minID=None):

        """Writes ROI to geo file.

        .. note:: If ``fn`` is not given, will save .geo file of ROI in same folder as the geometry file of the embryo with the following path:

            ``path/to/embryos/geo/file/nameOfEmbryo_nameOfROI.geo`` .

        See also :py:func:`pyfrp.subclasses.pyfrp_ROI.squareSliceROI.genGmshDomain`.

        Keyword Args:
            fn (str): Output filename.
            volSizePx (float): Mesh size of vertices.
            genLoops (bool): Generate line loops.
            genSurfaces (bool): Generate surfaces.
            genVol (bool): Generate surface loop and corresponding volume.
            minID (int): Id at which geo IDs should start.

        Returns:
            str: Path to geo file.

        """

        if fn==None:
            folder=os.path.dirname(self.embryo.geometry.fnGeo)
            fn=pyfrp_misc_module.slashToFn(folder)+self.embryo.name+"_"+self.name+".geo"
            fn=fn.replace(" ","_")

        d=self.genGmshDomain(volSizePx=volSizePx,genLoops=genLoops,genSurfaces=genSurfaces,genVol=genVol,minID=minID)
        d.writeToFile(fn)

        return fn

    def genAsOpenscad(self):

        """Generates ROI as solid python object.

        Useful if ROI is used to be passed to openscad.

        Returns:
            solid.solidpython.cube: Solid python object.

        """

        z=self.getOpenscadZExtend()
        zmin,zmax=min(z),max(z)

        openScadROI=solid.translate([self.offset[0],self.offset[1],zmin])(solid.cube([self.sidelength,self.sidelength,abs(zmax-zmin)]))

        return openScadROI
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Rectangle ROI class
class rectangleROI(ROI):
def __init__(self,embryo,name,Id,offset,sidelengthX,sidelengthY,color='b'):

    """Creates new rectangle ROI.

    Args:
        embryo (pyfrp.subclasses.pyfrp_embryo.embryo): Embryo ROI belongs to.
        name (str): Name of ROI.
        Id (int): Id of ROI.
        offset (list): Offset (lower-left corner) of ROI.
        sidelengthX (float): Sidelength in x-direction.
        sidelengthY (float): Sidelength in y-direction.

    Keyword Args:
        color (str): Color of ROI.

    """

    ROI.__init__(self,embryo,name,Id,color=color)

    self.sidelengthX=sidelengthX
    self.sidelengthY=sidelengthY
    self.offset=offset

def setSideLengthX(self,s):

    """Sets sidelength in x-direction.

    Args:
        s (float): New sidelength.

    Returns:
        float: New sidelength.

    """

    self.sidelengthX=s
    return self.sidelengthX

def getSideLengthX(self):

    """Returns sidelength in x-direction.

    Returns:
        float: Current sidelength.

    """

    return self.sidelengthX

def setSideLengthY(self,s):

    """Sets sidelength in y-direction.

    Args:
        s (float): New sidelength.

    Returns:
        float: New sidelength.

    """

    self.sidelengthY=s
    return self.sidelengthY

def getSideLengthY(self):

    """Returns sidelength in y-direction.

    Returns:
        float: Current sidelength.

    """

    return self.sidelengthY

def setOffset(self,c):

    """Sets offset of ROI.

    Args:
        c (list): New offset.

    Returns:
        list: New offset.

    """

    self.offset=c
    return self.offset

def getOffset(self):

    """Returns current offset of ROI.

    Returns:
        list: Current offset.

    """

    return self.offset
def computeImgIdx(self,debug=False):

    """Computes image indices of ROI.

    See also :py:func:`pyfrp.modules.pyfrp_idx_module.getRectangleIdxImg`.

    Keyword Args:
        debug (bool): Print debugging messages.

    Returns:
        tuple: Tuple containing:

            * imgIdxX (list): Image indices in x-direction.
            * imgIdxY (list): Image indices in y-direction.

    """

    idxX,idxY=pyfrp_idx_module.getRectangleIdxImg(self.offset,self.sidelengthX,self.sidelengthY,self.embryo.dataResPx,debug=debug)

    self.imgIdxX=idxX
    self.imgIdxY=idxY

    return self.imgIdxX,self.imgIdxY
def computeMeshIdx(self,mesh):

    """Computes mesh indices of ROI.

    See also :py:func:`pyfrp.modules.pyfrp_idx_module.getRectangleIdxMesh`.

    Args:
        mesh (fipy.GmshImporter3D): Fipy mesh object.

    Returns:
        list: Newly computed mesh indices.

    """

    idxs=pyfrp_idx_module.getRectangleIdxMesh(self.sidelengthX,self.sidelengthY,self.offset,mesh,zmin=self.zmin,zmax=self.zmax)
    self.meshIdx=idxs

    return self.meshIdx
def showBoundary(self,color=None,linewidth=3,ax=None):
"""Shows ROI in a 2D plot.
If no color is specified, will use color specified in ``ROI.color``.
Keyword Args:
ax (matplotlib.axes): Matplotlib axes used for plotting. If not specified, will generate new one.
color (str): Color of plot.
linewidth (float): Linewidth of plot.
Returns:
matplotlib.axes: Axes used for plotting.
"""
if color==None:
color=self.color
if ax==None:
fig,axes = pyfrp_plot_module.makeSubplot([1,1],titles=["boundary"],sup=self.name+" boundary")
ax = axes[0]
img=np.nan*np.ones((self.embryo.dataResPx,self.embryo.dataResPx))
ax.imshow(img)
patch = ptc.Rectangle(self.offset,self.sidelengthX,self.sidelengthY,fill=False,linewidth=linewidth,color=color)
ax.add_patch(patch)
return ax
def centerOffset(self):
if np.mod(self.embryo.dataResPx,2)==0:
return self.setOffset([self.embryo.dataResPx/2+0.5-self.sidelengthX/2,self.embryo.dataResPx/2+0.5-self.sidelengthY/2])
else:
return self.setOffset([self.embryo.dataResPx/2-self.sidelengthX/2,self.embryo.dataResPx/2-self.sidelengthY/2])
def makeReducable(self,atuo=False,debug=False):
oldOffset=self.getOffset()
self.centerOffset()
if not auto:
a=raw_input("Change offset of region "+ self.name + " from " + str(oldOffset) + ' to ' + str(self.getOffset()) + ' ? [Y/N]')
if a=='N':
self.setOffset(oldOffset)
return False
elif a=='Y':
pass
if not self.checkSymmetry():
printWarning('Cannot make region '+self.name+' reducable.')
self.setOffset(oldCenter)
return False
return True
return False
def checkXYInside(self,x,y):
"""Checks if coordinates are inside ROI.
See also :py:func:`pyfrp.modules.pyfrp_idx_module.checkInsideRectangle`.
Args:
x (np.ndarray): Array of x-coordinates.
y (np.ndarray): Array of y-coordinates.
Returns:
np.ndarray: Array of booleans with corresponding to [x,y].
"""
return pyfrp_idx_module.checkInsideRectangle(x,y,self.offset,self.sidelengthX,self.sidelengthY)
def computeXYExtend(self):
"""Computes extend of ROI in x/y direction.
Returns:
tuple: Tuple containing:
* xExtend (list): List containing minimum/maximum x-coordinate (``[xmin,xmax]``).
* yExtend (list): List containing minimum/maximum y-coordinate (``[ymin,ymax]``).
"""
self.xExtend=[self.offset[0],self.offset[0]+self.sidelengthX]
self.yExtend=[self.offset[1],self.offset[1]+self.sidelengthY]
return self.xExtend, self.yExtend
def getCorners(self):
"""Returns corners of rectangle in counter-clockwise order, starting with offset.
Returns:
list: List of 2D coordinates of corners.
"""
corner1=np.asarray(self.offset)
corner2=np.asarray([self.offset[0]+self.sidelengthX,self.offset[1]])
corner3=np.asarray([self.offset[0]+self.sidelengthX,self.offset[1]+self.sidelengthY])
corner4=np.asarray([self.offset[0],self.offset[1]+self.sidelengthY])
return [corner1,corner2,corner3,corner4]
def getCenterOfMass(self):
r"""Computes center of mass of ROI.
The center of mass is computed by
.. math:: c = \frac{1}{N} \sum\limits_{i=1}{N} x_i ,
where :math:`c` is the center of mass, :math:`N` the number of corners and :math:`x_i` is the
coordinate of corner :math:`i` .
Returns:
numpy.ndarray: Center of mass.
"""
corners=self.getCorners()
CoM=cornes[0]
for i in range(1,len(corners)):
CoM=CoM+corners[i]
CoM=CoM/len(corners)
return CoM
def genAsOpenscad(self):
"""Generates ROI as solid python object.
Useful if ROI is used to be passed to openscad.
.. note:: Will grab extent of geometry to find bounds in z-direction.
Returns:
solid.solidpython.cube: Solid python object.
"""
try:
ext=self.embryo.geometry.getZExtend()
except AttributeError:
printError("genAsOpenscad: Cannot greab extend from geometry of type " + self.embryo.geometry.typ)
openScadROI=solid.translate([self.offset[0],self.offset[1],min(ext)])(solid.cube([self.sidelengthX,self.sidelengthY,abs(max(ext)-min(ext))]))
return openScadROI
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Rectangle and slice ROI class
class rectangleSliceROI(rectangleROI, sliceROI):

    """Rectangular ROI restricted to a z-slice.

    Combines :py:class:`rectangleROI` (x/y footprint) with
    :py:class:`sliceROI` (z-range via ``height``/``width``).
    """

    def __init__(self, embryo, name, Id, offset, sidelengthX, sidelengthY, height, width, sliceBottom, color='b'):
        rectangleROI.__init__(self, embryo, name, Id, offset, sidelengthX, sidelengthY, color=color)
        sliceROI.__init__(self, embryo, name, Id, height, width, sliceBottom, color=color)

    def computeImgIdx(self, debug=False):
        """Computes image indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getRectangleIdxImg`.

        Keyword Args:
            debug (bool): Print debugging messages.

        Returns:
            tuple: Tuple containing:

                * imgIdxX (list): Image indices in x-direction.
                * imgIdxY (list): Image indices in y-direction.
        """
        # Bugfix: argument order now matches rectangleROI.computeImgIdx
        # (offset first, then sidelengths); it was previously passed as
        # (sidelengthX, sidelengthY, offset).
        [self.imgIdxX, self.imgIdxY] = pyfrp_idx_module.getRectangleIdxImg(self.offset, self.sidelengthX, self.sidelengthY, self.embryo.dataResPx, debug=debug)
        return self.imgIdxX, self.imgIdxY

    def computeMeshIdx(self, mesh):
        """Computes mesh indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getRectangleIdxMesh`.

        Args:
            mesh (fipy.GmshImporter3D): Fipy mesh object.

        Returns:
            list: Newly computed mesh indices.
        """
        self.meshIdx = pyfrp_idx_module.getRectangleIdxMesh(self.sidelengthX, self.sidelengthY, self.offset, mesh, zmin=self.zmin, zmax=self.zmax)
        return self.meshIdx

    def checkXYInside(self, x, y):
        """Checks if coordinates are inside ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.checkInsideRectangle`.

        Args:
            x (np.ndarray): Array of x-coordinates.
            y (np.ndarray): Array of y-coordinates.

        Returns:
            np.ndarray: Array of booleans with corresponding to [x,y].
        """
        return pyfrp_idx_module.checkInsideRectangle(x, y, self.offset, self.sidelengthX, self.sidelengthY)

    def computeXYExtend(self):
        """Computes extend of ROI in x/y direction.

        Returns:
            tuple: Tuple containing:

                * xExtend (list): List containing minimum/maximum x-coordinate (``[xmin,xmax]``).
                * yExtend (list): List containing minimum/maximum y-coordinate (``[ymin,ymax]``).
        """
        self.xExtend = [self.offset[0], self.offset[0] + self.sidelengthX]
        self.yExtend = [self.offset[1], self.offset[1] + self.sidelengthY]
        return self.xExtend, self.yExtend

    def genGmshDomain(self, volSizePx=20., genLoops=True, genSurfaces=True, genVol=True, minID=None):
        """Translates ROI into gmsh domain object.

        This object can then be used to write ROIs to ``.geo`` files.

        See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addCuboidByParameters`.

        .. note:: If ``minID==None``, will grab maximum ID via :py:func:`pyfrp.subclasses.pyfrp_geometry.geometry.getMaxGeoID` and add 1.

        Keyword Args:
            volSizePx (float): Mesh size of vertices.
            genLoops (bool): Generate line loops.
            genSurfaces (bool): Generate surfaces.
            genVol (bool): Generate surface loop and corresponding volume.
            minID (int): Id at which geo IDs should start.

        Returns:
            pyfrp.modules.pyfrp_gmsh_geometry.domain: Domain object.
        """
        d = pyfrp_gmsh_geometry.domain()

        # NOTE(review): cuboid is anchored at self.height with depth
        # self.width — assumes sliceROI uses height/width for z placement;
        # confirm against addCuboidByParameters.
        d.addCuboidByParameters([self.offset[0], self.offset[1], self.height], self.sidelengthX, self.sidelengthY, self.width, volSizePx,
                                plane="z", genLoops=genLoops, genSurfaces=genSurfaces, genVol=genVol)

        if minID == None:
            minID = self.embryo.geometry.getMaxGeoID() + 1
        d.incrementAllIDs(minID)

        return d

    def writeToGeoFile(self, fn=None, volSizePx=20., genLoops=True, genSurfaces=True, genVol=True, minID=None):
        """Writes ROI to geo file.

        .. note:: If ``fn`` is not given, will save .geo file of ROI in same folder as the geometry file of the embryo with the following path:
           ``path/to/embryos/geo/file/nameOfEmbryo_nameOfROI.geo`` .

        See also :py:func:`pyfrp.subclasses.pyfrp_ROI.polySliceROI.genGmshDomain`.

        Keyword Args:
            volSizePx (float): Mesh size of vertices.
            genLoops (bool): Generate line loops.
            genSurfaces (bool): Generate surfaces.
            genVol (bool): Generate surface loop and corresponding volume.
            minID (int): Id at which geo IDs should start.

        Returns:
            str: Path to geo file.
        """
        if fn == None:
            folder = os.path.dirname(self.embryo.geometry.fnGeo)
            fn = pyfrp_misc_module.slashToFn(folder) + self.embryo.name + "_" + self.name + ".geo"
            fn = fn.replace(" ", "_")

        d = self.genGmshDomain(volSizePx=volSizePx, genLoops=genLoops, genSurfaces=genSurfaces, genVol=genVol, minID=minID)
        d.writeToFile(fn)

        return fn

    def genAsOpenscad(self):
        """Generates ROI as solid python object.

        Useful if ROI is used to be passed to openscad.

        Returns:
            solid.solidpython.cube: Solid python object.
        """
        z = self.getOpenscadZExtend()
        zmin, zmax = min(z), max(z)

        openScadROI = solid.translate([self.offset[0], self.offset[1], zmin])(solid.cube([self.sidelengthX, self.sidelengthY, abs(zmax - zmin)]))
        return openScadROI
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Polygon ROI class
class polyROI(ROI):

    """Polygonal ROI defined by a list of 2D ``corners``."""

    def __init__(self, embryo, name, Id, corners, color='b'):
        ROI.__init__(self, embryo, name, Id, color=color)

        self.corners = corners

    def setCorners(self, corners):
        """Sets corner list and returns it."""
        self.corners = corners
        return corners

    def getCorners(self):
        """Returns corner list."""
        return self.corners

    def addCorner(self, c, pos=-1):
        """Inserts corner ``c`` at position ``pos`` and returns corner list."""
        self.corners.insert(pos, c)
        return self.corners

    def appendCorner(self, c):
        """Appends corner ``c`` and returns corner list."""
        self.corners.append(c)
        return self.corners

    def removeCorner(self, pos):
        """Removes corner at position ``pos`` and returns corner list."""
        self.corners.pop(pos)
        return self.corners

    def computeImgIdx(self, debug=False):
        """Computes image indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getPolyIdxImg`.

        Keyword Args:
            debug (bool): Print debugging messages.

        Returns:
            tuple: Tuple containing:

                * imgIdxX (list): Image indices in x-direction.
                * imgIdxY (list): Image indices in y-direction.
        """
        [self.imgIdxX, self.imgIdxY] = pyfrp_idx_module.getPolyIdxImg(self.corners, self.embryo.dataResPx, debug=debug)
        return self.imgIdxX, self.imgIdxY

    def computeMeshIdx(self, mesh):
        """Computes mesh indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getPolyIdxMesh`.

        Args:
            mesh (fipy.GmshImporter3D): Fipy mesh object.

        Returns:
            list: Newly computed mesh indices.
        """
        self.meshIdx = pyfrp_idx_module.getPolyIdxMesh(self.corners, mesh, zmin=self.zmin, zmax=self.zmax)
        return self.meshIdx

    def showBoundary(self, color=None, linewidth=3, ax=None):
        """Shows ROI in a 2D plot.

        If no color is specified, will use color specified in ``ROI.color``.

        Keyword Args:
            ax (matplotlib.axes): Matplotlib axes used for plotting. If not specified, will generate new one.
            color (str): Color of plot.
            linewidth (float): Linewidth of plot.

        Returns:
            matplotlib.axes: Axes used for plotting.
        """
        if color == None:
            color = self.color

        if ax == None:
            fig, axes = pyfrp_plot_module.makeSubplot([1, 1], titles=["boundary"], sup=self.name + " boundary")
            ax = axes[0]

            img = np.nan * np.ones((self.embryo.dataResPx, self.embryo.dataResPx))
            ax.imshow(img)

        # Bugfix: a corner list describes a polygon, not a rectangle;
        # ptc.Rectangle expects (xy, width, height) and has no ``closed``.
        patch = ptc.Polygon(self.corners, closed=True, fill=False, linewidth=linewidth, color=color)
        ax.add_patch(patch)
        return ax

    def checkXYInside(self, x, y):
        """Checks if coordinates are inside ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.checkInsidePoly`.

        Args:
            x (np.ndarray): Array of x-coordinates.
            y (np.ndarray): Array of y-coordinates.

        Returns:
            np.ndarray: Array of booleans with corresponding to [x,y].
        """
        return pyfrp_idx_module.checkInsidePoly(x, y, self.corners)

    def computeXYExtend(self):
        """Computes extend of ROI in x/y direction.

        Returns:
            tuple: Tuple containing:

                * xExtend (list): List containing minimum/maximum x-coordinate (``[xmin,xmax]``).
                * yExtend (list): List containing minimum/maximum y-coordinate (``[ymin,ymax]``).
        """
        # Bugfix: previously referenced the undefined local ``corners``.
        cornersNP = np.array(self.corners)

        xmax = cornersNP[:, 0].max()
        xmin = cornersNP[:, 0].min()
        ymax = cornersNP[:, 1].max()
        ymin = cornersNP[:, 1].min()

        self.xExtend = [xmin, xmax]
        self.yExtend = [ymin, ymax]

        return self.xExtend, self.yExtend

    def getCenterOfMass(self):
        r"""Computes center of mass of ROI.

        The center of mass is computed by

        .. math:: c = \frac{1}{N} \sum\limits_{i=1}{N} x_i ,

        where :math:`c` is the center of mass, :math:`N` the number of corners and :math:`x_i` is the
        coordinate of corner :math:`i` .

        Returns:
            numpy.ndarray: Center of mass.
        """
        # Bugfix: previously used the undefined names ``self.cornes`` and
        # bare ``corners``.
        CoM = np.asarray(self.corners[0])
        for i in range(1, len(self.corners)):
            CoM = CoM + np.asarray(self.corners[i])

        CoM = CoM / len(self.corners)

        return CoM

    def moveCorner(self, idx, x, y):
        """Moves corner to new postion.

        Args:
            idx (int): Index of corner to be moved.
            x (float): New x-coordinate.
            y (float): New y-coordinate.

        Returns:
            list: Updated corners list.
        """
        if idx == -1:
            idx = len(self.corners) - 1

        # Assign only for valid indices (mirrors the old enumerate loop,
        # which silently ignored out-of-range indices).
        if 0 <= idx < len(self.corners):
            self.corners[idx] = [x, y]

        return self.corners

    def genAsOpenscad(self):
        """Generates ROI as solid python object.

        Useful if ROI is used to be passed to openscad.

        Returns:
            solid.solidpython.linear_extrude: Solid python object.
        """
        try:
            ext = self.embryo.geometry.getZExtend()
        except AttributeError:
            # NOTE(review): ``ext`` stays undefined if getZExtend is missing,
            # so the extrusion below raises NameError — confirm fallback.
            printError("genAsOpenscad: Cannot greab extend from geometry of type " + self.embryo.geometry.typ)

        poly = solid.polygon(self.corners)
        extruded = solid.linear_extrude(height=abs(max(ext) - min(ext)), center=False)(poly)
        openScadROI = solid.translate([0, 0, min(ext)])(extruded)

        return openScadROI
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Polygon and slice ROI class
class polySliceROI(polyROI, sliceROI):

    """Polygonal ROI restricted to a z-slice.

    Combines :py:class:`polyROI` (x/y footprint) with
    :py:class:`sliceROI` (z-range).
    """

    def __init__(self, embryo, name, Id, corners, height, width, sliceBottom, color='b'):
        polyROI.__init__(self, embryo, name, Id, corners, color=color)
        sliceROI.__init__(self, embryo, name, Id, height, width, sliceBottom, color=color)

        self.corners = corners

    def computeImgIdx(self, debug=False):
        """Computes image indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getPolyIdxImg`.

        Keyword Args:
            debug (bool): Print debugging messages.

        Returns:
            tuple: Tuple containing:

                * imgIdxX (list): Image indices in x-direction.
                * imgIdxY (list): Image indices in y-direction.
        """
        [self.imgIdxX, self.imgIdxY] = pyfrp_idx_module.getPolyIdxImg(self.corners, self.embryo.dataResPx, debug=debug)
        return self.imgIdxX, self.imgIdxY

    def computeMeshIdx(self, mesh):
        """Computes mesh indices of ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.getPolyIdxMesh`.

        Args:
            mesh (fipy.GmshImporter3D): Fipy mesh object.

        Returns:
            list: Newly computed mesh indices.
        """
        self.meshIdx = pyfrp_idx_module.getPolyIdxMesh(self.corners, mesh, zmin=self.zmin, zmax=self.zmax)
        return self.meshIdx

    def checkXYInside(self, x, y):
        """Checks if coordinates are inside ROI.

        See also :py:func:`pyfrp.modules.pyfrp_idx_module.checkInsidePoly`.

        Args:
            x (np.ndarray): Array of x-coordinates.
            y (np.ndarray): Array of y-coordinates.

        Returns:
            np.ndarray: Array of booleans with corresponding to [x,y].
        """
        return pyfrp_idx_module.checkInsidePoly(x, y, self.corners)

    def computeXYExtend(self):
        """Computes extend of ROI in x/y direction.

        Returns:
            tuple: Tuple containing:

                * xExtend (list): List containing minimum/maximum x-coordinate (``[xmin,xmax]``).
                * yExtend (list): List containing minimum/maximum y-coordinate (``[ymin,ymax]``).
        """
        # Bugfix: previously referenced the undefined local ``corners``.
        cornersNP = np.array(self.corners)

        xmax = cornersNP[:, 0].max()
        xmin = cornersNP[:, 0].min()
        ymax = cornersNP[:, 1].max()
        ymin = cornersNP[:, 1].min()

        self.xExtend = [xmin, xmax]
        self.yExtend = [ymin, ymax]

        return self.xExtend, self.yExtend

    def genGmshDomain(self, volSizePx=20., genLoops=True, genSurfaces=True, genVol=True, minID=None):
        """Translates ROI into gmsh domain object.

        This object can then be used to write ROIs to ``.geo`` files.

        See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addPrismByParameters`.

        .. note:: If ``minID==None``, will grab maximum ID via :py:func:`pyfrp.subclasses.pyfrp_geometry.geometry.getMaxGeoID` and add 1.

        Keyword Args:
            volSizePx (float): Mesh size of vertices.
            genLoops (bool): Generate line loops.
            genSurfaces (bool): Generate surfaces.
            genVol (bool): Generate surface loop and corresponding volume.
            minID (int): Id at which geo IDs should start.

        Returns:
            pyfrp.modules.pyfrp_gmsh_geometry.domain: Domain object.
        """
        d = pyfrp_gmsh_geometry.domain()

        # NOTE(review): ``height=self.zmax`` looks like it should be the slice
        # thickness (zmax-zmin) — confirm against addPrismByParameters.
        d.addPrismByParameters(self.corners, volSizePx, z=self.zmin, height=self.zmax, plane="z", genLoops=genLoops, genSurfaces=genSurfaces, genVol=genVol)

        if minID == None:
            minID = self.embryo.geometry.getMaxGeoID() + 1
        d.incrementAllIDs(minID)

        return d

    def writeToGeoFile(self, fn=None, volSizePx=20., genLoops=True, genSurfaces=True, genVol=True, minID=None):
        """Writes ROI to geo file.

        .. note:: If ``fn`` is not given, will save .geo file of ROI in same folder as the geometry file of the embryo with the following path:
           ``path/to/embryos/geo/file/nameOfEmbryo_nameOfROI.geo`` .

        See also :py:func:`pyfrp.subclasses.pyfrp_ROI.polySliceROI.genGmshDomain`.

        Keyword Args:
            volSizePx (float): Mesh size of vertices.
            genLoops (bool): Generate line loops.
            genSurfaces (bool): Generate surfaces.
            genVol (bool): Generate surface loop and corresponding volume.
            minID (int): Id at which geo IDs should start.

        Returns:
            str: Path to geo file.
        """
        if fn == None:
            folder = os.path.dirname(self.embryo.geometry.fnGeo)
            fn = pyfrp_misc_module.slashToFn(folder) + self.embryo.name + "_" + self.name + ".geo"
            fn = fn.replace(" ", "_")

        d = self.genGmshDomain(volSizePx=volSizePx, genLoops=genLoops, genSurfaces=genSurfaces, genVol=genVol, minID=minID)
        d.writeToFile(fn)

        return fn

    def genAsOpenscad(self):
        """Generates ROI as solid python object.

        Useful if ROI is used to be passed to openscad.

        Returns:
            solid.solidpython.linear_extrude: Solid python object.
        """
        z = self.getOpenscadZExtend()
        zmin, zmax = min(z), max(z)

        poly = solid.polygon(self.corners)
        extruded = solid.linear_extrude(height=abs(zmax - zmin), center=False)(poly)
        openScadROI = solid.translate([0, 0, zmin])(extruded)

        return openScadROI
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Polygon and slice ROI class
class customROI(ROI):

    """ROI composed of other ROIs.

    Each included ROI carries a procedure flag: ``1`` means the ROI is
    merged (intersected) into this ROI, ``-1`` means it is subtracted.
    """

    def __init__(self,embryo,name,Id,color='b'):
        ROI.__init__(self,embryo,name,Id,color=color)

        # Parallel lists: ROIsIncluded[i] was combined with procedure
        # procedures[i] (1 = merge/intersect, -1 = subtract).
        self.ROIsIncluded=[]
        self.procedures=[]

    def addROI(self,r,p):
        """Registers ROI ``r`` with procedure ``p``; duplicates are ignored."""
        if r not in self.ROIsIncluded:
            self.ROIsIncluded.append(r)
            self.procedures.append(p)
        return self.ROIsIncluded

    def removeROI(self,r):
        """Removes ROI ``r`` and its matching procedure entry."""
        if r in self.ROIsIncluded:
            i=self.ROIsIncluded.index(r)
            self.ROIsIncluded.remove(r)
            self.procedures.pop(i)
        return self.ROIsIncluded

    def mergeROIs(self,r):
        """Intersects this ROI's indices with ``r`` and registers ``r`` (procedure 1)."""
        if len(self.ROIsIncluded)==0:
            # First ROI simply defines the indices.
            self.copyIdxs(r)
        else:
            # Intersection via elementwise product of binary image masks.
            self.computeImgMask()
            self.imgMask=self.imgMask*r.computeImgMask()
            self.imgIdxX,self.imgIdxY=pyfrp_idx_module.mask2ind(self.imgMask,self.embryo.dataResPx)
            self.meshIdx=pyfrp_misc_module.matchVals(self.meshIdx,r.meshIdx)
        self.addROI(r,1)
        self.extImgIdxX,self.extImgIdxY = pyfrp_idx_module.getCommonExtendedPixels(self.ROIsIncluded,self.embryo.dataResPx,procedures=self.procedures,debug=False)
        return self.ROIsIncluded

    def substractROIs(self,r):
        """Subtracts ``r``'s indices from this ROI and registers ``r`` (procedure -1)."""
        if len(self.ROIsIncluded)==0:
            self.copyIdxs(r)
        else:
            # Subtraction: keep pixels of this ROI not covered by r's mask.
            self.computeImgMask()
            self.imgMask=self.imgMask*(1-r.computeImgMask())
            self.imgIdxX,self.imgIdxY=pyfrp_idx_module.mask2ind(self.imgMask,self.embryo.dataResPx)
            self.meshIdx=pyfrp_misc_module.complValsSimple(self.meshIdx,r.meshIdx)
        self.addROI(r,-1)
        self.extImgIdxX,self.extImgIdxY = pyfrp_idx_module.getCommonExtendedPixels(self.ROIsIncluded,self.embryo.dataResPx,procedures=self.procedures,debug=False)
        return self.ROIsIncluded

    def getROIsIncluded(self):
        """Returns list of included ROIs."""
        return self.ROIsIncluded

    def setROIsIncluded(self,l):
        """Sets list of included ROIs and returns it."""
        self.ROIsIncluded=l
        return self.ROIsIncluded

    def updateIdxs(self):
        """Recomputes all indices by replaying the stored procedure list."""
        self.emptyIdxs()

        for i in range(len(self.ROIsIncluded)):
            if i==0:
                self.copyIdxs(self.ROIsIncluded[i])
            else:
                if self.procedures[i]==1:
                    self.mergeROIs(self.ROIsIncluded[i])
                elif self.procedures[i]==-1:
                    self.substractROIs(self.ROIsIncluded[i])
                else:
                    printWarning("Unknown Procedure" + str(self.procedures[i]) + " in Custom ROI " + self.name +". Not going to do anything.")

        self.computeNumExt()

        return self.getAllIdxs()

    def showBoundary(self,color=None,linewidth=3,ax=None):
        """Shows ROI in a 2D plot by plotting all included ROIs.

        If no color is specified, will use color specified in ``ROI.color``. If ``color=="each"``,
        will plot each included ROI in its respective color.

        Keyword Args:
            ax (matplotlib.axes): Matplotlib axes used for plotting. If not specified, will generate new one.
            color (str): Color of plot.
            linewidth (float): Linewidth of plot.

        Returns:
            matplotlib.axes: Axes used for plotting.
        """
        if color==None:
            color=self.color
        if color=='each':
            # color=None lets each ROI fall back to its own color.
            color=None

        if ax==None:
            fig,axes = pyfrp_plot_module.makeSubplot([1,1],titles=["boundary"],sup=self.name+" boundary")
            ax = axes[0]

            img=np.nan*np.ones((self.embryo.dataResPx,self.embryo.dataResPx))
            ax.imshow(img)

        for r in self.ROIsIncluded:
            # Some ROI types (e.g. pure slice ROIs) have no 2D boundary.
            if hasattr(r,'showBoundary'):
                r.showBoundary(color=color,ax=ax,linewidth=linewidth)

        return ax

    def checkXYInside(self,x,y):
        """Checks if coordinates are inside ROI.

        Does this by looping through all ROIs specified in ``ROIsIncluded``
        and checking if x/y is supposed to lie inside or outside of
        the respective ROI.

        Args:
            x (np.ndarray): Array of x-coordinates.
            y (np.ndarray): Array of y-coordinates.

        Returns:
            np.ndarray: Array of booleans with corresponding to [x,y].
        """
        # NOTE(review): ``and``/``not`` on array-valued results would raise or
        # misbehave for np.ndarray inputs — presumably called with scalars
        # or 1-element results; confirm against callers.
        b=True
        for i,r in enumerate(self.ROIsIncluded):
            if self.procedures[i]==1:
                b=b and r.checkXYInside(x,y)
            elif self.procedures[i]==-1:
                b=b and not r.checkXYInside(x,y)
        return b

    def computeXYExtend(self):
        """Computes extend of ROI in x/y direction.

        Returns:
            tuple: Tuple containing:

                * xExtend (list): List containing minimum/maximum x-coordinate (``[xmin,xmax]``).
                * yExtend (list): List containing minimum/maximum y-coordinate (``[ymin,ymax]``).
        """
        self.xExtend,self.yExtend=pyfrp_idx_module.getCommonXYExtend(self.ROIsIncluded)
        return self.xExtend,self.yExtend

    def roiIncluded(self,r):
        """Returns if a ROI is included in customROI.

        Args:
            r (pyfrp.subclasses.pyfrp_ROI.ROI): A ROI.

        Returns:
            bool: ``True`` if included, ``False`` else.
        """
        return r in self.ROIsIncluded

    def genAsOpenscad(self):
        """Generates ROI as solid python object.

        Useful if ROI is used to be passed to openscad.

        Returns:
            solid.solidpython.openscad_object: Solid python object.
        """
        # Replay procedures as CSG operations: + is union, - is difference.
        for i,r in enumerate(self.ROIsIncluded):
            if i==0:
                openScadROI=r.genAsOpenscad()
            else:
                if self.procedures[i]==-1:
                    openScadROI=openScadROI-r.genAsOpenscad()
                elif self.procedures[i]==1:
                    openScadROI=openScadROI+r.genAsOpenscad()
        return openScadROI
| mueller-lab/PyFRAP | pyfrp/subclasses/pyfrp_ROI.py | Python | gpl-3.0 | 111,299 |
# pylint: disable=C0111,R0903
"""Displays update information per repository for pacman.
Parameters:
* pacman.sum: If you prefere displaying updates with a single digit (defaults to 'False')
Requires the following executables:
* fakeroot
* pacman
contributed by `Pseudonick47 <https://github.com/Pseudonick47>`_ - many thanks!
"""
import os
import threading
import core.module
import core.widget
import core.decorators
import util.cli
import util.format
from bumblebee_status.discover import utility
# list of repositories.
# the last one should always be other
repos = ["core", "extra", "community", "multilib", "testing", "other"]
def get_pacman_info(widget, path):
    """Queries pending pacman updates and stores per-repository counts.

    Runs the bundled ``pacman-updates`` helper, tallies each pending package
    into its repository bucket, writes the counts onto ``widget``, and
    triggers a redraw.

    Args:
        widget: widget the per-repository counts are written to.
        path: module directory (kept for thread-target signature; unused).
    """
    cmd = utility("pacman-updates")
    result = util.cli.execute(cmd, ignore_errors=True)

    count = len(repos) * [0]

    for line in result.splitlines():
        if line.startswith(("http", "rsync")):
            for i in range(len(repos) - 1):
                if "/" + repos[i] + "/" in line:
                    count[i] += 1
                    break
            else:
                # for-else: no known repo matched. Bugfix: previously
                # incremented ``result[-1]`` (a character of the output
                # string, a TypeError) instead of the "other" bucket.
                count[-1] += 1

    for i in range(len(repos)):
        widget.set(repos[i], count[i])

    core.event.trigger("update", [widget.module.id], redraw_only=True)
class Module(core.module.Module):
    @core.decorators.every(minutes=30)
    def __init__(self, config, theme):
        super().__init__(config, theme, core.widget.Widget(self.updates))

    def updates(self, widget):
        """Render per-repository counts, or a single total if 'sum' is set."""
        counts = [widget.get(repo, 0) for repo in repos]
        if util.format.asbool(self.parameter("sum")):
            return str(sum(counts))
        return "/".join(str(c) for c in counts)

    def update(self):
        """Run the (slow) pacman query on a background thread."""
        path = os.path.dirname(os.path.abspath(__file__))
        worker = threading.Thread(target=get_pacman_info, args=(self.widget(), path))
        worker.start()

    def state(self, widget):
        """Weight pending updates by repository importance and map to a state."""
        weighted = 0
        for rank, repo in enumerate(repos):
            weighted += (len(repos) - rank) * widget.get(repo, 0)
        if weighted < 10:
            return "good"
        return self.threshold_state(weighted, 100, 150)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| tobi-wan-kenobi/bumblebee-status | bumblebee_status/modules/contrib/pacman.py | Python | mit | 2,175 |
import os, pickle, datetime, itertools, operator
from django.db import models as dbmodels
from autotest.frontend.afe import rpc_utils, model_logic
from autotest.frontend.afe import models as afe_models, readonly_connection
from autotest.frontend.tko import models, tko_rpc_utils, graphing_utils
from autotest.frontend.tko import preconfigs
#
# IMPORTANT: please update INTERFACE_VERSION with the current date whenever
# the interface changes, so that RPC clients can handle the changes
#
INTERFACE_VERSION = (2013, 05, 23)
# table/spreadsheet view support
def get_test_views(**filter_data):
    """Returns serialized TestView objects matching the given filters."""
    return rpc_utils.prepare_for_serialization(
        models.TestView.list_objects(filter_data))


def get_num_test_views(**filter_data):
    """Returns the number of TestView objects matching the given filters."""
    return models.TestView.query_count(filter_data)
def get_group_counts(group_by, header_groups=None, fixed_headers=None,
                     extra_select_fields=None, **filter_data):
    """
    Queries against TestView grouping by the specified fields and computings
    counts for each group.
    * group_by should be a list of field names.
    * extra_select_fields can be used to specify additional fields to select
      (usually for aggregate functions).
    * header_groups can be used to get lists of unique combinations of group
      fields.  It should be a list of tuples of fields from group_by.  It's
      primarily for use by the spreadsheet view.
    * fixed_headers can map header fields to lists of values. the header will
      guaranteed to return exactly those value. this does not work together
      with header_groups.

    Returns a dictionary with two keys:
    * header_values contains a list of lists, one for each header group in
      header_groups.  Each list contains all the values for the corresponding
      header group as tuples.
    * groups contains a list of dicts, one for each row.  Each dict contains
      keys for each of the group_by fields, plus a 'group_count' key for the
      total count in the group, plus keys for each of the extra_select_fields.
      The keys for the extra_select_fields are determined by the "AS" alias of
      the field.
    """
    query = models.TestView.objects.get_query_set_with_joins(filter_data)
    # don't apply presentation yet, since we have extra selects to apply
    query = models.TestView.query_objects(filter_data, initial_query=query,
                                          apply_presentation=False)
    # COUNT(...) must be added before any extra selects so the alias exists
    # when presentation (ordering etc.) is applied below.
    count_alias, count_sql = models.TestView.objects.get_count_sql(query)
    query = query.extra(select={count_alias: count_sql})
    if extra_select_fields:
        query = query.extra(select=extra_select_fields)
    query = models.TestView.apply_presentation(query, filter_data)

    group_processor = tko_rpc_utils.GroupDataProcessor(query, group_by,
                                                       header_groups or [],
                                                       fixed_headers or {})
    group_processor.process_group_dicts()
    return rpc_utils.prepare_for_serialization(group_processor.get_info_dict())
def get_num_groups(group_by, **filter_data):
    """
    Gets the count of unique groups with the given grouping fields.
    """
    base_query = models.TestView.objects.get_query_set_with_joins(filter_data)
    filtered = models.TestView.query_objects(filter_data,
                                             initial_query=base_query)
    return models.TestView.objects.get_num_groups(filtered, group_by)
def get_status_counts(group_by, header_groups=None, fixed_headers=None,
                      **filter_data):
    """
    Like get_group_counts, but also computes counts of passed, complete (and
    valid), and incomplete tests, stored in keys "pass_count', 'complete_count',
    and 'incomplete_count', respectively.
    """
    # Mutable default arguments ([] / {}) replaced with None to avoid
    # cross-call sharing; get_group_counts normalizes None itself.
    return get_group_counts(group_by, header_groups=header_groups or [],
                            fixed_headers=fixed_headers or {},
                            extra_select_fields=tko_rpc_utils.STATUS_FIELDS,
                            **filter_data)
def get_latest_tests(group_by, header_groups=None, fixed_headers=None,
                     extra_info=None, **filter_data):
    """
    Similar to get_status_counts, but return only the latest test result per
    group.  It still returns the same information (i.e. with pass count etc.)
    for compatibility.  It includes an additional field "test_idx" with each
    group.

    @param extra_info a list containing the field names that should be returned
                      with each cell. The fields are returned in the extra_info
                      field of the return dictionary.
    """
    # Mutable default arguments ([] / {}) replaced with None to avoid
    # cross-call sharing.
    header_groups = header_groups or []
    fixed_headers = fixed_headers or {}
    extra_info = extra_info or []

    # find latest test per group
    initial_query = models.TestView.objects.get_query_set_with_joins(
        filter_data)
    query = models.TestView.query_objects(filter_data,
                                          initial_query=initial_query,
                                          apply_presentation=False)
    query = query.exclude(status__in=tko_rpc_utils._INVALID_STATUSES)
    query = query.extra(
        select={'latest_test_idx' : 'MAX(%s)' %
                models.TestView.objects.get_key_on_this_table('test_idx')})
    query = models.TestView.apply_presentation(query, filter_data)

    group_processor = tko_rpc_utils.GroupDataProcessor(query, group_by,
                                                       header_groups,
                                                       fixed_headers)
    group_processor.process_group_dicts()
    info = group_processor.get_info_dict()

    # fetch full info for these tests so we can access their statuses
    all_test_ids = [group['latest_test_idx'] for group in info['groups']]
    test_views = initial_query.in_bulk(all_test_ids)

    for group_dict in info['groups']:
        test_idx = group_dict.pop('latest_test_idx')
        group_dict['test_idx'] = test_idx
        test_view = test_views[test_idx]

        tko_rpc_utils.add_status_counts(group_dict, test_view.status)
        group_dict['extra_info'] = []
        for field in extra_info:
            group_dict['extra_info'].append(getattr(test_view, field))

    return rpc_utils.prepare_for_serialization(info)
def get_job_ids(**filter_data):
    """
    Returns AFE job IDs for all tests matching the filters.
    """
    query = models.TestView.query_objects(filter_data)
    job_ids = set()
    for row in query.values('job_tag').distinct():
        # a job tag looks like "<job id>-<rest>"; take the leading component
        prefix = row['job_tag'].partition('-')[0]
        try:
            job_ids.add(int(prefix))
        except ValueError:
            # a nonstandard job tag, i.e. from contributed results
            pass
    return list(job_ids)
# test detail view
def _attributes_to_dict(attribute_list):
return dict((attribute.attribute, attribute.value)
for attribute in attribute_list)
def _iteration_attributes_to_dict(attribute_list):
iter_keyfunc = operator.attrgetter('iteration')
attribute_list.sort(key=iter_keyfunc)
iterations = {}
for key, group in itertools.groupby(attribute_list, iter_keyfunc):
iterations[key] = _attributes_to_dict(group)
return iterations
def _format_iteration_keyvals(test):
iteration_attr = _iteration_attributes_to_dict(test.iteration_attributes)
iteration_perf = _iteration_attributes_to_dict(test.iteration_results)
all_iterations = iteration_attr.keys() + iteration_perf.keys()
max_iterations = max(all_iterations + [0])
# merge the iterations into a single list of attr & perf dicts
return [{'attr': iteration_attr.get(index, {}),
'perf': iteration_perf.get(index, {})}
for index in xrange(1, max_iterations + 1)]
def _job_keyvals_to_dict(keyvals):
return dict((keyval.key, keyval.value) for keyval in keyvals)
def get_detailed_test_views(**filter_data):
    """Returns TestViews matching the filters, each augmented with its test
    attributes, iteration keyvals, labels and the owning job's keyvals."""
    test_views = models.TestView.list_objects(filter_data)

    # Batch-fetch the underlying Test objects and prefetch their related
    # rows in bulk to avoid one query per test.
    tests_by_id = models.Test.objects.in_bulk([test_view['test_idx']
                                               for test_view in test_views])
    tests = tests_by_id.values()
    models.Test.objects.populate_relationships(tests, models.TestAttribute,
                                               'attributes')
    models.Test.objects.populate_relationships(tests, models.IterationAttribute,
                                               'iteration_attributes')
    models.Test.objects.populate_relationships(tests, models.IterationResult,
                                               'iteration_results')
    models.Test.objects.populate_relationships(tests, models.TestLabel,
                                               'labels')

    # Same bulk treatment for the jobs and their keyvals.
    jobs_by_id = models.Job.objects.in_bulk([test_view['job_idx']
                                             for test_view in test_views])
    jobs = jobs_by_id.values()
    models.Job.objects.populate_relationships(jobs, models.JobKeyval,
                                              'keyvals')

    for test_view in test_views:
        test = tests_by_id[test_view['test_idx']]
        test_view['attributes'] = _attributes_to_dict(test.attributes)
        test_view['iterations'] = _format_iteration_keyvals(test)
        test_view['labels'] = [label.name for label in test.labels]

        job = jobs_by_id[test_view['job_idx']]
        test_view['job_keyvals'] = _job_keyvals_to_dict(job.keyvals)

    return rpc_utils.prepare_for_serialization(test_views)
# graphing view support

def get_hosts_and_tests():
    """\
    Gets every host that has had a benchmark run on it. Additionally, also
    gets a dictionary mapping the host names to the benchmarks.
    """
    host_info = {}
    # Only the benchmarks the graphing UI knows how to plot (these match the
    # benchmark_key mapping in get_static_data below).
    q = (dbmodels.Q(test_name__startswith='kernbench') |
         dbmodels.Q(test_name__startswith='dbench') |
         dbmodels.Q(test_name__startswith='tbench') |
         dbmodels.Q(test_name__startswith='unixbench') |
         dbmodels.Q(test_name__startswith='iozone'))
    test_query = models.TestView.objects.filter(q).values(
        'test_name', 'hostname', 'machine_idx').distinct()
    for result_dict in test_query:
        hostname = result_dict['hostname']
        test = result_dict['test_name']
        machine_idx = result_dict['machine_idx']
        # Accumulate one entry per host: its machine id plus the list of
        # benchmark test names seen on it.
        host_info.setdefault(hostname, {})
        host_info[hostname].setdefault('tests', [])
        host_info[hostname]['tests'].append(test)
        host_info[hostname]['id'] = machine_idx
    return rpc_utils.prepare_for_serialization(host_info)
def create_metrics_plot(queries, plot, invert, drilldown_callback,
                        normalize=None):
    """Thin RPC wrapper around graphing_utils.create_metrics_plot."""
    return graphing_utils.create_metrics_plot(
        queries, plot, invert, normalize, drilldown_callback=drilldown_callback)


def create_qual_histogram(query, filter_string, interval, drilldown_callback):
    """Thin RPC wrapper around graphing_utils.create_qual_histogram."""
    return graphing_utils.create_qual_histogram(
        query, filter_string, interval, drilldown_callback=drilldown_callback)


# TODO(showard) - this extremely generic RPC is used only by one place in the
# client.  We should come up with a more opaque RPC for that place to call and
# get rid of this.
def execute_query_with_param(query, param):
    """Run a raw parameterized SQL query on the read-only connection."""
    cursor = readonly_connection.connection().cursor()
    cursor.execute(query, param)
    return cursor.fetchall()


def get_preconfig(name, type):
    # NOTE: 'type' shadows the builtin, but renaming it would break RPC
    # clients that pass it by keyword, so it is kept as-is.
    return preconfigs.manager.get_preconfig(name, type)
def get_embedding_id(url_token, graph_type, params):
    """Return the ID of the cached embedded graph for url_token.

    On the first call for a given url_token this creates the
    EmbeddedGraphingQuery row and renders its PNG; later calls just look
    the row up.
    """
    try:
        model = models.EmbeddedGraphingQuery.objects.get(url_token=url_token)
    except models.EmbeddedGraphingQuery.DoesNotExist:
        # Params are pickled for storage; they are only ever unpickled by
        # this server, never from untrusted input.
        params_str = pickle.dumps(params)
        now = datetime.datetime.now()
        model = models.EmbeddedGraphingQuery(url_token=url_token,
                                             graph_type=graph_type,
                                             params=params_str,
                                             last_updated=now)
        model.cached_png = graphing_utils.create_embedded_plot(model,
                                                               now.ctime())
        model.save()

    return model.id


def get_embedded_query_url_token(id):
    """Inverse of get_embedding_id(): map a query ID back to its URL token."""
    model = models.EmbeddedGraphingQuery.objects.get(id=id)
    return model.url_token
# test label management

def add_test_label(name, description=None):
    """Create a TestLabel and return its ID."""
    return models.TestLabel.add_object(name=name, description=description).id


def modify_test_label(label_id, **data):
    """Update the fields of an existing TestLabel."""
    models.TestLabel.smart_get(label_id).update_object(data)


def delete_test_label(label_id):
    models.TestLabel.smart_get(label_id).delete()


def get_test_labels(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.TestLabel.list_objects(filter_data))


def get_test_labels_for_tests(**test_filter_data):
    """Return all labels attached to the tests matching the filter."""
    label_ids = models.TestView.objects.query_test_label_ids(test_filter_data)
    labels = models.TestLabel.list_objects({'id__in' : label_ids})
    return rpc_utils.prepare_for_serialization(labels)


def test_label_add_tests(label_id, **test_filter_data):
    """Attach the label to every test matching the filter."""
    test_ids = models.TestView.objects.query_test_ids(test_filter_data)
    models.TestLabel.smart_get(label_id).tests.add(*test_ids)


def test_label_remove_tests(label_id, **test_filter_data):
    """Detach the label from the matching tests that actually carry it."""
    label = models.TestLabel.smart_get(label_id)

    # only include tests that actually have this label
    extra_where = test_filter_data.get('extra_where', '')
    if extra_where:
        extra_where = '(' + extra_where + ') AND '
    # label.id is an integer primary key from our own database, so this
    # string interpolation is not an injection vector.
    extra_where += 'tko_test_labels.id = %s' % label.id
    test_filter_data['extra_where'] = extra_where
    test_ids = models.TestView.objects.query_test_ids(test_filter_data)

    label.tests.remove(*test_ids)
# user-created test attributes

def set_test_attribute(attribute, value, **test_filter_data):
    """
    * attribute - string name of attribute
    * value - string, or None to delete an attribute
    * test_filter_data - filter data to apply to TestView to choose tests to
      act upon
    """
    assert test_filter_data # disallow accidental actions on all hosts
    test_ids = models.TestView.objects.query_test_ids(test_filter_data)
    tests = models.Test.objects.in_bulk(test_ids)

    # Python 2 dict iterator; passing value=None deletes the attribute.
    for test in tests.itervalues():
        test.set_or_delete_attribute(attribute, value)
# saved queries

def get_saved_queries(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.SavedQuery.list_objects(filter_data))


def add_saved_query(name, url_token):
    """Create or update the current user's saved query; return its ID.

    Saved queries are unique per (owner, name): if one already exists its
    url_token is replaced instead of adding a duplicate.
    """
    name = name.strip()
    owner = afe_models.User.current_user().login
    existing_list = list(models.SavedQuery.objects.filter(owner=owner,
                                                          name=name))
    if existing_list:
        query_object = existing_list[0]
        query_object.url_token = url_token
        query_object.save()
        return query_object.id

    return models.SavedQuery.add_object(owner=owner, name=name,
                                        url_token=url_token).id


def delete_saved_queries(id_list):
    """Delete the given saved queries, restricted to the current user's own."""
    user = afe_models.User.current_user().login
    query = models.SavedQuery.objects.filter(id__in=id_list, owner=user)
    if query.count() == 0:
        raise model_logic.ValidationError('No such queries found for this user')
    query.delete()


# other

def get_motd():
    return rpc_utils.get_motd()
def get_static_data():
    """Return the static metadata the TKO frontend needs at startup.

    Includes the groupable/displayable field lists, the benchmark plotting
    keys, the perf-view column list, test labels, the current user, the
    preconfigured graphs and the message of the day.
    """
    result = {}
    group_fields = []
    # Build (display name, field) pairs; extra (SQL-defined) fields carry
    # their own display name, model fields use their verbose_name.
    for field in models.TestView.group_fields:
        if field in models.TestView.extra_fields:
            name = models.TestView.extra_fields[field]
        else:
            name = models.TestView.get_field_dict()[field].verbose_name
        group_fields.append((name.capitalize(), field))
    model_fields = [(field.verbose_name.capitalize(), field.column)
                    for field in models.TestView._meta.fields]
    extra_fields = [(field_name.capitalize(), field_sql)
                    for field_sql, field_name
                    in models.TestView.extra_fields.iteritems()]

    # Perf keyval each known benchmark is plotted by in the graphing UI.
    benchmark_key = {
        'kernbench' : 'elapsed',
        'dbench' : 'throughput',
        'tbench' : 'throughput',
        'unixbench' : 'score',
        'iozone' : '32768-4096-fwrite'
    }

    # (display name, column) pairs of the TKO perf view.
    tko_perf_view = [
        ['Test Index', 'test_idx'],
        ['Job Index', 'job_idx'],
        ['Test Name', 'test_name'],
        ['Subdirectory', 'subdir'],
        ['Kernel Index', 'kernel_idx'],
        ['Status Index', 'status_idx'],
        ['Reason', 'reason'],
        ['Host Index', 'machine_idx'],
        ['Test Started Time', 'test_started_time'],
        ['Test Finished Time', 'test_finished_time'],
        ['Job Tag', 'job_tag'],
        ['Job Name', 'job_name'],
        ['Owner', 'job_owner'],
        ['Job Queued Time', 'job_queued_time'],
        ['Job Started Time', 'job_started_time'],
        ['Job Finished Time', 'job_finished_time'],
        ['Hostname', 'hostname'],
        ['Platform', 'platform'],
        ['Machine Owner', 'machine_owner'],
        ['Kernel Hash', 'kernel_hash'],
        ['Kernel Base', 'kernel_base'],
        ['Kernel', 'kernel'],
        ['Status', 'status'],
        ['Iteration Number', 'iteration'],
        ['Performance Keyval (Key)', 'iteration_key'],
        ['Performance Keyval (Value)', 'iteration_value'],
    ]

    result['group_fields'] = sorted(group_fields)
    result['all_fields'] = sorted(model_fields + extra_fields)
    result['test_labels'] = get_test_labels(sort_by=['name'])
    result['current_user'] = rpc_utils.prepare_for_serialization(
        afe_models.User.current_user().get_object_dict())
    result['benchmark_key'] = benchmark_key
    result['tko_perf_view'] = tko_perf_view
    result['tko_test_view'] = model_fields
    result['preconfigs'] = preconfigs.manager.all_preconfigs()
    result['motd'] = rpc_utils.get_motd()

    return result
# lower level access to tko models
# Each helper below just filters one model and serializes the result.

def get_machines(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.Machine.list_objects(filter_data))


def get_kernels(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.Kernel.list_objects(filter_data))


def get_patches(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.Patch.list_objects(filter_data))


def get_statuses(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.Status.list_objects(filter_data))


def get_jobs(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.Job.list_objects(filter_data))


def get_job_keyvals(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.JobKeyval.list_objects(filter_data))


def get_tests(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.Test.list_objects(filter_data))


def get_test_attributes(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.TestAttribute.list_objects(filter_data))


def get_iteration_attributes(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.IterationAttribute.list_objects(filter_data))


def get_iteration_results(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.IterationResult.list_objects(filter_data))


def get_interface_version():
    """Version of this RPC interface, checked by clients for compatibility."""
    return INTERFACE_VERSION
| coreos/autotest | frontend/tko/rpc_interface.py | Python | gpl-2.0 | 19,221 |
#!/usr/bin/env python
import sys, os
import mmap # Thanks Steven @ http://stackoverflow.com/questions/4940032/search-for-string-in-txt-file-python
import subprocess
import readline
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.parse_and_bind("set match-hidden-files off")
import argparse
def query(question, default, skipQuery=False):
    """Prompt the user with *question*, offering *default*.

    Returns the user's answer, or *default* when the answer is empty or
    when skipQuery is True (non-interactive mode).
    """
    if skipQuery:
        return default
    prompt = "%s [%s] ? " % (question, default)
    sys.stdout.write(prompt)
    answer = raw_input()
    return default if answer == '' else answer
class MyArgumentDefaultsHelpFormatter(argparse.HelpFormatter):
    """Help formatter that keeps explicit newlines and appends defaults.

    Unlike argparse.ArgumentDefaultsHelpFormatter it does not re-wrap the
    help text, and the default is shown quoted.
    """

    def _split_lines(self, text, width):
        # Honour the author's own line breaks instead of re-wrapping.
        return text.splitlines()

    def _get_help_string(self, action):
        text = action.help
        if '%(default)' in text:
            return text
        if action.default is argparse.SUPPRESS:
            return text
        # Only append the default for options and for positionals that may
        # legitimately be omitted.
        if action.option_strings or action.nargs in (argparse.OPTIONAL,
                                                     argparse.ZERO_OR_MORE):
            text += ' (default: "%(default)s")'
        return text
if __name__ == '__main__':
print("-------------------------------------")
print("Welcome to song-directory-to-songbook")
print("-------------------------------------")
parser = argparse.ArgumentParser(formatter_class = MyArgumentDefaultsHelpFormatter)
parser.add_argument('--input',
help='path of the default song input directory',
default='examples/')
parser.add_argument('--output',
help='name of the output pdf file',
default='Songbook.pdf')
parser.add_argument('--template',
help='name of the LaTeX template file [specifies language, etc]',
default='template/english.tex')
parser.add_argument('--manifest',
help='name of a file-avoiding manifest file [if desired]',
default='')
parser.add_argument('--yes',
help='accept all, skip all queries',
nargs='?',
default='NULL') # required, see below
args = parser.parse_args()
skipQueries = False
if args.yes is not 'NULL': # if exists and no contents, replaces 'NULL' by None
print("Detected --yes parameter: will skip queries")
skipQueries = True
# Query the path of the song input directory
inputDirectory = query("Please specify the path of the song input directory", args.input, skipQueries)
print("Will use song input directory: " + inputDirectory)
# Query the path of the song input directory
outputFile = query("Please specify the name of the output pdf file", args.output, skipQueries)
print("Will use the output pdf file: " + outputFile)
outputFileName, outputFileExtension = os.path.splitext(outputFile)
# Query the path of the template file
templateFile = query("Please specify the path of the LaTeX template file [specifies language, format]", args.template, skipQueries)
print("Will use template file: " + templateFile)
# Query (optional) the path of a file-avoiding manifest file
manifestFile = query("Please specify the name of a file-avoiding manifest file [if desired]", args.manifest, skipQueries)
if manifestFile == "":
print("Not using file-avoiding manifest file.")
else:
print("Will use file-avoiding manifest file: " + manifestFile)
manifestFileFd = open(manifestFile, 'r')
manifestMmap = mmap.mmap(manifestFileFd.fileno(), 0, access=mmap.ACCESS_READ)
manifestFileFd.close()
print("----------------------")
templateFileFd = open(templateFile, 'r')
s = templateFileFd.read()
#sys.stdout.write(s) #-- Screen output for debugging.
rep = ""
for dirname, dirnames, filenames in os.walk(inputDirectory):
for filename in sorted(filenames):
name, extension = os.path.splitext(filename)
if manifestFile != "":
if manifestMmap.find(name) != -1:
print "Skipping:", name
continue
rep += "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"
rep += "\\chapter{" + name + "}\n" #-- Note that we use \\ instead of \.
songName = name.split(" - ")[-1]
#-- We cannot use [] yet (they will be replaced because choir), so use {{}}.
rep += "\\index{{aux-song-index-file}}{" + songName + "}\n"
rep += "\\begin{alltt}\n"
song = open( os.path.join(dirname, filename) )
rep += song.read()
song.close()
rep += "\\end{alltt}\n"
rep += "\n"
#sys.stdout.write(rep) #-- Screen output for debugging.
#-- replace chords delimiter ()
rep = rep.replace("(","\\textbf{(")
rep = rep.replace(")",")}")
#-- replace choir delimiter []
rep = rep.replace("[","\\textit{[")
rep = rep.replace("]","]}")
#-- now we can do this
rep = rep.replace("{{aux-song-index-file}}","[aux-song-index-file]")
#-- replace template contents
s = s.replace("TITLE", outputFileName)
s = s.replace("genSongbook",rep)
outputFileTex = outputFileName + ".tex"
outFd = open(outputFileTex, 'w')
outFd.write(s)
outFd.close()
#http://stackoverflow.com/questions/6818102/detect-and-handle-a-latex-warning-error-generated-via-an-os-system-call-in-pytho
#pdftex_process = subprocess.Popen(['pdflatex', '-interaction=nonstopmode', '%s'%topic], shell=False, stdout=subprocess.PIPE)
pdftex_process = subprocess.call(['pdflatex', outputFileTex])
pdftex_process = subprocess.call(['pdflatex', outputFileTex])
os.remove("aux-song-index-file.idx")
os.remove("aux-song-index-file.ilg")
os.remove("aux-song-index-file.ind")
os.remove(outputFileName + ".aux")
os.remove(outputFileName + ".log")
os.remove(outputFileName + ".out")
os.remove(outputFileName + ".toc")
os.remove(outputFileTex) # may be interested in keeping
| UC3Music/genSongbook | song-directory-to-songbook.py | Python | unlicense | 6,143 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2016-2022 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.calculators.export import export
from openquake.calculators.views import view
from openquake.calculators import ucerf_base
from openquake.qa_tests_data import ucerf
from openquake.calculators.tests import CalculatorTestCase
class UcerfTestCase(CalculatorTestCase):
    """Regression tests for the UCERF event-based and classical calculators."""

    def test_event_based(self):
        self.run_calc(ucerf.__file__, 'job.ini')
        gmv_uc = view('global_gmfs', self.calc.datastore)
        [fname] = export(('ruptures', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/ruptures.csv', fname, delta=2E-5)

        # run a regular event based on top of the UCERF ruptures and
        # check the generated hazard maps
        self.calc.datastore.close()  # avoid https://ci.openquake.org/job/macos/job/master_macos_engine/label=catalina,python=python3.7/5388/consoleFull
        self.run_calc(ucerf.__file__, 'job.ini',
                      calculation_mode='event_based',
                      hazard_calculation_id=str(self.calc.datastore.calc_id))

        # check they produce the same GMFs
        gmv_eb = view('global_gmfs', self.calc.datastore)
        self.assertEqual(gmv_uc, gmv_eb)

        # check the mean hazard map
        [fname] = [f for f in export(('hmaps', 'csv'), self.calc.datastore)
                   if 'mean' in f]
        self.assertEqualFiles('expected/hazard_map-mean.csv', fname,
                              delta=1E-5)

    def test_event_based_sampling(self):
        self.run_calc(ucerf.__file__, 'job_ebh.ini')

        # check the distribution of the events across the two realizations
        self.assertEventsByRlz([22, 13])

    def test_classical(self):
        self.run_calc(ucerf.__file__, 'job_classical_redux.ini',
                      ruptures_per_block='50', exports='csv')
        fnames = export(('hcurves/', 'csv'), self.calc.datastore)
        expected = ['hazard_curve-0-PGA.csv', 'hazard_curve-0-SA(0.1).csv',
                    'hazard_curve-1-PGA.csv', 'hazard_curve-1-SA(0.1).csv']
        for fname, exp in zip(fnames, expected):
            self.assertEqualFiles('expected/' + exp, fname)

        # make sure this runs
        view('fullreport', self.calc.datastore)

    def test_classical_time_dep(self):
        # Shrink the block size to exercise the rupture-splitting code path.
        ucerf_base.RUPTURES_PER_BLOCK = 10  # check splitting
        out = self.run_calc(ucerf.__file__, 'job_classical_time_dep_redux.ini',
                            exports='csv')
        ucerf_base.RUPTURES_PER_BLOCK = 1000  # resume default
        fname = out['hcurves', 'csv'][0]
        self.assertEqualFiles('expected/hazard_curve-td-mean.csv', fname,
                              delta=1E-6)

        # make sure this runs
        view('fullreport', self.calc.datastore)

    def test_classical_time_dep_sampling(self):
        ucerf_base.RUPTURES_PER_BLOCK = 10  # check splitting
        out = self.run_calc(ucerf.__file__, 'job_classical_time_dep_redux.ini',
                            number_of_logic_tree_samples='2',
                            exports='csv')
        ucerf_base.RUPTURES_PER_BLOCK = 1000  # resume default
        fname = out['hcurves', 'csv'][0]
        self.assertEqualFiles('expected/hazard_curve-sampling.csv', fname,
                              delta=1E-6)
| gem/oq-engine | openquake/calculators/tests/ucerf_test.py | Python | agpl-3.0 | 3,954 |
#!/usr/bin/env python
#
# Generate seeds.txt from Pieter's DNS seeder
#
# Maximum number of seeds emitted, and the per-ASN cap that keeps the
# resulting list topologically diverse.
NSEEDS=512
MAX_SEEDS_PER_ASN=2

# Reject nodes reporting a chain height below this.
MIN_BLOCKS = 337600

# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = set([
    "130.211.129.106", "178.63.107.226",
    "83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
    "54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
    "54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
    "54.94.195.96", "54.94.200.247"
])
import re
import sys
import dns.resolver
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):8333$")
PATTERN_AGENT = re.compile(r"^(\/Satoshi:0.8.6\/|\/Satoshi:0.9.(2|3)\/|\/Satoshi:0.10.\d{1,2}\/)$")


def parseline(line):
    """Parse one line of the DNS seeder dump.

    Expected whitespace-separated fields:
      0: "ip:port"          1: good flag ("0"/"1")   2: last-success timestamp
      3-7: uptime percentages (2h/8h/1d/7d/30d, e.g. "100.00%")
      8: block height       9: service bits (hex)    10: protocol version
      11: quoted user agent

    Returns a dict of the parsed values, or None if the line is malformed,
    not IPv4:8333, a zero address, or a node flagged as bad.
    """
    sline = line.split()
    if len(sline) < 11:
        return None
    # Match only IPv4
    m = PATTERN_IPV4.match(sline[0])
    if m is None:
        return None
    # Do IPv4 sanity check and pack the address into a 32-bit integer
    # (used later for deterministic output ordering).
    ip = 0
    for i in range(0,4):
        if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
            return None
        ip = ip + (int(m.group(i+2)) << (8*(3-i)))
    if ip == 0:
        return None
    # Skip bad results.
    # Bug fix: the fields are strings, so the previous comparison
    # `sline[1] == 0` was always False and never skipped anything.
    if sline[1] == '0':
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent, dropping the surrounding quotes.
    agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'ip': m.group(1),
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
    }
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """Limit ips to at most max_total entries and max_per_asn per AS.

    ASNs are resolved through Team Cymru's DNS interface; entries whose
    ASN cannot be resolved are dropped with a note on stderr.
    """
    result = []
    asn_count = {}
    for ip in ips:
        if len(result) == max_total:
            break
        try:
            asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
            if asn not in asn_count:
                asn_count[asn] = 0
            if asn_count[asn] == max_per_asn:
                continue
            asn_count[asn] += 1
            result.append(ip)
        except Exception:
            # Bug fix: the previous bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, making the (slow, DNS-bound)
            # run impossible to abort cleanly.
            sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
    return result
def main():
    """Read a seeder dump from stdin and print the selected seed IPs."""
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]

    # Skip entries without a valid IPv4 address (parseline returned None).
    ips = [ip for ip in ips if ip is not None]
    # Skip entries from suspicious hosts.
    ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    # Require service bit 1.
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    # Require at least 50% 30-day uptime.
    ips = [ip for ip in ips if ip['uptime'] > 50]
    # Require a known and recent user agent.
    ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
    # Sort by availability (and use last success as tie breaker)
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['ipnum']))

    for ip in ips:
        print ip['ip']

if __name__ == '__main__':
    main()
| Infernoman/skidoo | contrib/seeds/makeseeds.py | Python | mit | 3,747 |
# -*- coding: utf-8 -*-
import os
import subprocess
from PyQt4.QtGui import *
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.core.ProcessingLog import ProcessingLog
from processing.core.ProcessingConfig import Setting, ProcessingConfig
from sextante_animove.mcp import mcp
from sextante_animove.kernelDensity import kernelDensity
from sextante_animove.RandomHR import RandomHR
from sextante_animove.RandomPath import RandomPath
class animoveAlgorithmProvider(AlgorithmProvider):
    """Processing provider exposing the AniMove home-range algorithms.

    kernelDensity is only registered when its optional runtime
    requirements are present: scipy or statsmodels for the density
    estimate, plus the gdal_contour executable for contouring.
    """

    FIELD_SEPARATOR = 'FIELD_SEPARATOR'
    DECIMAL_SEPARATOR = 'DECIMAL_SEPARATOR'

    def __init__(self):
        AlgorithmProvider.__init__(self)
        self.activate = False
        self.alglist = [mcp(), RandomHR(), RandomPath()]

        # Bug fix: the probes below used bare `except:` clauses, which also
        # swallow KeyboardInterrupt/SystemExit; `except Exception:` keeps
        # the best-effort detection without hiding interpreter-exit
        # requests.

        # Check scipy
        try:
            from scipy.stats.kde import gaussian_kde
            has_scipy = True
        except Exception:
            has_scipy = False

        # Check statsmodels
        try:
            from statsmodels.nonparametric import kernel_density
            has_statsmodels = True
        except Exception:
            has_statsmodels = False

        # Check gdal_contour (subprocess.call raises OSError when the
        # binary cannot be launched)
        try:
            subprocess.call('gdal_contour')
            has_gdal_contour = True
        except Exception:
            has_gdal_contour = False

        if has_gdal_contour and (has_scipy or has_statsmodels):
            self.alglist.append(kernelDensity())
        for alg in self.alglist:
            alg.provider = self

    def initializeSettings(self):
        """Register the provider's CSV formatting settings."""
        AlgorithmProvider.initializeSettings(self)
        ProcessingConfig.addSetting(Setting('AniMove',
            self.FIELD_SEPARATOR, 'CSV field separator', ','))
        ProcessingConfig.addSetting(Setting('AniMove',
            self.DECIMAL_SEPARATOR, 'CSV decimal separator', '.'))

    def unload(self):
        """Remove the provider's settings when the plugin is unloaded."""
        AlgorithmProvider.unload(self)
        ProcessingConfig.removeSetting(self.FIELD_SEPARATOR)
        ProcessingConfig.removeSetting(self.DECIMAL_SEPARATOR)

    def getDescription(self):
        return 'AniMove'

    def getName(self):
        return 'AniMove'

    def getIcon(self):
        return QIcon(os.path.dirname(__file__) + '/icons/radiotelemetry.png')

    def _loadAlgorithms(self):
        self.algs = self.alglist

    def supportsNonFileBasedOutput(self):
        return True
# -*- coding: utf-8 -*-
# encoding: utf-8
from woo import utils, ymport, qt, plot
from woo import log
log.setLevel('Law2_ScGeom_WirePhys_WirePM',log.TRACE) # must compile with debug option to get logs
## definition of some colors for colored text output in terminal
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BLACK = '\033[0m'

#### short description of script
print BLUE+'''
Simple test for two particles to test the contact law for the WireMat
by unsing the '''+RED+'''StepDisplacer'''+BLUE+''' with loading and unloading.
'''+BLACK

#### define parameters for the net
# mesh opening size
mos = 80./1000.
# side length derived from the mesh opening size (triangular geometry)
a = mos/sqrt(3)
# wire diameter
d = 2.7/1000.
# particle radius
radius = d*5.

# define piecewise lineare stress-strain curve (strain, stress [Pa]) pairs
strainStressValues=[(0.0019230769,2.5e8),(0.0192,3.2195e8),(0.05,3.8292e8),(0.15,5.1219e8),(0.25,5.5854e8),(0.3,5.6585e8),(0.35,5.6585e8)]

# elastic material properties
particleVolume = 4./3.*pow(radius,3)*pi
particleMass = 3.9/1000.
density = particleMass/particleVolume
# Young's modulus taken from the initial slope of the stress-strain curve.
young = strainStressValues[0][1] / strainStressValues[0][0]
poisson = 0.3

#### material definition
netMat = O.materials.append(WireMat(young=young,poisson=poisson,density=density,isDoubleTwist=False,diameter=d,strainStressValues=strainStressValues,lambdaEps=0.4,lambdak=0.21))

#### create bodies, default: dynamic=True
O.bodies.append( utils.sphere([0,0,0], radius, wire=False, color=[1,0,0], highlight=False, material=netMat) )
O.bodies.append( utils.sphere([0,a,0], radius, wire=False, color=[0,1,0], highlight=False, material=netMat) )
FixedSphere=O.bodies[0]
MovingSphere=O.bodies[1]
# Both spheres are kinematically controlled: the StepDisplacer below moves
# the second one, the first stays put.
FixedSphere.dynamic=False
MovingSphere.dynamic=False
def addPlotData():
    """Record the normal force and normal displacement for plotting.

    Before the first step no interaction exists yet, so the plot is seeded
    with zeros; afterwards, a missing interaction means the link broke and
    the simulation is paused.
    """
    if O.iter < 1:
        plot.addData( Fn=0., un=0. )
        #plot.saveGnuplot('net-2part-displ-unloading')
    else:
        try:
            i=O.interactions[FixedSphere.id,MovingSphere.id]
            # un = current centre distance along y minus the initial spacing a.
            plot.addData( Fn=i.phys.normalForce.norm(), un=(O.bodies[1].state.pos[1]-O.bodies[0].state.pos[1])-a )
            #plot.saveGnuplot('net-2part-displ-unloading')
        except:
            print "No interaction!"
            O.pause()
#### define simulation to create link
interactionRadius=2.
O.engines = [
    ForceResetter(),
    # Enlarged detection radius so the cohesive link can form although the
    # spheres do not touch geometrically.
    InsertionSortCollider( [Bo1_Sphere_Aabb(aabbEnlargeFactor=interactionRadius,label='aabb')] ),
    InteractionLoop(
        [Ig2_Sphere_Sphere_ScGeom(interactionDetectionFactor=interactionRadius,label='Ig2ssGeom')],
        [Ip2_WireMat_WireMat_WirePhys(linkThresholdIteration=1,label='interactionPhys')],
        [Law2_ScGeom_WirePhys_WirePM(linkThresholdIteration=1,label='interactionLaw')]
    ),
    NewtonIntegrator(damping=0.),
    PyRunner(initRun=True,iterPeriod=1,command='addPlotData()')
]

#### plot some results
plot.plots={'un':('Fn',)}
plot.plot(noShow=False, subPlots=False)

#### create link (no time step needed since loading is involved in this step)
O.step() # create cohesive link (cohesiveTresholdIteration=1)

#### initializes now the interaction detection factor
# Reset to -1 so no further (spurious) distant interactions are created.
aabb.aabbEnlargeFactor=-1.
Ig2ssGeom.interactionDetectionFactor=-1.

## time step definition
## no time step definition is required since setVelocities=False in StepDisplacer

# Each phase below prepends a StepDisplacer that shifts the moving sphere
# by a fixed amount per step along +/-y, waits for the user, then runs a
# given number of steps.  O.engines[1:] drops the previous displacer.

#### define simulation loading
O.engines = [StepDisplacer( ids=[1],mov=Vector3(0,+1e-5,0),rot=Quaternion().Identity,setVelocities=False )] + O.engines
print 'Loading (press enter)'
raw_input()
O.run(100,True)

#### define simulation unloading
O.engines = [StepDisplacer( ids=[1],mov=Vector3(0,-1.3e-5,0),rot=Quaternion().Identity,setVelocities=False )] + O.engines[1:]
print 'Unloading (press enter)'
raw_input()
O.run(50,True)

#### define simulation reloading
O.engines = [StepDisplacer( ids=[1],mov=Vector3(0,+1.6e-5,0),rot=Quaternion().Identity,setVelocities=False )] + O.engines[1:]
print 'Reloading (press enter)'
raw_input()
O.run(500,True)

#### define simulation unloading
O.engines = [StepDisplacer( ids=[1],mov=Vector3(0,-1.45e-5,0),rot=Quaternion().Identity,setVelocities=False )] + O.engines[1:]
print 'Reunloading (press enter)'
raw_input()
O.run(10,True)

#### define simulation reloading
O.engines = [StepDisplacer( ids=[1],mov=Vector3(0,+1.6e-5,0),rot=Quaternion().Identity,setVelocities=False )] + O.engines[1:]
print 'Reloading (press enter)'
raw_input()
O.run(500,True)

#### to see it
v=qt.Controller()
v=qt.View()
rr=qt.Renderer()
rr.intrAllWire=True
| sjl767/woo | scripts/test-OLD/WireMatPM/net-2part-displ-unloading.py | Python | gpl-2.0 | 4,387 |
#-------------------------------------------------------------------------------
# elftools tests
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
import unittest
from elftools.common.py3compat import (iterbytes, iterkeys, itervalues,
iteritems)
class TestPy3Compat(unittest.TestCase):
    """Exercise the py3compat iteration helpers."""

    def test_iterbytes(self):
        # iterbytes must yield length-1 byte strings on both Python 2 and 3.
        bi = iterbytes(b'fo1')
        self.assertEqual(next(bi), b'f')
        self.assertEqual(next(bi), b'o')
        self.assertEqual(next(bi), b'1')
        with self.assertRaises(StopIteration):
            next(bi)

    def test_iterdict(self):
        # Sort before comparing: dict iteration order is not guaranteed on
        # the interpreters py3compat targets.
        d = {1: 'foo', 2: 'bar'}
        self.assertEqual(list(sorted(iterkeys(d))), [1, 2])
        self.assertEqual(list(sorted(itervalues(d))), ['bar', 'foo'])
        self.assertEqual(list(sorted(iteritems(d))), [(1, 'foo'), (2, 'bar')])


if __name__ == '__main__':
    unittest.main()
| pombredanne/pyelftools | test/test_py3compat.py | Python | unlicense | 1,006 |
"""Blog managers."""
from django.db import models
from django.utils.timezone import now
class PostManager(models.Manager): # pylint: disable=too-few-public-methods
    """Custom manager exposing the common Post visibility filters."""

    def public(self):
        """Return published posts whose go-live date has already passed."""
        return self.filter(creation_date__lt=now(), status='PB')

    def sticky(self):
        """Return the posts pinned to the top of listings."""
        return self.filter(sticky=True)
| arpegio-dj/arpegio | arpegio/blog/managers.py | Python | bsd-3-clause | 465 |
# -*- coding: utf8 -*-
from __future__ import unicode_literals
import json
import time
import Queue
import requests
import threading
import logging
import socket
class StopThreadException(Exception):
    """Raised inside worker loops to request a clean thread shutdown."""
    pass
class FlasqueFormatter(logging.Formatter):
    """Serialise log records as JSON objects for transport over flasque.

    The payload contains a fixed set of LogRecord attributes plus the
    rendered message and the emitting hostname.
    """

    RECORD_ATTRS = (
        "threadName", "name", "thread", "created",
        "process", "processName", "relativeCreated",
        "module", "funcName", "levelno", "msecs",
        "pathname", "lineno", "filename", "levelname",
    )

    def __init__(self):
        # The hostname is constant for the process lifetime; cache it once.
        self.hostname = socket.gethostname()
        super(FlasqueFormatter, self).__init__()

    def format(self, record):
        payload = dict((name, getattr(record, name))
                       for name in self.RECORD_ATTRS)
        payload["message"] = record.getMessage()
        payload["hostname"] = self.hostname
        return json.dumps(payload)
class FlasqueHandler(logging.Handler):
    """logging.Handler that hands JSON-formatted records to a producer.

    The producer only needs a put() method; delivery to the server is the
    producer thread's job, so emit() never blocks on the network.
    """

    def __init__(self, producer):
        self.producer = producer
        logging.Handler.__init__(self)
        self.setFormatter(FlasqueFormatter())

    def emit(self, record):
        formatted = self.format(record)
        self.producer.put(formatted)
class Message(object):
    """Lightweight value object for a single queue message."""

    def __init__(self, msgid, channel, data):
        super(Message, self).__init__()
        self.id = msgid
        self.channel = channel
        self.data = data

    def __str__(self):
        # A message prints as its raw payload.
        return self.data
class ThreadQueue(threading.Thread):
    """Daemon thread pairing a local Queue with an HTTP endpoint.

    Subclasses implement loop(); run() keeps invoking it until stop() is
    requested (signalled via StopThreadException) and silently retries on
    connection errors.
    """

    def __init__(self, url, qname):
        super(ThreadQueue, self).__init__()
        self.url = url
        self.qname = qname
        self.q = Queue.Queue()
        # Daemon thread: do not block interpreter exit.
        self.daemon = True
        self.session = requests.Session()
        self._stop = threading.Event()

    def run(self):
        while True:
            try:
                self.loop()
            except StopThreadException:
                break
            except requests.exceptions.RequestException:
                # Transient HTTP failure: restart the loop.
                continue

    def make_request(self, func, *args, **kwargs):
        """Call func until it returns HTTP 200, sleeping 1s between tries.

        Raises StopThreadException as soon as stop() has been requested.
        """
        while True:
            try:
                res = func(*args, **kwargs)
            except requests.exceptions.RequestException:
                pass
            else:
                if res.status_code == 200:
                    return res
            if self._stop.is_set():
                raise StopThreadException
            time.sleep(1)

    def get(self, timeout=None):
        """Pop the next message and wrap it in a Message object.

        With timeout=None this polls in 1s slices so that the blocking get
        stays interruptible (Python 2 Queue quirk).
        """
        if timeout is None:
            while True:
                try:
                    js = self.q.get(timeout=1)
                except Queue.Empty:
                    pass
                else:
                    break
        else:
            js = self.q.get(timeout=timeout)
        if js is not None:
            # js is expected to be a dict with id/channel/data keys.
            return Message(js["id"], js["channel"], js["data"])

    def put(self, *args, **kwargs):
        return self.q.put(*args, **kwargs)

    def task_done(self):
        return self.q.task_done()

    def stop(self):
        """Ask the worker loop to terminate at the next opportunity."""
        self._stop.set()
class Producer(ThreadQueue):
    """Worker that POSTs queued payloads to the server's queue endpoint."""

    def loop(self):
        # Poll the local queue in 1s slices so stop() stays responsive.
        try:
            data = self.q.get(timeout=1)
        except Queue.Empty:
            pass
        else:
            self.make_request(
                self.session.post,
                self.url,
                params={"channel": self.qname},
                data=data,
            )
        if self._stop.is_set():
            raise StopThreadException
class Consumer(ThreadQueue):
    """Worker that streams queue messages and acks each one with DELETE
    after the local consumer has called task_done()."""

    def __init__(self, api, qname, pending=False):
        super(Consumer, self).__init__(api, qname)
        self.params = {"channel": self.qname}
        if pending:
            # Ask the server to replay messages that are still pending.
            self.params["pending"] = "1"

    def loop(self):
        res = self.make_request(
            self.session.get,
            self.url,
            params=self.params,
            stream=True,
        )
        for line in res.iter_lines(chunk_size=1):
            if self._stop.is_set():
                raise StopThreadException
            # Server-sent-events framing: the payload starts after "data: "
            # (6 characters); blank keep-alive lines are skipped.
            if line and line[6:]:
                msg = json.loads(line[6:])
                self.q.put(msg)
                # Block until the message is processed, then ack it so the
                # server can drop it from the queue.
                self.q.join()
                self.make_request(
                    self.session.delete,
                    self.url,
                    params={
                        "id": msg["id"],
                        "channel": msg["channel"],
                    },
                )
class ChannelConsumer(ThreadQueue):
    """Worker that streams broadcast channel messages (no acking, unlike
    Consumer)."""

    def loop(self):
        res = self.make_request(
            self.session.get,
            self.url,
            params={"channel": self.qname},
            stream=True,
        )
        for line in res.iter_lines(chunk_size=1):
            if self._stop.is_set():
                raise StopThreadException
            # Server-sent-events framing: the payload starts after "data: ".
            if line and line[6:]:
                msg = json.loads(line[6:])
                self.q.put(msg)
                # Wait until the local consumer marks the message done.
                self.q.join()
class ChannelProducer(ThreadQueue):
    """Worker that streams queued payloads to a channel as one chunked POST."""

    def generate(self):
        # Generator fed to requests as a chunked request body: yields one
        # "payload\n\n" frame per queued item until stop() is requested.
        while True:
            try:
                data = self.q.get(timeout=1)
            except Queue.Empty:
                pass
            else:
                yield "%s\n\n" % (data,)
            if self._stop.is_set():
                raise StopThreadException

    def loop(self):
        self.make_request(
            self.session.post,
            self.url,
            params={"channel": self.qname},
            data=self.generate(),
        )
class Connection(object):
    """Factory and owner of the worker threads talking to a flasque server.

    Usable as a context manager; close() stops and joins every thread that
    was created through this connection.
    """

    def __init__(self, api="http://localhost:5000"):
        self.api = api
        self.threads = set()
        super(Connection, self).__init__()

    def register(self, thread):
        # Start the worker and keep a handle so close() can stop it later.
        thread.start()
        self.threads.add(thread)
        return thread

    def Producer(self, qname):
        return self.register(Producer(self.api + "/queue/", qname))

    def Consumer(self, qname, pending=False):
        return self.register(Consumer(
            self.api + "/queue/", qname, pending=pending))

    def ChannelConsumer(self, qname):
        return self.register(ChannelConsumer(self.api + "/channel/", qname))

    def ChannelProducer(self, qname):
        return self.register(ChannelProducer(self.api + "/channel/", qname))

    def LogConsumer(self, qname):
        return self.register(ChannelConsumer(self.api + "/log/", qname))

    def LogProcuder(self, qname):
        # NOTE(review): name is a typo for "LogProducer", but renaming it
        # would break existing callers (Handler below relies on it).
        return self.register(ChannelProducer(self.api + "/log/", qname))

    def Handler(self, qname):
        """Return a logging handler that ships records to the given channel."""
        producer = self.LogProcuder(qname)
        return FlasqueHandler(producer)

    def close(self):
        """Signal every worker to stop, then join them all."""
        for thread in self.threads:
            thread.stop()
        for thread in self.threads:
            thread.join()
        self.threads = set()

    def remove(self, thread):
        self.threads.remove(thread)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
| philpep/flasque | flasque/client.py | Python | bsd-3-clause | 6,640 |
# -*- coding: utf-8 -*-
#
# Watchmaker documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 30 20:11:36 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../src/'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# extensions = []
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
from recommonmark.parser import CommonMarkParser
source_suffix = ['.rst', '.md']
parsers = {
'.md': CommonMarkParser,
}
# source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Watchmaker'
copyright = u'2016, Plus3IT'
author = u'Plus3IT'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Watchmaker v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Watchmakerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Watchmaker.tex', u'Watchmaker Documentation',
u'Plus3IT', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'watchmaker', u'Watchmaker Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Watchmaker', u'Watchmaker Documentation',
author, 'Watchmaker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| MarionTheBull/watchmaker | docs/conf.py | Python | apache-2.0 | 9,706 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import unittest
# test_records = frappe.get_test_records('Packing Slip')
from erpnext.tests.utils import ERPNextTestCase
class TestPackingSlip(unittest.TestCase):
    """Placeholder test case for the Packing Slip doctype.

    No behaviour is exercised yet; exists so the test runner registers the
    doctype's test module.
    """
    pass
| mhbu50/erpnext | erpnext/stock/doctype/packing_slip/test_packing_slip.py | Python | gpl-3.0 | 260 |
#!/usr/bin/env python
import os
import optparse
import sys
import re
from pip.exceptions import InstallationError, CommandError, PipError
from pip.log import logger
from pip.util import get_installed_distributions, get_prog
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import commands, get_summaries, get_similar_commands
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "1.5.1"
def autocomplete():
    """Command and option completion for the main option parser (and options)
    and its subcommands (and options).
    Enable by sourcing one of the completion shell scripts (bash or zsh).
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'PIP_AUTO_COMPLETE' not in os.environ:
        return
    # COMP_WORDS / COMP_CWORD are exported by the shell completion script:
    # the words typed so far and the index of the word being completed.
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        current = cwords[cword - 1]
    except IndexError:
        current = ''
    subcommands = [cmd for cmd, summary in get_summaries()]
    options = []
    # subcommand: first word that matches a known command, if any
    try:
        subcommand_name = [w for w in cwords if w in subcommands][0]
    except IndexError:
        subcommand_name = None
    parser = create_main_parser()
    # subcommand options
    if subcommand_name:
        # special case: 'help' subcommand has no options
        if subcommand_name == 'help':
            sys.exit(1)
        # special case: list locally installed dists for uninstall command
        if subcommand_name == 'uninstall' and not current.startswith('-'):
            installed = []
            lc = current.lower()
            for dist in get_installed_distributions(local_only=True):
                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                    installed.append(dist.key)
            # if there are no dists installed, fall back to option completion
            if installed:
                for dist in installed:
                    print(dist)
                sys.exit(1)
        subcommand = commands[subcommand_name]()
        options += [(opt.get_opt_string(), opt.nargs)
                    for opt in subcommand.parser.option_list_all
                    if opt.help != optparse.SUPPRESS_HELP]
        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
        options = [(x, v) for (x, v) in options if x not in prev_opts]
        # filter options by current input
        options = [(k, v) for k, v in options if k.startswith(current)]
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1]:
                opt_label += '='
            print(opt_label)
    else:
        # show main parser options only when necessary
        if current.startswith('-') or current.startswith('--'):
            opts = [i.option_list for i in parser.option_groups]
            opts.append(parser.option_list)
            opts = (o for it in opts for o in it)
            subcommands += [i.get_opt_string() for i in opts
                            if i.help != optparse.SUPPRESS_HELP]
        print(' '.join([x for x in subcommands if x.startswith(current)]))
    # exiting non-zero tells the shell the output is completions, not a run
    sys.exit(1)
def create_main_parser():
    """Build the top-level option parser shared by all pip commands."""
    parser = ConfigOptionParser(
        usage='\n%prog <command> [options]',
        add_help_option=False,
        formatter=UpdatingDefaultsHelpFormatter(),
        name='global',
        prog=get_prog(),
    )
    parser.disable_interspersed_args()

    pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    parser.version = 'pip %s from %s (python %s)' % (
        __version__, pkg_dir, sys.version[:3])

    # Attach the general (non-command-specific) options.
    parser.add_option_group(
        cmdoptions.make_option_group(cmdoptions.general_group, parser))

    parser.main = True  # so the help formatter knows this is the root parser

    # List every subcommand with its one-line summary in the description.
    summaries = ['%-27s %s' % (name, summary)
                 for name, summary in get_summaries()]
    parser.description = '\n'.join([''] + summaries)
    return parser
def parseopts(args):
    """Split *args* into ``(cmd_name, cmd_args)``.

    ``cmd_name`` is the lowercased subcommand name; ``cmd_args`` is every
    other argument (general options plus the subcommand's own arguments).
    Exits the process for ``--version`` and bare ``pip``/``pip help``;
    raises CommandError for unknown subcommands.
    """
    parser = create_main_parser()
    # Note: parser calls disable_interspersed_args(), so the result of this call
    # is to split the initial args into the general options before the
    # subcommand and everything else.
    # For example:
    #  args: ['--timeout=5', 'install', '--user', 'INITools']
    #  general_options: ['--timeout==5']
    #  args_else: ['install', '--user', 'INITools']
    general_options, args_else = parser.parse_args(args)
    # --version
    if general_options.version:
        sys.stdout.write(parser.version)
        sys.stdout.write(os.linesep)
        sys.exit()
    # pip || pip help -> print_help()
    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
        parser.print_help()
        sys.exit()
    # the subcommand name, normalized for the lookup below
    cmd_name = args_else[0].lower()
    # all the args without the subcommand.  Remove the token exactly as the
    # user typed it: removing the lowercased form raised ValueError whenever
    # the command was given with different capitalization (e.g. "Install").
    cmd_args = args[:]
    cmd_args.remove(args_else[0])
    if cmd_name not in commands:
        guess = get_similar_commands(cmd_name)
        msg = ['unknown command "%s"' % cmd_name]
        if guess:
            msg.append('maybe you meant "%s"' % guess)
        raise CommandError(' - '.join(msg))
    return cmd_name, cmd_args
def main(initial_args=None):
    """Pip console entry point: parse argv and dispatch to a subcommand.

    Returns the subcommand's exit status.  Parse errors terminate the
    process with status 1.
    """
    if initial_args is None:
        initial_args = sys.argv[1:]
    autocomplete()
    try:
        cmd_name, cmd_args = parseopts(initial_args)
    except PipError:
        # sys.exc_info()[1] instead of "except ... as e" keeps compatibility
        # with very old Python syntax this file still targets.
        e = sys.exc_info()[1]
        sys.stderr.write("ERROR: %s" % e)
        sys.stderr.write(os.linesep)
        sys.exit(1)
    command = commands[cmd_name]()
    return command.main(cmd_args)
def bootstrap():
    """
    Bootstrapping function to be called from install-pip.py script.
    """
    to_install = ['pip']
    try:
        import setuptools  # noqa -- only probing for availability
    except ImportError:
        # setuptools is missing: install it alongside pip.
        to_install.append('setuptools')
    return main(['install', '--upgrade'] + to_install + sys.argv[1:])
############################################################
## Writing freeze files
class FrozenRequirement(object):
    """One line of ``pip freeze`` output: a requirement plus metadata.

    ``req`` is either a pkg_resources requirement (``name==version``) or,
    for editable VCS checkouts, a VCS URL string; ``comments`` are emitted
    verbatim above the requirement line.
    """
    def __init__(self, name, req, editable, comments=()):
        self.name = name
        self.req = req
        self.editable = editable
        self.comments = comments
    # Version suffixes produced by svn checkouts: "-r1234" and "-20101231".
    _rev_re = re.compile(r'-r(\d+)$')
    _date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
    @classmethod
    def from_dist(cls, dist, dependency_links, find_tags=False):
        """Build a FrozenRequirement from an installed distribution.

        Tries to detect a VCS checkout at the install location and freeze
        it as an editable URL; otherwise freezes as ``name==version``.
        """
        location = os.path.normcase(os.path.abspath(dist.location))
        comments = []
        # imported here to avoid a circular import at module load time
        from pip.vcs import vcs, get_src_requirement
        if vcs.get_backend_name(location):
            editable = True
            try:
                req = get_src_requirement(dist, location, find_tags)
            except InstallationError:
                ex = sys.exc_info()[1]
                logger.warn("Error when trying to get requirement for VCS system %s, falling back to uneditable format" % ex)
                req = None
            if req is None:
                # VCS detected but no usable URL: fall back to a pinned
                # requirement with a warning comment.
                logger.warn('Could not determine repository location of %s' % location)
                comments.append('## !! Could not determine repository location')
                req = dist.as_requirement()
                editable = False
        else:
            editable = False
            req = dist.as_requirement()
            specs = req.specs
            assert len(specs) == 1 and specs[0][0] == '=='
            version = specs[0][1]
            ver_match = cls._rev_re.search(version)
            date_match = cls._date_re.search(version)
            # An svn-style revision/date suffix in the version suggests the
            # package was built from an svn checkout: try to reconstruct an
            # editable svn URL from the dependency links.
            if ver_match or date_match:
                svn_backend = vcs.get_backend('svn')
                if svn_backend:
                    svn_location = svn_backend(
                    ).get_location(dist, dependency_links)
                    if not svn_location:
                        logger.warn(
                            'Warning: cannot find svn location for %s' % req)
                        comments.append('## FIXME: could not find svn URL in dependency_links for this package:')
                    else:
                        comments.append('# Installing as editable to satisfy requirement %s:' % req)
                        if ver_match:
                            rev = ver_match.group(1)
                        else:
                            rev = '{%s}' % date_match.group(1)
                        editable = True
                        req = '%s@%s#egg=%s' % (svn_location, rev, cls.egg_name(dist))
        return cls(dist.project_name, req, editable, comments)
    @staticmethod
    def egg_name(dist):
        # Strip the trailing "-pyX.Y" tag from the egg name, if present.
        name = dist.egg_name()
        match = re.search(r'-py\d\.\d$', name)
        if match:
            name = name[:match.start()]
        return name
    def __str__(self):
        req = self.req
        if self.editable:
            req = '-e %s' % req
        return '\n'.join(list(self.comments) + [str(req)]) + '\n'
if __name__ == '__main__':
    # Bind to a dedicated name instead of shadowing the builtin ``exit``;
    # as before, only terminate with a status when main() returns a truthy
    # value.
    status = main()
    if status:
        sys.exit(status)
| danielvdao/TheAnimalFarm | venv/lib/python2.7/site-packages/pip/__init__.py | Python | gpl-2.0 | 9,450 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio Demosite.
# Copyright (C) 2014 CERN.
#
# Invenio Demosite is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio Demosite is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Demosite."""
from .version import __version__
__all__ = ("__version__",)
| mvesper/invenio-demosite | invenio_demosite/__init__.py | Python | gpl-2.0 | 887 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RuleManagementEventClaimsDataSource(Model):
    """The claims for a rule management event data source.

    :param email_address: the email address.
    :type email_address: str
    """
    # Maps each Python attribute to its wire-format key and msrest type;
    # consumed by the msrest serializer. (AutoRest-generated: edits here
    # will be lost on regeneration.)
    _attribute_map = {
        'email_address': {'key': 'emailAddress', 'type': 'str'},
    }
    def __init__(self, *, email_address: str=None, **kwargs) -> None:
        super(RuleManagementEventClaimsDataSource, self).__init__(**kwargs)
        self.email_address = email_address
| lmazuel/azure-sdk-for-python | azure-mgmt-monitor/azure/mgmt/monitor/models/rule_management_event_claims_data_source_py3.py | Python | mit | 992 |
#!/usr/bin/env python
# ============================================================================
# Project Name : iTrade
# Module Name : iTrade_ansicolors.py
#
# Description: ANSI Colors code
#
# The Original Code is iTrade code (http://itrade.sourceforge.net).
#
# The Initial Developer of the Original Code is Gilles Dumortier.
#
# Portions created by the Initial Developer are Copyright (C) 2004-2008 the
# Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see http://www.gnu.org/licenses/gpl.html
#
# History Rev Description
# 2004-04-11 dgil Wrote it from scratch
# ============================================================================
# ============================================================================
# Imports
# ============================================================================
###
# Copyright (c) 2002, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
# ============================================================================
# ANSI Colors code
#
# Courtesy of Jeremiah Fincher
# ============================================================================
# ANSI SGR (Select Graphic Rendition) escape sequences.
# Foreground colours (codes 30-37).
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
MAGENTA = '\033[35m'
CYAN = '\033[36m'
WHITE = '\033[37m'
# Text attributes: 0 resets everything, 1 is bold.
RESET = '\033[0;0m'
BOLD = '\033[1m'
# Fix: SGR code 2 is "faint/dim"; reverse video is SGR 7 (ECMA-48).
REVERSE = '\033[7m'
# Background colours (codes 40-47).
BLACKBG = '\033[40m'
REDBG = '\033[41m'
GREENBG = '\033[42m'
YELLOWBG = '\033[43m'
BLUEBG = '\033[44m'
MAGENTABG = '\033[45m'
CYANBG = '\033[46m'
WHITEBG = '\033[47m'
# ============================================================================
# That's all folks !
# ============================================================================
| eternallyBaffled/itrade | itrade_ansicolors.py | Python | gpl-3.0 | 3,883 |
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from flask.ext.babel import lazy_gettext
from wtforms.validators import ValidationError
import re
import requests
from pybossa.util import is_reserved_name
class Unique(object):
    """Validator that checks field uniqueness."""
    def __init__(self, query_function, field_name, message=None):
        # query_function: callable accepting **{field_name: value} and
        # returning the matching object, or None when no match exists.
        self.query_function = query_function
        self.field_name = field_name
        if not message:  # pragma: no cover
            message = lazy_gettext(u'This item already exists')
        self.message = message
    def __call__(self, form, form_field):
        filters = {self.field_name: form_field.data}
        check = self.query_function(**filters)
        # When editing an existing object its own row must not count as a
        # duplicate, so compare against the form's id field (if present).
        if 'id' in form:
            id = form.id.data
        else:
            id = None
        if check and (id is None or id != check.id):
            raise ValidationError(self.message)
class NotAllowedChars(object):
    """Validator rejecting fields that contain any forbidden character."""
    not_valid_chars = '$#&\/| '
    def __init__(self, message=None):
        if message:  # pragma: no cover
            self.message = message
        else:
            # Default message lists the forbidden characters themselves.
            default = u'%sand space symbols are forbidden' % self.not_valid_chars
            self.message = lazy_gettext(default)
    def __call__(self, form, field):
        forbidden = set(self.not_valid_chars)
        # Non-empty intersection means at least one forbidden char is used.
        if forbidden.intersection(field.data):
            raise ValidationError(self.message)
class CommaSeparatedIntegers(object):
    """Validator that validates input fields that have comma separated values.

    Accepts only digits and commas -- in particular no spaces.
    """
    not_valid_chars = '$#&\/| '  # unused here; kept for API parity
    # Compiled once at class-creation time instead of on every validation
    # call (the original recompiled the regex inside __call__).
    pattern = re.compile(r'^[\d,]+$')
    def __init__(self, message=None):
        if not message:
            self.message = lazy_gettext(u'Only comma separated values are allowed, no spaces')
        else:  # pragma: no cover
            self.message = message
    def __call__(self, form, field):
        if self.pattern.match(field.data) is None:
            raise ValidationError(self.message)
class Webhook(object):
    """Validator for webhook URLs.

    Empty values pass; a non-empty URL must answer an HTTP GET with
    status 200.
    """
    def __init__(self, message=None):
        if not message:
            self.message = lazy_gettext(u'Invalid URL')
        else:  # pragma: no cover
            self.message = message
    def __call__(self, form, field):
        try:
            if field.data:
                # A timeout keeps form validation from hanging forever on
                # an unresponsive endpoint (the original had none).
                r = requests.get(field.data, timeout=10)
                if r.status_code != 200:
                    raise ValidationError(self.message)
        except (requests.exceptions.ConnectionError,
                requests.exceptions.Timeout):
            # Timeouts previously propagated as unhandled errors; report
            # them like any other unreachable endpoint.
            raise ValidationError(lazy_gettext(u"Connection error"))
class ReservedName(object):
    """Validator to avoid URL conflicts when creating/modifying projects or
    user accounts"""
    def __init__(self, blueprint, message=None):
        # blueprint: name of the Flask blueprint whose reserved endpoint
        # names must not be taken (checked via pybossa.util.is_reserved_name).
        self.blueprint = blueprint
        if not message:  # pragma: no cover
            message = lazy_gettext(u'This name is used by the system.')
        self.message = message
    def __call__(self, form, field):
        if is_reserved_name(self.blueprint, field.data):
            raise ValidationError(self.message)
| MicroMappers/Pybossa | pybossa/forms/validator.py | Python | agpl-3.0 | 3,790 |
"""
This module extends SQLAlchemy and provides additional DDL [#]_
support.
.. [#] SQL Data Definition Language
"""
import re
import warnings
import sqlalchemy
from sqlalchemy import __version__ as _sa_version
warnings.simplefilter('always', DeprecationWarning)
# Parse "X.Y.Z..." into a tuple of ints, tolerating non-numeric suffixes
# such as "0.7.1b1" (re.match grabs only the leading digits).
_sa_version = tuple(int(re.match("\d+", x).group(0))
                    for x in _sa_version.split("."))
# Feature flags consumed elsewhere in the changeset package to select
# SQLAlchemy version-dependent code paths.
SQLA_06 = _sa_version >= (0, 6)
SQLA_07 = _sa_version >= (0, 7)
# Only needed at import time; drop from the module namespace.
del re
del _sa_version
from kallithea.lib.dbmigrate.migrate.changeset.schema import *
from kallithea.lib.dbmigrate.migrate.changeset.constraint import *
# Monkey-patch the changeset mixins into SQLAlchemy's schema classes so
# existing Table/Column/Index/DefaultClause instances gain the extra DDL
# (alter/rename/drop) support.
sqlalchemy.schema.Table.__bases__ += (ChangesetTable,)
sqlalchemy.schema.Column.__bases__ += (ChangesetColumn,)
sqlalchemy.schema.Index.__bases__ += (ChangesetIndex,)
sqlalchemy.schema.DefaultClause.__bases__ += (ChangesetDefaultClause,)
| msabramo/kallithea | kallithea/lib/dbmigrate/migrate/changeset/__init__.py | Python | gpl-3.0 | 841 |
from kalman import *
| mrcaps/rainmon | code/kalman/__init__.py | Python | bsd-3-clause | 21 |
import time
import urllib2
from urllib2 import urlopen
import re
import cookielib, urllib2
from cookielib import CookieJar
import datetime
import sqlite3
cj = CookieJar()
# Opener that carries cookies and presents a browser-like User-Agent so
# scraped sites respond as they would to a normal browser.
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
# Module-level database handle and cursor shared by createDB()/main().
conn = sqlite3.connect('knowledgeBase.db')
c = conn.cursor()
def createDB():
    """Create the knowledgeBase table (unix, datestamp, namedEntity, relatedWord)."""
    c.execute("CREATE TABLE knowledgeBase (unix REAL, datestamp TEXT, namedEntity TEXT, relatedWord TEXT)")
    # Commit must happen on the connection: sqlite3 cursors have no commit()
    # method, so the original ``c.commit()`` raised AttributeError and the
    # table creation was never persisted.
    conn.commit()
def main():
    """Fetch the Huffington Post raw RSS feed, follow every article link and
    print each article's paragraph text (crude regex-based scrape).

    Python 2 only (print statements, ``except Exception, e`` syntax).
    """
    try:
        page = 'http://feeds.huffingtonpost.com/huffingtonpost/raw_feed'
        sourceCode = opener.open(page).read()
        #print sourceCode
        try:
            titles = re.findall(r'<title>(.*?)</title>',sourceCode)
            links = re.findall(r'<link.*?href=\"(.*?)\"',sourceCode)
            #for title in titles:
                #print title
            for link in links:
                # .rdf entries are feed metadata, not articles -- skip them.
                if '.rdf' in link:
                    pass
                else:
                    print 'let\'s visit:', link
                    print '             _____________________________________'
                    linkSource = opener.open(link).read()
                    linesOfInterest = re.findall(r'<p>(.*?)</p>',str(linkSource))
                    print 'Content:'
                    for eachLine in linesOfInterest:
                        # Drop paragraphs that are just images or links.
                        if '<img width' in eachLine:
                            pass
                        elif '<a href=' in eachLine:
                            pass
                        else:
                            print eachLine
                    # Be polite: pause one second between article fetches.
                    time.sleep(1)
        except Exception, e:
            print str(e)
    except Exception,e:
        print str(e)
        pass
# NOTE(review): only createDB() is invoked at import time; main() is defined
# above but never called -- confirm whether that is intentional.
createDB()
| PythonProgramming/2.7-NLTK-videos | nltk7.py | Python | mit | 1,763 |
from five import grok
from plone.dexterity.content import Container
from plone.directives import form
from plone.namedfile.interfaces import IImageScaleTraversable
class IWorkspaceContainer(form.Schema, IImageScaleTraversable):
    """
    Marker interface for WorkspaceContainer.

    Declares no fields of its own; used to register views/adapters against
    the workspace container content type.
    """
class WorkspaceContainer(Container):
    """
    A folder to contain WorkspaceFolders
    """
    # grok-style declaration that instances provide IWorkspaceContainer.
    grok.implements(IWorkspaceContainer)
# ploneintranet.attachments is an optional dependency: when it is
# installed, mark WorkspaceContainer as attachment-storagable so documents
# can be attached to workspaces; otherwise do nothing.
try:
    from ploneintranet.attachments.attachments import IAttachmentStoragable
except ImportError:
    IAttachmentStoragable = None
if IAttachmentStoragable is not None:
    from zope import interface
    interface.classImplements(WorkspaceContainer, IAttachmentStoragable)
| ploneintranet/ploneintranet.workspace | src/ploneintranet/workspace/workspacecontainer.py | Python | gpl-2.0 | 705 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations.
    # NOTE(review): the default below is the wall-clock moment captured when
    # the migration was generated (a frozen constant), not "now" at insert
    # time -- confirm this one-off default is intentional.
    dependencies = [
        ('polls', '0002_reports'),
    ]
    operations = [
        migrations.AlterField(
            model_name='reports',
            name='timestamp',
            field=models.DateTimeField(default=datetime.datetime(2015, 3, 25, 20, 19, 54, 41806, tzinfo=utc)),
        ),
    ]
| mnithya/cs3240-s15-team06-test | polls/migrations/0003_auto_20150325_2019.py | Python | mit | 507 |
"""
Command to delete all rows from the verify_student_historicalverificationdeadline table.
"""
import logging
from lms.djangoapps.verify_student.models import VerificationDeadline
from openedx.core.djangoapps.util.row_delete import delete_rows, BaseDeletionCommand
log = logging.getLogger(__name__)
class Command(BaseDeletionCommand):
    """
    Example usage: ./manage.py lms --settings=devstack delete_historical_verify_student_data
    """
    help = 'Deletes all historical VerificationDeadline rows (in chunks).'
    def handle(self, *args, **options):
        """
        Deletes rows, chunking the deletes to avoid long table/row locks.
        """
        # The base class extracts the chunking parameters from the CLI
        # options; delete_rows then removes rows batch by batch.
        chunk_size, sleep_between = super(Command, self).handle(*args, **options)
        delete_rows(
            VerificationDeadline.objects,
            'verify_student_historicalverificationdeadline',
            'history_id',
            chunk_size, sleep_between
        )
| teltek/edx-platform | lms/djangoapps/verify_student/management/commands/delete_historical_verify_student_data.py | Python | agpl-3.0 | 943 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Script to download time tables as PDF and extract times into containers that can be used by OSM2GFTS
# or similar
from common import *
import os
import sys
import io
import logging
import requests
import json
import datetime

logger = logging.getLogger("GTFS_get_times")
logging.basicConfig(filename="/var/log/GTFS/minastur.log", level=logging.DEBUG, format="%(asctime)s %(name)s %(levelname)s - %(message)s", datefmt="%Y/%m/%d %H:%M:%S:")

# Calendar helpers come from the star-import of ``common``.
cal = EspiritoSanto()
noneCal = CalendarNull()

# Route refs to exclude from the generated output, in addition to the ones
# discovered below (routes without a usable duration).
blacklisted = []

myRoutes = {}

# durations.json holds [ref, origin, destination, via, duration] entries.
durationsList = {}
with open('durations.json', 'r') as infile:
    durationsList = json.load(infile)

# Upstream route/timetable definitions scraped from DER-ES.
source = {}
with open('../../sources/der-es/minastur.json') as infile:
    source = json.load(infile)

# Static metadata for the generated container.
myRoutes[u"updated"] = str(datetime.date.today())
myRoutes[u"operator"] = u"Minastur Transporte e Turismo Ltda"
myRoutes[u"network"] = u"Minastur"
myRoutes[u"source"] = u"DER-ES"
myRoutes[u"excluded_lines"] = []
myRoutes[u"routes"] = {}
for bl in blacklisted:
    myRoutes[u"excluded_lines"].append(bl)

for ref, origin, destination, via, duration in durationsList['routes']:
    if duration > 0:
        # Ensure a bucket exists for this ref (membership test instead of the
        # previous try/except whose result was never used).
        if ref not in source['routes']:
            source['routes'][ref] = []
        # Collect every source entry carrying this ref into its bucket.
        # NOTE(review): this also runs when the bucket already existed, so a
        # matching entry can be appended more than once; kept as-is to
        # preserve the original behaviour - confirm whether it is intended.
        for r in source['routes']:
            for rr in source['routes'][r]:
                try:
                    if rr['ref'] == ref:
                        source['routes'][ref].append(rr)
                        break
                except (KeyError, TypeError):
                    # Entry without a 'ref' key (or not a mapping) - skip it.
                    pass
        for s in source['routes'][ref]:
            if s['from'] == origin and s['to'] == destination:
                myRoutes = create_json(myRoutes, noneCal, ref, origin, destination, s['services'], s['times'], duration)
    else:
        # No usable duration: exclude the line instead of emitting bad data.
        myRoutes['excluded_lines'].append(ref)

# De-duplicate and sort the blacklist before writing it out.
newBlacklist = uniq(myRoutes[u"excluded_lines"])
newBlacklist.sort()
myRoutes[u"excluded_lines"] = newBlacklist
logger.info(u"Complete blacklist: %s", ", ".join(newBlacklist))

with open('times.json', 'w') as outfile:
    json.dump(myRoutes, outfile, sort_keys=True, indent=4)
| Skippern/PDF-scraper-Lorenzutti | creators/minastur/get_times.py | Python | gpl-3.0 | 2,176 |
import unittest
import mock
from pyrax.cloudcdn import CloudCDNClient
from pyrax.cloudcdn import CloudCDNFlavor
from pyrax.cloudcdn import CloudCDNFlavorManager
from pyrax.cloudcdn import CloudCDNService
from pyrax.cloudcdn import CloudCDNServiceManager
class CloudCDNTest(unittest.TestCase):
    """Verify that CloudCDNClient delegates each call to the right manager."""

    @mock.patch("pyrax.client.BaseClient.method_get")
    def test_ping(self, mock_get):
        """ping() issues a GET against /ping."""
        client = CloudCDNClient(mock.MagicMock())
        client.ping()
        mock_get.assert_called_with("/ping")

    @mock.patch("pyrax.cloudcdn.CloudCDNFlavorManager.list")
    def test_list_flavors(self, mock_list):
        """list_flavors() forwards to the flavor manager with no arguments."""
        client = CloudCDNClient(mock.MagicMock())
        client.list_flavors()
        mock_list.assert_called_once_with()

    @mock.patch("pyrax.cloudcdn.CloudCDNFlavorManager.get")
    def test_get_flavor(self, mock_get):
        """get_flavor() passes the flavor straight through."""
        client = CloudCDNClient(mock.MagicMock())
        client.get_flavor("flavor")
        mock_get.assert_called_once_with("flavor")

    @mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.list")
    def test_list_services(self, mock_list):
        """list_services() defaults limit/marker to None and forwards kwargs."""
        client = CloudCDNClient(mock.MagicMock())
        client.list_services()
        mock_list.assert_called_with(limit=None, marker=None)
        kwargs = {"limit": 1, "marker": 2}
        client.list_services(**kwargs)
        mock_list.assert_called_with(**kwargs)

    @mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.get")
    def test_get_service(self, mock_get):
        """get_service() passes the service straight through."""
        client = CloudCDNClient(mock.MagicMock())
        client.get_service("service")
        mock_get.assert_called_once_with("service")

    @mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.create")
    def test_create_service(self, mock_create):
        """create_service() forwards all six positional arguments."""
        client = CloudCDNClient(mock.MagicMock())
        args = (1, 2, 3, 4, 5, 6)
        client.create_service(*args)
        mock_create.assert_called_once_with(*args)

    @mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.patch")
    def test_patch_service(self, mock_patch):
        """patch_service() forwards both positional arguments."""
        client = CloudCDNClient(mock.MagicMock())
        args = (1, 2)
        client.patch_service(*args)
        mock_patch.assert_called_once_with(*args)

    @mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.delete")
    def test_delete_service(self, mock_delete):
        """delete_service() passes the service straight through."""
        client = CloudCDNClient(mock.MagicMock())
        client.delete_service("service")
        mock_delete.assert_called_once_with("service")

    @mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.delete_assets")
    def test_delete_assets(self, mock_delete):
        """delete_assets() forwards all three positional arguments."""
        client = CloudCDNClient(mock.MagicMock())
        args = (1, 2, 3)
        client.delete_assets(*args)
        mock_delete.assert_called_once_with(*args)
| briancurtin/pyrax | tests/unit/test_cloud_cdn.py | Python | apache-2.0 | 2,703 |
import os
import sys

# Make the bundled site-packages directory (a sibling of this package)
# importable for vendored dependencies.
dirname = os.path.dirname(__file__)
lib_path = os.path.abspath(os.path.join(dirname, os.pardir))
packages_path = os.path.join(lib_path, "site-packages")

# Guard against duplicate entries on repeated imports.
if packages_path not in sys.path:
    sys.path.append(packages_path)
| ghostlines/ghostlines-robofont | src/lib/ghostlines/__init__.py | Python | mit | 240 |
"""file_parser.py reads a text file and parses its comma-separated items into a list."""


def file_to_list(input_file):
    """Read ``input_file`` and return its comma-separated items as a list.

    Double quotes are stripped, each line is split on commas, and the items
    from all lines are concatenated in order.  If the file cannot be opened
    the error is printed and an empty list is returned (no exception is
    raised to the caller).
    """
    items = []
    try:
        with open(input_file) as in_file:
            lines = in_file.readlines()
        if not lines:
            print('no content in the file')
        for row in lines:
            # Drop quoting and surrounding whitespace before splitting.
            cleaned = row.replace('"', '').strip()
            items.extend(cleaned.split(','))
    except OSError as err:
        print('Failed to open file', err)
    return items
| roy-boy/python_scripts | file_parser.py | Python | gpl-3.0 | 847 |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
# Re-export the individual signal namespaces; the names in ``core`` are
# additionally pulled into this module's namespace via the star import.
from indico.core.signals import (acl, agreements, attachments, category, event, event_management, menu, plugin, rb, rh,
                                 users)
from indico.core.signals.core import *
# ``__all__`` restricts ``from indico.core.signals import *`` to the signal
# sub-namespaces listed here.
__all__ = ('acl', 'agreements', 'attachments', 'category', 'event', 'event_management', 'menu', 'plugin', 'rb', 'rh',
           'users')
| mic4ael/indico | indico/core/signals/__init__.py | Python | mit | 553 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Isaku Yamahata <yamahata@valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
def properties_root_device_name(properties):
    """Return the root device name from image metadata, or None.

    The last 'root' entry in the 'mappings' list wins; an explicit
    'root_device_name' property overrides any mapping entry.
    """
    device = None
    # NOTE(yamahata): see image_service.s3.s3create()
    for mapping in properties.get('mappings', []):
        if mapping['virtual'] == 'root':
            device = mapping['device']
    # NOTE(yamahata): register_image's command line can override
    # <machine>.manifest.xml
    if 'root_device_name' in properties:
        device = properties['root_device_name']
    return device
# Raw strings fix the invalid '\d' escape sequences of the original pattern
# (a DeprecationWarning/SyntaxWarning in modern Python); the compiled pattern
# is byte-identical.  Matches 'ephemeral0', 'ephemeral1', ... without leading
# zeros.
_ephemeral = re.compile(r'^ephemeral(\d|[1-9]\d+)$')


def is_ephemeral(device_name):
    """Return a truthy match object when device_name is an ephemeralN name."""
    return _ephemeral.match(device_name)


def ephemeral_num(ephemeral_name):
    """Return the numeric index N from an 'ephemeralN' device name."""
    assert is_ephemeral(ephemeral_name)
    return int(_ephemeral.sub(r'\1', ephemeral_name))


def is_swap_or_ephemeral(device_name):
    """Return truthy when device_name is 'swap' or an ephemeralN name."""
    return device_name == 'swap' or is_ephemeral(device_name)
def mappings_prepend_dev(mappings):
    """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type."""
    # Mutates the mapping dicts in place and returns the same list.
    for mapping in mappings:
        device = mapping['device']
        if is_swap_or_ephemeral(mapping['virtual']) and not device.startswith('/'):
            mapping['device'] = '/dev/' + device
    return mappings
_dev = re.compile(r'^/dev/')


def strip_dev(device_name):
    """Remove a leading '/dev/' (None/empty values pass through unchanged)."""
    if not device_name:
        return device_name
    return _dev.sub('', device_name)


_pref = re.compile(r'^((x?v|s)d)')


def strip_prefix(device_name):
    """Remove both a leading '/dev/' and an 'xvd'/'vd'/'sd' prefix."""
    return _pref.sub('', strip_dev(device_name))
| NoBodyCam/TftpPxeBootBareMetal | nova/block_device.py | Python | apache-2.0 | 2,403 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, Extension, find_packages
# NOTE(review): ``Extension`` and ``find_packages`` are imported but never
# used below; packages are listed explicitly instead.
with open('README.md') as readme_file:
    readme = readme_file.read()
# Runtime dependency of the hikvision package.
requirements = ['requests>=2.21.0']
setup_requirements = [ ]
test_requirements = [ ]
setup(
    author="Finbarr Brady",
    author_email='fbradyirl@github.io',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        # NOTE(review): this classifier says Apache while ``license`` below
        # says MIT - confirm which one applies.
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.7',
    ],
    description='Provides a python interface to interact with a hikvision camera',
    install_requires=requirements,
    license='MIT',
    long_description=readme,
    include_package_data=True,
    long_description_content_type="text/markdown",
    keywords='hikvision camera python cgi interface',
    name='hikvision',
    packages=['hikvision'],
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/fbradyirl/hikvision',
    version='2.0.4',
    zip_safe=False,
)
| fbradyirl/hikvision | setup.py | Python | mit | 1,172 |
"""Helper methods to handle the time in Home Assistant."""
from __future__ import annotations
from contextlib import suppress
import datetime as dt
import re
from typing import Any, cast
import ciso8601
import pytz
import pytz.exceptions as pytzexceptions
import pytz.tzinfo as pytzinfo
from homeassistant.const import MATCH_ALL
# Format used by parse_date() for plain dates.
DATE_STR_FORMAT = "%Y-%m-%d"
# Stdlib UTC tzinfo and the pytz one; both are used below.
NATIVE_UTC = dt.timezone.utc
UTC = pytz.utc
# Module-wide default; reassigned only via set_default_time_zone().
DEFAULT_TIME_ZONE: dt.tzinfo = pytz.utc
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
# Fallback datetime pattern used by parse_datetime() when ciso8601 rejects
# the input string.
DATETIME_RE = re.compile(
    r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
    r"[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
    r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
    r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
)
def set_default_time_zone(time_zone: dt.tzinfo) -> None:
    """Set a default time zone to be used when none is specified.
    Async friendly.
    """
    global DEFAULT_TIME_ZONE  # pylint: disable=global-statement
    # NOTE: Remove in the future in favour of typing
    # (debug-only guard: assert statements are stripped under ``python -O``)
    assert isinstance(time_zone, dt.tzinfo)
    DEFAULT_TIME_ZONE = time_zone
def get_time_zone(time_zone_str: str) -> dt.tzinfo | None:
    """Get time zone from string. Return None if unable to determine.

    Async friendly.
    """
    with suppress(pytzexceptions.UnknownTimeZoneError):
        return pytz.timezone(time_zone_str)
    return None
def utcnow() -> dt.datetime:
    """Get now in UTC time.
    Returns a timezone-aware datetime (tzinfo is the stdlib UTC).
    """
    return dt.datetime.now(NATIVE_UTC)
def now(time_zone: dt.tzinfo | None = None) -> dt.datetime:
    """Get now in specified time zone.
    Falls back to the module-wide DEFAULT_TIME_ZONE when time_zone is None.
    """
    return dt.datetime.now(time_zone or DEFAULT_TIME_ZONE)
def as_utc(dattim: dt.datetime) -> dt.datetime:
    """Return a datetime as UTC time.
    Assumes datetime without tzinfo to be in the DEFAULT_TIME_ZONE.
    """
    if dattim.tzinfo == UTC:
        # Already UTC - return the same object unchanged.
        return dattim
    if dattim.tzinfo is None:
        # Interpret naive datetimes as local (DEFAULT_TIME_ZONE) time.
        dattim = DEFAULT_TIME_ZONE.localize(dattim)  # type: ignore
    return dattim.astimezone(UTC)
def as_timestamp(dt_value: dt.datetime) -> float:
    """Convert a date/time into a unix time (seconds since 1970).

    Raises ValueError when a non-datetime value cannot be parsed.
    """
    if hasattr(dt_value, "timestamp"):
        return dt_value.timestamp()
    parsed = parse_datetime(str(dt_value))
    if parsed is None:
        raise ValueError("not a valid date/time.")
    return parsed.timestamp()
def as_local(dattim: dt.datetime) -> dt.datetime:
    """Convert a UTC datetime object to local time zone.
    Naive datetimes are assumed to be in UTC.
    """
    if dattim.tzinfo == DEFAULT_TIME_ZONE:
        # Already local - return unchanged.
        return dattim
    if dattim.tzinfo is None:
        dattim = UTC.localize(dattim)
    return dattim.astimezone(DEFAULT_TIME_ZONE)
def utc_from_timestamp(timestamp: float) -> dt.datetime:
    """Return a UTC time from a timestamp (seconds since the Unix epoch)."""
    return UTC.localize(dt.datetime.utcfromtimestamp(timestamp))
def start_of_local_day(dt_or_d: dt.date | dt.datetime | None = None) -> dt.datetime:
    """Return local datetime object of start of day from date or datetime.

    Defaults to today (in the local DEFAULT_TIME_ZONE) when no value is given.
    """
    if dt_or_d is None:
        dt_or_d = now()
    if isinstance(dt_or_d, dt.datetime):
        dt_or_d = dt_or_d.date()
    return DEFAULT_TIME_ZONE.localize(  # type: ignore
        dt.datetime.combine(dt_or_d, dt.time())
    )
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
def parse_datetime(dt_str: str) -> dt.datetime | None:
    """Parse a string and return a datetime.datetime.
    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.
    Raises ValueError if the input is well formatted but not a valid datetime.
    Returns None if the input isn't well formatted.
    """
    # Fast path: the C parser handles strict ISO-8601 strings.
    with suppress(ValueError, IndexError):
        return ciso8601.parse_datetime(dt_str)
    # Fallback: the more lenient Django-derived regex.
    match = DATETIME_RE.match(dt_str)
    if not match:
        return None
    kws: dict[str, Any] = match.groupdict()
    if kws["microsecond"]:
        # Right-pad to 6 digits so e.g. ".5" means 500000 microseconds.
        kws["microsecond"] = kws["microsecond"].ljust(6, "0")
    tzinfo_str = kws.pop("tzinfo")
    tzinfo: dt.tzinfo | None = None
    if tzinfo_str == "Z":
        tzinfo = UTC
    elif tzinfo_str is not None:
        # Offsets look like +HH, +HHMM or +HH:MM.
        offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
        offset_hours = int(tzinfo_str[1:3])
        offset = dt.timedelta(hours=offset_hours, minutes=offset_mins)
        if tzinfo_str[0] == "-":
            offset = -offset
        tzinfo = dt.timezone(offset)
    kws = {k: int(v) for k, v in kws.items() if v is not None}
    kws["tzinfo"] = tzinfo
    return dt.datetime(**kws)
def parse_date(dt_str: str) -> dt.date | None:
    """Convert a date string to a date object, or None when invalid."""
    try:
        parsed = dt.datetime.strptime(dt_str, DATE_STR_FORMAT)
    except ValueError:  # dt_str did not match our format
        return None
    return parsed.date()
def parse_time(time_str: str) -> dt.time | None:
    """Parse a time string (00:20:00) into Time object.

    Return None if invalid.
    """
    pieces = str(time_str).split(":")
    if len(pieces) < 2:
        return None
    try:
        fields = [int(piece) for piece in pieces[:3]]
        if len(fields) == 2:
            # Seconds default to 0 when omitted.
            fields.append(0)
        return dt.time(*fields)
    except ValueError:
        # Non-numeric field or value out of range for dt.time.
        return None
def get_age(date: dt.datetime) -> str:
    """
    Take a datetime and return its "age" as a string.
    Only the biggest unit is reported, e.g. 2 days and 3 hours yields
    "2 days".
    Make sure date is not in the future, or else it won't work.
    """

    def _pluralize(number: int, unit: str) -> str:
        """Return '1 unit' or 'N units'."""
        return f"1 {unit}" if number == 1 else f"{number:d} {unit}s"

    delta = (now() - date).total_seconds()
    units = ["second", "minute", "hour", "day", "month"]
    factors = [60, 60, 24, 30, 12]

    selected = "year"
    for unit, factor in zip(units, factors):
        if round(delta) < factor:
            selected = unit
            break
        delta /= factor
    return _pluralize(round(delta), selected)
def parse_time_expression(parameter: Any, min_value: int, max_value: int) -> list[int]:
    """Parse the time expression part and return a list of times to match.

    Accepts None / MATCH_ALL (every value), "/N" (every Nth value), a single
    scalar, or an iterable of scalars.  Raises ValueError when any resulting
    value falls outside [min_value, max_value].
    """
    full_range = range(min_value, max_value + 1)
    if parameter is None or parameter == MATCH_ALL:
        res = list(full_range)
    elif isinstance(parameter, str) and parameter.startswith("/"):
        step = int(parameter[1:])
        res = [x for x in full_range if x % step == 0]
    elif isinstance(parameter, str) or not hasattr(parameter, "__iter__"):
        res = [int(parameter)]
    else:
        res = sorted(int(x) for x in parameter)

    for val in res:
        if val < min_value or val > max_value:
            raise ValueError(
                f"Time expression '{parameter}': parameter {val} out of range "
                f"({min_value} to {max_value})"
            )
    return res
def find_next_time_expression_time(
    now: dt.datetime,  # pylint: disable=redefined-outer-name
    seconds: list[int],
    minutes: list[int],
    hours: list[int],
) -> dt.datetime:
    """Find the next datetime from now for which the time expression matches.
    The algorithm looks at each time unit separately and tries to find the
    next one that matches for each. If any of them would roll over, all
    time units below that are reset to the first matching value.
    Timezones are also handled (the tzinfo of the now object is used),
    including daylight saving time.
    """
    if not seconds or not minutes or not hours:
        raise ValueError("Cannot find a next time: Time expression never matches!")
    def _lower_bound(arr: list[int], cmp: int) -> int | None:
        """Return the first value in arr greater or equal to cmp.
        Return None if no such value exists.
        """
        # Plain binary search (equivalent to bisect.bisect_left), returning
        # the element itself instead of the index.
        left = 0
        right = len(arr)
        while left < right:
            mid = (left + right) // 2
            if arr[mid] < cmp:
                left = mid + 1
            else:
                right = mid
        if left == len(arr):
            return None
        return arr[left]
    # Work at whole-second resolution.
    result = now.replace(microsecond=0)
    # Match next second
    next_second = _lower_bound(seconds, result.second)
    if next_second is None:
        # No second to match in this minute. Roll-over to next minute.
        next_second = seconds[0]
        result += dt.timedelta(minutes=1)
    result = result.replace(second=next_second)
    # Match next minute
    next_minute = _lower_bound(minutes, result.minute)
    if next_minute != result.minute:
        # We're in the next minute. Seconds needs to be reset.
        result = result.replace(second=seconds[0])
    if next_minute is None:
        # No minute to match in this hour. Roll-over to next hour.
        next_minute = minutes[0]
        result += dt.timedelta(hours=1)
    result = result.replace(minute=next_minute)
    # Match next hour
    next_hour = _lower_bound(hours, result.hour)
    if next_hour != result.hour:
        # We're in the next hour. Seconds+minutes needs to be reset.
        result = result.replace(second=seconds[0], minute=minutes[0])
    if next_hour is None:
        # No minute to match in this day. Roll-over to next day.
        next_hour = hours[0]
        result += dt.timedelta(days=1)
    result = result.replace(hour=next_hour)
    if result.tzinfo is None:
        # Naive input: no DST handling needed.
        return result
    # Now we need to handle timezones. We will make this datetime object
    # "naive" first and then re-convert it to the target timezone.
    # This is so that we can call pytz's localize and handle DST changes.
    tzinfo: pytzinfo.DstTzInfo = UTC if result.tzinfo == NATIVE_UTC else result.tzinfo
    result = result.replace(tzinfo=None)
    try:
        result = tzinfo.localize(result, is_dst=None)
    except pytzexceptions.AmbiguousTimeError:
        # This happens when we're leaving daylight saving time and local
        # clocks are rolled back. In this case, we want to trigger
        # on both the DST and non-DST time. So when "now" is in the DST
        # use the DST-on time, and if not, use the DST-off time.
        use_dst = bool(now.dst())
        result = tzinfo.localize(result, is_dst=use_dst)
    except pytzexceptions.NonExistentTimeError:
        # This happens when we're entering daylight saving time and local
        # clocks are rolled forward, thus there are local times that do
        # not exist. In this case, we want to trigger on the next time
        # that *does* exist.
        # In the worst case, this will run through all the seconds in the
        # time shift, but that's max 3600 operations for once per year
        result = result.replace(tzinfo=tzinfo) + dt.timedelta(seconds=1)
        return find_next_time_expression_time(result, seconds, minutes, hours)
    result_dst = cast(dt.timedelta, result.dst())
    now_dst = cast(dt.timedelta, now.dst()) or dt.timedelta(0)
    if result_dst >= now_dst:
        return result
    # Another edge-case when leaving DST:
    # When now is in DST and ambiguous *and* the next trigger time we *should*
    # trigger is ambiguous and outside DST, the excepts above won't catch it.
    # For example: if triggering on 2:30 and now is 28.10.2018 2:30 (in DST)
    # we should trigger next on 28.10.2018 2:30 (out of DST), but our
    # algorithm above would produce 29.10.2018 2:30 (out of DST)
    # Step 1: Check if now is ambiguous
    try:
        tzinfo.localize(now.replace(tzinfo=None), is_dst=None)
        return result
    except pytzexceptions.AmbiguousTimeError:
        pass
    # Step 2: Check if result of (now - DST) is ambiguous.
    check = now - now_dst
    check_result = find_next_time_expression_time(check, seconds, minutes, hours)
    try:
        tzinfo.localize(check_result.replace(tzinfo=None), is_dst=None)
        return result
    except pytzexceptions.AmbiguousTimeError:
        pass
    # OK, edge case does apply. We must override the DST to DST-off
    check_result = tzinfo.localize(check_result.replace(tzinfo=None), is_dst=False)
    return check_result
| w1ll1am23/home-assistant | homeassistant/util/dt.py | Python | apache-2.0 | 12,636 |
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Package
class PackageAdmin(admin.ModelAdmin):
list_display = (
'member', 'version', 'platform', 'arch',
'get_display_size', 'update', )
list_filter = ('version', 'platform', 'arch', 'update', )
search_fields = ('id', 'member__email', )
def get_display_size(self, obj):
return "%.2f MB" % (obj.size / 1024 / 1024)
get_display_size.short_description = 'Size'
admin.site.register(Package, PackageAdmin)
| vinta/sublimall-server | sublimall/storage/admin.py | Python | mit | 526 |
# -*- coding: utf-8 -*-
# Copyright 2017 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import signal
from gi.repository import Gtk
from quodlibet import _
from quodlibet import app
from quodlibet import print_d
from quodlibet import print_w
from quodlibet.plugins import PluginConfig
from quodlibet.plugins.events import EventPlugin
from quodlibet.qltk import Button
from quodlibet.qltk import ErrorMessage
from quodlibet.qltk import Icons
from quodlibet.qltk.entry import UndoEntry
from quodlibet.util import escape
class ProjectM(EventPlugin):
"""Launch external visualisations, e.g. via projectM
Try this first (Ubuntu/Debian):
sudo apt-get install projectm-pulseaudio
"""
_config = PluginConfig(__name__)
PLUGIN_ID = "visualisations"
PLUGIN_NAME = _("Launch Visualisations")
PLUGIN_ICON = Icons.IMAGE_X_GENERIC
PLUGIN_DESC = _("Launch external visualisations.")
DEFAULT_EXEC = 'projectM-pulseaudio'
def __init__(self):
self._pid = None
def enabled(self):
from gi.repository import GLib
print_d("Starting %s" % self.PLUGIN_NAME)
try:
self._pid, fdin, fdout, fderr = GLib.spawn_async(
argv=self.executable.split(),
flags=GLib.SpawnFlags.SEARCH_PATH,
standard_output=True,
standard_input=True)
except GLib.Error as e:
msg = ((_("Couldn't run visualisations using '%s'") + " (%s)") %
(escape(self.executable), escape(e.message)))
ErrorMessage(title=_("Error"), description=msg,
parent=app.window).run()
else:
# self._stdin = os.fdopen(fdin, mode='w')
print_d("Launched with PID: %s" % self._pid)
def disabled(self):
if not self._pid:
return
print_d("Shutting down %s" % self.PLUGIN_NAME)
try:
os.kill(self._pid, signal.SIGTERM)
os.kill(self._pid, signal.SIGKILL)
except Exception as e:
print_w("Couldn't shut down cleanly (%s)" % e)
def PluginPreferences(self, *args):
vbox = Gtk.VBox(spacing=12)
label = Gtk.Label(label=_("Visualiser executable:"))
def edited(widget):
self.executable = widget.get_text()
entry = UndoEntry()
entry.connect('changed', edited)
entry.set_text(self.executable)
hbox = Gtk.HBox(spacing=6)
hbox.pack_start(label, False, False, 0)
hbox.pack_start(entry, True, True, 0)
vbox.pack_start(hbox, True, True, 0)
def refresh_clicked(widget):
self.disabled()
self.enabled()
refresh_button = Button(_("Reload"), Icons.VIEW_REFRESH)
refresh_button.connect('clicked', refresh_clicked)
vbox.pack_start(refresh_button, False, False, 0)
return vbox
@property
def executable(self):
return self._config.get('executable', self.DEFAULT_EXEC)
@executable.setter
def executable(self, value):
self._config.set('executable', value)
| elbeardmorez/quodlibet | quodlibet/quodlibet/ext/events/visualisations.py | Python | gpl-2.0 | 3,329 |
import os
import sys
from grace.utility import *
from grace.utility import LOG as L
from grace.script import testcase_base
class TestCase_Android(testcase_base.TestCase_Unit):
def adb_screenshot(self, filename=None):
if filename == None: filename = "capture.png"
L.debug("capture file : %s" % os.path.join(TMP_DIR, filename))
return self.adb.snapshot(filename, TMP_DIR)
def adb_tap(self, x, y):
return self.adb.tap(x, y)
| TE-ToshiakiTanaka/stve | project/grace/script/testcase_android.py | Python | mit | 466 |
"""
WSGI config for tests_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests_project.settings")
application = get_wsgi_application()
| andytwoods/zappa-call-later | tests/tests_project/tests_project/wsgi.py | Python | mit | 403 |
"""
========
numpydoc
========
Sphinx extension that handles docstrings in the Numpy standard format. [1]
It will:
- Convert Parameters etc. sections to field lists.
- Convert See Also section to a See also entry.
- Renumber references.
- Extract the signature from the docstring, if it can't be determined
otherwise.
.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
"""
from __future__ import unicode_literals
import sys # Only needed to check Python version
import os
import re
import pydoc
from .docscrape_sphinx import get_doc_object
from .docscrape_sphinx import SphinxDocString
from sphinx.util.compat import Directive
import inspect
def mangle_docstrings(app, what, name, obj, options, lines,
reference_offset=[0]):
cfg = dict(use_plots=app.config.numpydoc_use_plots,
show_class_members=app.config.numpydoc_show_class_members)
if what == 'module':
# Strip top title
title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
re.I | re.S)
lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
else:
doc = get_doc_object(obj, what, "\n".join(lines), config=cfg)
if sys.version_info[0] < 3:
lines[:] = unicode(doc).splitlines()
else:
lines[:] = str(doc).splitlines()
if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
obj.__name__:
if hasattr(obj, '__module__'):
v = dict(full_name="%s.%s" % (obj.__module__, obj.__name__))
else:
v = dict(full_name=obj.__name__)
lines += [u'', u'.. htmlonly::', '']
lines += [u' %s' % x for x in
(app.config.numpydoc_edit_link % v).split("\n")]
# replace reference numbers so that there are no duplicates
references = []
for line in lines:
line = line.strip()
m = re.match(r'^.. \[([a-z0-9_.-])\]', line, re.I)
if m:
references.append(m.group(1))
# start renaming from the longest string, to avoid overwriting parts
references.sort(key=lambda x: -len(x))
if references:
for i, line in enumerate(lines):
for r in references:
if re.match(r'^\d+$', r):
new_r = "R%d" % (reference_offset[0] + int(r))
else:
new_r = u"%s%d" % (r, reference_offset[0])
lines[i] = lines[i].replace(u'[%s]_' % r,
u'[%s]_' % new_r)
lines[i] = lines[i].replace(u'.. [%s]' % r,
u'.. [%s]' % new_r)
reference_offset[0] += len(references)
def mangle_signature(app, what, name, obj,
options, sig, retann):
# Do not try to inspect classes that don't define `__init__`
if (inspect.isclass(obj) and
(not hasattr(obj, '__init__') or
'initializes x; see ' in pydoc.getdoc(obj.__init__))):
return '', ''
if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')):
return
if not hasattr(obj, '__doc__'):
return
doc = SphinxDocString(pydoc.getdoc(obj))
if doc['Signature']:
sig = re.sub("^[^(]*", "", doc['Signature'])
return sig, ''
def setup(app, get_doc_object_=get_doc_object):
global get_doc_object
get_doc_object = get_doc_object_
if sys.version_info[0] < 3:
app.connect(b'autodoc-process-docstring', mangle_docstrings)
app.connect(b'autodoc-process-signature', mangle_signature)
else:
app.connect('autodoc-process-docstring', mangle_docstrings)
app.connect('autodoc-process-signature', mangle_signature)
app.add_config_value('numpydoc_edit_link', None, False)
app.add_config_value('numpydoc_use_plots', None, False)
app.add_config_value('numpydoc_show_class_members', True, True)
# Extra mangling domains
app.add_domain(NumpyPythonDomain)
app.add_domain(NumpyCDomain)
#-----------------------------------------------------------------------------
# Docstring-mangling domains
#-----------------------------------------------------------------------------
from docutils.statemachine import ViewList
from sphinx.domains.c import CDomain
from sphinx.domains.python import PythonDomain
class ManglingDomainBase(object):
directive_mangling_map = {}
def __init__(self, *a, **kw):
super(ManglingDomainBase, self).__init__(*a, **kw)
self.wrap_mangling_directives()
def wrap_mangling_directives(self):
for name, objtype in self.directive_mangling_map.items():
self.directives[name] = wrap_mangling_directive(
self.directives[name], objtype)
class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
name = 'np'
directive_mangling_map = {
'function': 'function',
'class': 'class',
'exception': 'class',
'method': 'function',
'classmethod': 'function',
'staticmethod': 'function',
'attribute': 'attribute',
}
class NumpyCDomain(ManglingDomainBase, CDomain):
name = 'np-c'
directive_mangling_map = {
'function': 'function',
'member': 'attribute',
'macro': 'function',
'type': 'class',
'var': 'object',
}
def wrap_mangling_directive(base_directive, objtype):
class directive(base_directive):
def run(self):
env = self.state.document.settings.env
name = None
if self.arguments:
m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
name = m.group(2).strip()
if not name:
name = self.arguments[0]
lines = list(self.content)
mangle_docstrings(env.app, objtype, name, None, None, lines)
self.content = ViewList(lines, self.content.parent)
return base_directive.run(self)
return directive
| loli/sklearn-ensembletrees | doc/sphinxext/numpy_ext/numpydoc.py | Python | bsd-3-clause | 6,030 |
# -*- coding: utf-8 -*-
# using code from
# https://blog.darmasoft.net/2013/06/30/using-pure-python-otr.html
import potr
import os
import logging
logging.basicConfig(level=logging.DEBUG)
from django.core.cache import cache
log = logging.getLogger()
class OTRContext(potr.context.Context):
"""Context is like a connection/conversation between two persons."""
def getPolicy(self, key):
DEFAULT_POLICY_FLAGS = {
'ALLOW_V1': False,
'ALLOW_V2': True,
'REQUIRE_ENCRYPTION': True,
}
if key in DEFAULT_POLICY_FLAGS:
return DEFAULT_POLICY_FLAGS[key]
else:
return False
def inject(self, msg, appdata=None):
# This method send some message from account to the target peer
log.debug('Call inject; %s => %s; %s', self.user, self.peer, msg)
def setState(self, newstate):
# Hook to catch changes at the state variable to notify user about
# state changes lik encryption is ready
log.debug('set State to %s', newstate)
super(OTRContext, self).setState(newstate)
def plain_context(self):
return self.getState() == potr.STATE_PLAINTEXT
def encrypted_context(self):
return self.getState() == potr.STATE_ENCRYPTED
def finished_context(self):
return self.getState() == potr.STATE_FINISHED
class OTRAccount(potr.context.Account):
def __init__(self, jid):
super(OTRAccount, self).__init__(jid, '-', 1024)
self.key = 'privatekey-%s.key3' % jid
def loadPrivkey(self):
x = cache.get(self.key)
if x:
return potr.crypt.PK.parsePrivateKey(x)[0]
return None
def savePrivkey(self):
cache.set(self.key, self.getPrivkey().serializePrivateKey())
class OTRContextManager(object):
    """Keeps one OTRContext per remote peer for a single local account."""

    def __init__(self, jid):
        # jid = logged in user
        self.account = OTRAccount(jid)
        self.contexts = {}

    def context_to(self, other):
        """Return the context for *other*, creating it on first use."""
        try:
            return self.contexts[other]
        except KeyError:
            fresh = OTRContext(self.account, other)
            self.contexts[other] = fresh
            return fresh

    def incoming(self, msg):
        """Decrypt *msg* (dict with from/body/type keys).

        Returns ``(text, True)`` when the message passed through potr
        normally and ``(raw_body, False)`` when plaintext arrived on a
        supposedly encrypted channel.
        """
        logging.getLogger().debug("Incoming message by %s: %s",
                                  self.account, msg)
        otrctx = self.context_to(msg['from'])
        try:
            # Attempt to pass the message through
            # potr.context.Context.receiveMessage; there are a couple of
            # possible cases.
            return otrctx.receiveMessage(msg["body"]), True
        except potr.context.UnencryptedMessage:
            # potr raises UnencryptedMessage when a plaintext message came
            # through a supposedly encrypted channel -- it is appropriate
            # here to warn your user!
            return msg['body'], False

    def outgoing(self, jid, msg):
        """Send *msg* to *jid*, encrypting when the channel allows it."""
        otrctx = self.context_to(jid)
        if otrctx.state != potr.context.STATE_ENCRYPTED:
            # The outgoing state is not encrypted: send the plain text.
            logging.getLogger().debug("sending message unencrypted")
            otrctx.inject(msg)
        else:
            logging.getLogger().debug("sending encrypting message")
            # Passing the plain-text message into Context.sendMessage
            # triggers Context.inject with an encrypted payload.
            otrctx.sendMessage(0, msg)
# usage example:
if __name__ == '__main__':
    import types

    # BUG FIX: the example referred to an undefined ``MyOtrContextManager``
    # class; the manager defined above is ``OTRContextManager``.
    bob = OTRContextManager('bob')
    bob_account = bob.account
    alice = OTRContextManager('alice')
    alice_account = alice.account

    # Wire the two managers together: each context's inject() delivers the
    # wire data straight into the other party's incoming().
    def bob_to_alice(self, msg, appdata=None):
        print("Bob => Alice: %s" % msg)
        alice.incoming({
            'from': 'bob',
            'body': msg,
            'type': 'xxx'
        })
    bob.context_to('alice').inject = types.MethodType(bob_to_alice, bob)

    def alice_to_bob(self, msg, appdata=None):
        print("Alice => Bob: %s" % msg)
        bob.incoming({
            'from': 'alice',
            'body': msg,
            'type': 'xxx'
        })
    alice.context_to('bob').inject = types.MethodType(alice_to_bob, alice)

    bob.context_to('alice').inject('Hello World!')
    alice.context_to('bob').inject('Hello World!')

    alice_con_bob = alice.context_to('bob')
    # An empty message with the ALLOW_V2 policy flag produces the OTR
    # query/invitation message that starts the handshake.
    inv_msg = alice_con_bob.sendMessage(
        alice_con_bob.getPolicy('ALLOW_V2'), ''
    )
    alice_con_bob.inject(inv_msg)
    alice.outgoing('bob', 'Hello World!')
    bob.outgoing('alice', 'Hello Alice')
| mfa/djangodash2013 | otrme/otrbackend/magic.py | Python | bsd-3-clause | 4,845 |
# This file is part of fedmsg.
# Copyright (C) 2015 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
""" Tests for infra git messages (ansible, dns, etc..) """
import unittest
from fedmsg_meta_fedora_infrastructure.tests.base import Base
from .common import add_doc
class TestInfraGitPuppet(Base):
    """ These messages get published by git repos owned by the Fedora
    Infrastructure team. These are the repos we use to manage the systems that
    run fedoraproject.org.
    In particular, this message is an example of a message from our "puppet"
    repo, which is older and being retired in favor of our ansible repo.
    """
    # Expected values that the Base test machinery compares against the
    # meta-processor output for the sample ``msg`` payload below.
    expected_title = "infragit.receive"
    expected_subti = 'ralph pushed a commit to the fedora-infra puppet ' + \
        'repo (master): "Testing again for fedmsg."'
    expected_link = ''  # No links for puppet
    expected_icon = "https://apps.fedoraproject.org/img/icons/git-logo.png"
    # Libravatar avatar URL for the committer.
    expected_secondary_icon = ("https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c"
        "?s=64&d=retro")
    expected_usernames = set(['ralph'])
    expected_agent = 'ralph'
    expected_packages = set()
    # Objects are "<repo>/<file touched>" pairs derived from the commit stats.
    expected_objects = set(['puppet/test'])

    # Sample fedmsg payload as published by the git post-receive hook.
    msg = {
        "i": 1,
        "timestamp": 1439584387.0,
        "msg_id": "2015-ee126d0a-a89b-4a09-ba15-4ca3382eb8fe",
        "topic": "org.fedoraproject.prod.infragit.receive",
        "source_version": "0.6.5",
        "msg": {
            "commit": {
                "username": "ralph",
                "stats": {
                    "files": {
                        "test": {
                            "deletions": 0,
                            "additions": 0,
                            "lines": 0
                        }
                    },
                    "total": {
                        "deletions": 0,
                        "files": 1,
                        "additions": 0,
                        "lines": 0
                    }
                },
                "name": "Ralph Bean",
                "rev": "6e7177a1d5fd712cb53ce70acb17b92c5f791f08",
                "agent": "ralph",
                "summary": "Testing again for fedmsg.",
                "repo": "",
                "branch": "master",
                "seen": False,
                "path": "/git/puppet",
                "message": "Testing again for fedmsg.\n",
                "email": "rbean@redhat.com"
            }
        }
    }
class TestInfraGitAnsible(Base):
    """ These messages get published by git repos owned by the Fedora
    Infrastructure team. These are the repos we use to manage the systems that
    run fedoraproject.org.
    In particular, this message is an example of a message from our "ansible"
    repo, which is where we do most of our work.
    """
    # Expected values that the Base test machinery compares against the
    # meta-processor output for the sample ``msg`` payload below.
    expected_title = "infragit.receive"
    expected_subti = 'ralph pushed a commit to the fedora-infra ansible ' + \
        'repo (master): "Testing again for fedmsg."'
    # Unlike puppet, the ansible repo has a public cgit interface, so a
    # commit link is expected.
    expected_link = "https://infrastructure.fedoraproject.org/cgit/" + \
        "ansible.git/commit/" + \
        "?h=master&id=6e7177a1d5fd712cb53ce70acb17b92c5f791f08"
    expected_icon = "https://apps.fedoraproject.org/img/icons/git-logo.png"
    # Libravatar avatar URL for the committer.
    expected_secondary_icon = ("https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c"
        "?s=64&d=retro")
    expected_usernames = set(['ralph'])
    expected_agent = 'ralph'
    expected_packages = set()
    # Objects are "<repo>/<file touched>" pairs derived from the commit stats.
    expected_objects = set(['ansible/test'])

    # Sample fedmsg payload as published by the git post-receive hook.
    msg = {
        "i": 1,
        "timestamp": 1439584387.0,
        "msg_id": "2015-ee126d0a-a89b-4a09-ba15-4ca3382eb8fe",
        "topic": "org.fedoraproject.prod.infragit.receive",
        "source_version": "0.6.5",
        "msg": {
            "commit": {
                "username": "ralph",
                "stats": {
                    "files": {
                        "test": {
                            "deletions": 0,
                            "additions": 0,
                            "lines": 0
                        }
                    },
                    "total": {
                        "deletions": 0,
                        "files": 1,
                        "additions": 0,
                        "lines": 0
                    }
                },
                "name": "Ralph Bean",
                "rev": "6e7177a1d5fd712cb53ce70acb17b92c5f791f08",
                "agent": "ralph",
                "summary": "Testing again for fedmsg.",
                "repo": "",
                "branch": "master",
                "seen": False,
                "path": "/git/ansible",
                "message": "Testing again for fedmsg.\n",
                "email": "rbean@redhat.com"
            }
        }
    }
# Attach the sample message of each test class above as documentation so it
# can be rendered in the fedmsg docs.
add_doc(locals())

if __name__ == '__main__':
    unittest.main()
| fedora-infra/fedmsg_meta_fedora_infrastructure | fedmsg_meta_fedora_infrastructure/tests/infragit.py | Python | lgpl-2.1 | 5,619 |
import socket
from sys import platform
from functools import wraps, partial
from itertools import count, chain
from weakref import WeakValueDictionary
from errno import errorcode
from six import text_type as _text_type
from six import binary_type as _binary_type
from six import integer_types as integer_types
from six import int2byte, indexbytes
from OpenSSL._util import (
ffi as _ffi,
lib as _lib,
exception_from_error_queue as _exception_from_error_queue,
native as _native,
text_to_bytes_and_warn as _text_to_bytes_and_warn,
path_string as _path_string,
UNSPECIFIED as _UNSPECIFIED,
)
from OpenSSL.crypto import (
FILETYPE_PEM, _PassphraseHelper, PKey, X509Name, X509, X509Store)
# Compatibility shims: ``memoryview`` and ``buffer`` do not both exist on
# every supported Python version.  When one is missing, substitute an empty
# placeholder class so that isinstance() checks against it simply return
# False instead of raising NameError.
try:
    _memoryview = memoryview
except NameError:
    class _memoryview(object):
        pass

try:
    _buffer = buffer
except NameError:
    class _buffer(object):
        pass
OPENSSL_VERSION_NUMBER = _lib.OPENSSL_VERSION_NUMBER
SSLEAY_VERSION = _lib.SSLEAY_VERSION
SSLEAY_CFLAGS = _lib.SSLEAY_CFLAGS
SSLEAY_PLATFORM = _lib.SSLEAY_PLATFORM
SSLEAY_DIR = _lib.SSLEAY_DIR
SSLEAY_BUILT_ON = _lib.SSLEAY_BUILT_ON
SENT_SHUTDOWN = _lib.SSL_SENT_SHUTDOWN
RECEIVED_SHUTDOWN = _lib.SSL_RECEIVED_SHUTDOWN
SSLv2_METHOD = 1
SSLv3_METHOD = 2
SSLv23_METHOD = 3
TLSv1_METHOD = 4
TLSv1_1_METHOD = 5
TLSv1_2_METHOD = 6
OP_NO_SSLv2 = _lib.SSL_OP_NO_SSLv2
OP_NO_SSLv3 = _lib.SSL_OP_NO_SSLv3
OP_NO_TLSv1 = _lib.SSL_OP_NO_TLSv1
OP_NO_TLSv1_1 = getattr(_lib, "SSL_OP_NO_TLSv1_1", 0)
OP_NO_TLSv1_2 = getattr(_lib, "SSL_OP_NO_TLSv1_2", 0)
try:
MODE_RELEASE_BUFFERS = _lib.SSL_MODE_RELEASE_BUFFERS
except AttributeError:
pass
OP_SINGLE_DH_USE = _lib.SSL_OP_SINGLE_DH_USE
OP_EPHEMERAL_RSA = _lib.SSL_OP_EPHEMERAL_RSA
OP_MICROSOFT_SESS_ID_BUG = _lib.SSL_OP_MICROSOFT_SESS_ID_BUG
OP_NETSCAPE_CHALLENGE_BUG = _lib.SSL_OP_NETSCAPE_CHALLENGE_BUG
OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG = _lib.SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG
OP_SSLREF2_REUSE_CERT_TYPE_BUG = _lib.SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG
OP_MICROSOFT_BIG_SSLV3_BUFFER = _lib.SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER
try:
OP_MSIE_SSLV2_RSA_PADDING = _lib.SSL_OP_MSIE_SSLV2_RSA_PADDING
except AttributeError:
pass
OP_SSLEAY_080_CLIENT_DH_BUG = _lib.SSL_OP_SSLEAY_080_CLIENT_DH_BUG
OP_TLS_D5_BUG = _lib.SSL_OP_TLS_D5_BUG
OP_TLS_BLOCK_PADDING_BUG = _lib.SSL_OP_TLS_BLOCK_PADDING_BUG
OP_DONT_INSERT_EMPTY_FRAGMENTS = _lib.SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS
OP_CIPHER_SERVER_PREFERENCE = _lib.SSL_OP_CIPHER_SERVER_PREFERENCE
OP_TLS_ROLLBACK_BUG = _lib.SSL_OP_TLS_ROLLBACK_BUG
OP_PKCS1_CHECK_1 = _lib.SSL_OP_PKCS1_CHECK_1
OP_PKCS1_CHECK_2 = _lib.SSL_OP_PKCS1_CHECK_2
OP_NETSCAPE_CA_DN_BUG = _lib.SSL_OP_NETSCAPE_CA_DN_BUG
OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG= _lib.SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG
try:
OP_NO_COMPRESSION = _lib.SSL_OP_NO_COMPRESSION
except AttributeError:
pass
OP_NO_QUERY_MTU = _lib.SSL_OP_NO_QUERY_MTU
OP_COOKIE_EXCHANGE = _lib.SSL_OP_COOKIE_EXCHANGE
try:
OP_NO_TICKET = _lib.SSL_OP_NO_TICKET
except AttributeError:
pass
OP_ALL = _lib.SSL_OP_ALL
VERIFY_PEER = _lib.SSL_VERIFY_PEER
VERIFY_FAIL_IF_NO_PEER_CERT = _lib.SSL_VERIFY_FAIL_IF_NO_PEER_CERT
VERIFY_CLIENT_ONCE = _lib.SSL_VERIFY_CLIENT_ONCE
VERIFY_NONE = _lib.SSL_VERIFY_NONE
SESS_CACHE_OFF = _lib.SSL_SESS_CACHE_OFF
SESS_CACHE_CLIENT = _lib.SSL_SESS_CACHE_CLIENT
SESS_CACHE_SERVER = _lib.SSL_SESS_CACHE_SERVER
SESS_CACHE_BOTH = _lib.SSL_SESS_CACHE_BOTH
SESS_CACHE_NO_AUTO_CLEAR = _lib.SSL_SESS_CACHE_NO_AUTO_CLEAR
SESS_CACHE_NO_INTERNAL_LOOKUP = _lib.SSL_SESS_CACHE_NO_INTERNAL_LOOKUP
SESS_CACHE_NO_INTERNAL_STORE = _lib.SSL_SESS_CACHE_NO_INTERNAL_STORE
SESS_CACHE_NO_INTERNAL = _lib.SSL_SESS_CACHE_NO_INTERNAL
SSL_ST_CONNECT = _lib.SSL_ST_CONNECT
SSL_ST_ACCEPT = _lib.SSL_ST_ACCEPT
SSL_ST_MASK = _lib.SSL_ST_MASK
SSL_ST_INIT = _lib.SSL_ST_INIT
SSL_ST_BEFORE = _lib.SSL_ST_BEFORE
SSL_ST_OK = _lib.SSL_ST_OK
SSL_ST_RENEGOTIATE = _lib.SSL_ST_RENEGOTIATE
SSL_CB_LOOP = _lib.SSL_CB_LOOP
SSL_CB_EXIT = _lib.SSL_CB_EXIT
SSL_CB_READ = _lib.SSL_CB_READ
SSL_CB_WRITE = _lib.SSL_CB_WRITE
SSL_CB_ALERT = _lib.SSL_CB_ALERT
SSL_CB_READ_ALERT = _lib.SSL_CB_READ_ALERT
SSL_CB_WRITE_ALERT = _lib.SSL_CB_WRITE_ALERT
SSL_CB_ACCEPT_LOOP = _lib.SSL_CB_ACCEPT_LOOP
SSL_CB_ACCEPT_EXIT = _lib.SSL_CB_ACCEPT_EXIT
SSL_CB_CONNECT_LOOP = _lib.SSL_CB_CONNECT_LOOP
SSL_CB_CONNECT_EXIT = _lib.SSL_CB_CONNECT_EXIT
SSL_CB_HANDSHAKE_START = _lib.SSL_CB_HANDSHAKE_START
SSL_CB_HANDSHAKE_DONE = _lib.SSL_CB_HANDSHAKE_DONE
class Error(Exception):
    """
    An error occurred in an `OpenSSL.SSL` API.
    """

# Raise an ``Error`` built from whatever is currently on the OpenSSL
# thread-local error queue.
_raise_current_error = partial(_exception_from_error_queue, Error)

class WantReadError(Error):
    """A non-blocking operation must read more data before it can proceed."""
    pass

class WantWriteError(Error):
    """A non-blocking operation must write more data before it can proceed."""
    pass

class WantX509LookupError(Error):
    """A non-blocking operation is waiting on an X509 lookup callback."""
    pass

class ZeroReturnError(Error):
    """The SSL connection has been closed cleanly by the peer."""
    pass

class SysCallError(Error):
    """An I/O error occurred at the OS level underneath the SSL layer."""
    pass
class _CallbackExceptionHelper(object):
    """
    A base class for wrapper classes that allow for intelligent exception
    handling in OpenSSL callbacks.

    :ivar list _problems: Any exceptions that occurred while executing in a
        context where they could not be raised in the normal way.  Typically
        this is because OpenSSL has called into some Python code and requires
        a return value.  The exceptions are saved to be raised later when it
        is possible to do so.
    """
    def __init__(self):
        self._problems = []

    def raise_if_problem(self):
        """
        Raise an exception from the OpenSSL error queue or that was previously
        captured when running a callback.
        """
        if self._problems:
            # Drain the OpenSSL error queue first: the failing callback may
            # have left errors there, but the saved Python exception is the
            # one we actually want to surface.
            try:
                _raise_current_error()
            except Error:
                pass
            raise self._problems.pop(0)
class _VerifyHelper(_CallbackExceptionHelper):
    """
    Wrap a callback such that it can be used as a certificate verification
    callback.
    """
    def __init__(self, callback):
        _CallbackExceptionHelper.__init__(self)

        @wraps(callback)
        def wrapper(ok, store_ctx):
            # Wrap the certificate currently under verification in a
            # Python-level X509 (borrows the pointer; no copy is made).
            cert = X509.__new__(X509)
            cert._x509 = _lib.X509_STORE_CTX_get_current_cert(store_ctx)
            error_number = _lib.X509_STORE_CTX_get_error(store_ctx)
            error_depth = _lib.X509_STORE_CTX_get_error_depth(store_ctx)

            # Recover the Connection that triggered this verification via
            # the SSL object stored in the store context's ex_data slot.
            index = _lib.SSL_get_ex_data_X509_STORE_CTX_idx()
            ssl = _lib.X509_STORE_CTX_get_ex_data(store_ctx, index)
            connection = Connection._reverse_mapping[ssl]

            try:
                result = callback(connection, cert, error_number, error_depth, ok)
            except Exception as e:
                # Exceptions cannot propagate through the C callback; stash
                # them for raise_if_problem() and fail verification.
                self._problems.append(e)
                return 0
            else:
                if result:
                    # The callback accepted the certificate: clear any
                    # pending verification error before reporting success.
                    _lib.X509_STORE_CTX_set_error(store_ctx, _lib.X509_V_OK)
                    return 1
                else:
                    return 0

        self.callback = _ffi.callback(
            "int (*)(int, X509_STORE_CTX *)", wrapper)
class _NpnAdvertiseHelper(_CallbackExceptionHelper):
    """
    Wrap a callback such that it can be used as an NPN advertisement callback.
    """
    def __init__(self, callback):
        _CallbackExceptionHelper.__init__(self)

        @wraps(callback)
        def wrapper(ssl, out, outlen, arg):
            try:
                conn = Connection._reverse_mapping[ssl]
                # The user callback returns a list of protocol bytestrings,
                # e.g. [b'http/1.1', b'spdy/2'].
                protos = callback(conn)

                # Join the protocols into a Python bytestring, length-prefixing
                # each element.
                protostr = b''.join(
                    chain.from_iterable((int2byte(len(p)), p) for p in protos)
                )

                # Save our callback arguments on the connection object.  This is
                # done to make sure that they don't get freed before OpenSSL
                # uses them.  Then, return them appropriately in the output
                # parameters.
                conn._npn_advertise_callback_args = [
                    _ffi.new("unsigned int *", len(protostr)),
                    _ffi.new("unsigned char[]", protostr),
                ]
                outlen[0] = conn._npn_advertise_callback_args[0][0]
                out[0] = conn._npn_advertise_callback_args[1]
                return 0  # SSL_TLSEXT_ERR_OK
            except Exception as e:
                # Cannot raise through the C callback; save the exception
                # and abort the handshake with a fatal alert.
                self._problems.append(e)
                return 2  # SSL_TLSEXT_ERR_ALERT_FATAL

        self.callback = _ffi.callback(
            "int (*)(SSL *, const unsigned char **, unsigned int *, void *)",
            wrapper
        )
class _NpnSelectHelper(_CallbackExceptionHelper):
    """
    Wrap a callback such that it can be used as an NPN selection callback.
    """
    def __init__(self, callback):
        _CallbackExceptionHelper.__init__(self)

        @wraps(callback)
        def wrapper(ssl, out, outlen, in_, inlen, arg):
            try:
                conn = Connection._reverse_mapping[ssl]

                # The string passed to us is actually made up of multiple
                # length-prefixed bytestrings.  We need to split that into a
                # list.
                instr = _ffi.buffer(in_, inlen)[:]
                protolist = []
                while instr:
                    l = indexbytes(instr, 0)  # one-byte length prefix
                    proto = instr[1:l+1]
                    protolist.append(proto)
                    instr = instr[l+1:]

                # Call the callback; it returns the chosen protocol
                # bytestring out of ``protolist``.
                outstr = callback(conn, protolist)

                # Save our callback arguments on the connection object.  This is
                # done to make sure that they don't get freed before OpenSSL
                # uses them.  Then, return them appropriately in the output
                # parameters.
                conn._npn_select_callback_args = [
                    _ffi.new("unsigned char *", len(outstr)),
                    _ffi.new("unsigned char[]", outstr),
                ]
                outlen[0] = conn._npn_select_callback_args[0][0]
                out[0] = conn._npn_select_callback_args[1]
                return 0  # SSL_TLSEXT_ERR_OK
            except Exception as e:
                # Cannot raise through the C callback; save the exception
                # and abort the handshake with a fatal alert.
                self._problems.append(e)
                return 2  # SSL_TLSEXT_ERR_ALERT_FATAL

        self.callback = _ffi.callback(
            "int (*)(SSL *, unsigned char **, unsigned char *, "
            "const unsigned char *, unsigned int, void *)",
            wrapper
        )
class _ALPNSelectHelper(_CallbackExceptionHelper):
    """
    Wrap a callback such that it can be used as an ALPN selection callback.
    """
    def __init__(self, callback):
        _CallbackExceptionHelper.__init__(self)

        @wraps(callback)
        def wrapper(ssl, out, outlen, in_, inlen, arg):
            try:
                conn = Connection._reverse_mapping[ssl]

                # The string passed to us is made up of multiple
                # length-prefixed bytestrings.  We need to split that into a
                # list.
                instr = _ffi.buffer(in_, inlen)[:]
                protolist = []
                while instr:
                    encoded_len = indexbytes(instr, 0)  # one-byte length prefix
                    proto = instr[1:encoded_len + 1]
                    protolist.append(proto)
                    instr = instr[encoded_len + 1:]

                # Call the callback; it must return exactly one of the
                # offered protocol bytestrings.
                outstr = callback(conn, protolist)

                if not isinstance(outstr, _binary_type):
                    raise TypeError("ALPN callback must return a bytestring.")

                # Save our callback arguments on the connection object to make
                # sure that they don't get freed before OpenSSL can use them.
                # Then, return them in the appropriate output parameters.
                conn._alpn_select_callback_args = [
                    _ffi.new("unsigned char *", len(outstr)),
                    _ffi.new("unsigned char[]", outstr),
                ]
                outlen[0] = conn._alpn_select_callback_args[0][0]
                out[0] = conn._alpn_select_callback_args[1]
                return 0  # SSL_TLSEXT_ERR_OK
            except Exception as e:
                # Cannot raise through the C callback; save the exception
                # and abort the handshake with a fatal alert.
                self._problems.append(e)
                return 2  # SSL_TLSEXT_ERR_ALERT_FATAL

        self.callback = _ffi.callback(
            "int (*)(SSL *, unsigned char **, unsigned char *, "
            "const unsigned char *, unsigned int, void *)",
            wrapper
        )
def _asFileDescriptor(obj):
    """
    Extract an integer file descriptor from *obj*.

    :param obj: An integer file descriptor, or any object exposing a
        ``fileno()`` method that returns one.
    :return: The non-negative integer file descriptor.
    :raises TypeError: if no integer descriptor can be obtained.
    :raises ValueError: if the descriptor is negative.
    """
    candidate = obj
    if not isinstance(candidate, integer_types):
        # Not an int: fall back to the fileno() protocol if available.
        fileno = getattr(candidate, "fileno", None)
        if fileno is not None:
            candidate = fileno()

    if not isinstance(candidate, integer_types):
        raise TypeError("argument must be an int, or have a fileno() method.")
    if candidate < 0:
        raise ValueError(
            "file descriptor cannot be a negative integer (%i)" % (candidate,))
    return candidate
def SSLeay_version(type):
    """
    Return a string describing the version of OpenSSL in use.

    :param type: One of the SSLEAY_ constants defined in this module.
    """
    # SSLeay_version() returns a static C string; _ffi.string copies it
    # into a Python byte string.
    return _ffi.string(_lib.SSLeay_version(type))
def _requires_npn(func):
    """
    Wraps any function that requires NPN support in OpenSSL, ensuring that
    NotImplementedError is raised if NPN is not present.
    """
    @wraps(func)
    def checked(*args, **kwargs):
        # The cryptography bindings expose a flag telling us whether the
        # linked OpenSSL was built with NPN support.
        if _lib.Cryptography_HAS_NEXTPROTONEG:
            return func(*args, **kwargs)
        raise NotImplementedError("NPN not available.")
    return checked
def _requires_alpn(func):
    """
    Wraps any function that requires ALPN support in OpenSSL, ensuring that
    NotImplementedError is raised if ALPN support is not present.
    """
    @wraps(func)
    def checked(*args, **kwargs):
        # The cryptography bindings expose a flag telling us whether the
        # linked OpenSSL was built with ALPN support.
        if _lib.Cryptography_HAS_ALPN:
            return func(*args, **kwargs)
        raise NotImplementedError("ALPN not available.")
    return checked
class Session(object):
    """
    An opaque SSL session handle; this class defines no behavior of its own.
    """
    pass
class Context(object):
"""
:py:obj:`OpenSSL.SSL.Context` instances define the parameters for setting up
new SSL connections.
"""
_methods = {
SSLv2_METHOD: "SSLv2_method",
SSLv3_METHOD: "SSLv3_method",
SSLv23_METHOD: "SSLv23_method",
TLSv1_METHOD: "TLSv1_method",
TLSv1_1_METHOD: "TLSv1_1_method",
TLSv1_2_METHOD: "TLSv1_2_method",
}
_methods = dict(
(identifier, getattr(_lib, name))
for (identifier, name) in _methods.items()
if getattr(_lib, name, None) is not None)
def __init__(self, method):
"""
:param method: One of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, or
TLSv1_METHOD.
"""
if not isinstance(method, integer_types):
raise TypeError("method must be an integer")
try:
method_func = self._methods[method]
except KeyError:
raise ValueError("No such protocol")
method_obj = method_func()
if method_obj == _ffi.NULL:
# TODO: This is untested.
_raise_current_error()
context = _lib.SSL_CTX_new(method_obj)
if context == _ffi.NULL:
# TODO: This is untested.
_raise_current_error()
context = _ffi.gc(context, _lib.SSL_CTX_free)
self._context = context
self._passphrase_helper = None
self._passphrase_callback = None
self._passphrase_userdata = None
self._verify_helper = None
self._verify_callback = None
self._info_callback = None
self._tlsext_servername_callback = None
self._app_data = None
self._npn_advertise_helper = None
self._npn_advertise_callback = None
self._npn_select_helper = None
self._npn_select_callback = None
self._alpn_select_helper = None
self._alpn_select_callback = None
# SSL_CTX_set_app_data(self->ctx, self);
# SSL_CTX_set_mode(self->ctx, SSL_MODE_ENABLE_PARTIAL_WRITE |
# SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |
# SSL_MODE_AUTO_RETRY);
self.set_mode(_lib.SSL_MODE_ENABLE_PARTIAL_WRITE)
def load_verify_locations(self, cafile, capath=None):
"""
Let SSL know where we can find trusted certificates for the certificate
chain
:param cafile: In which file we can find the certificates (``bytes`` or
``unicode``).
:param capath: In which directory we can find the certificates
(``bytes`` or ``unicode``).
:return: None
"""
if cafile is None:
cafile = _ffi.NULL
else:
cafile = _path_string(cafile)
if capath is None:
capath = _ffi.NULL
else:
capath = _path_string(capath)
load_result = _lib.SSL_CTX_load_verify_locations(self._context, cafile, capath)
if not load_result:
_raise_current_error()
def _wrap_callback(self, callback):
@wraps(callback)
def wrapper(size, verify, userdata):
return callback(size, verify, self._passphrase_userdata)
return _PassphraseHelper(
FILETYPE_PEM, wrapper, more_args=True, truncate=True)
def set_passwd_cb(self, callback, userdata=None):
"""
Set the passphrase callback
:param callback: The Python callback to use
:param userdata: (optional) A Python object which will be given as
argument to the callback
:return: None
"""
if not callable(callback):
raise TypeError("callback must be callable")
self._passphrase_helper = self._wrap_callback(callback)
self._passphrase_callback = self._passphrase_helper.callback
_lib.SSL_CTX_set_default_passwd_cb(
self._context, self._passphrase_callback)
self._passphrase_userdata = userdata
def set_default_verify_paths(self):
"""
Use the platform-specific CA certificate locations
:return: None
"""
set_result = _lib.SSL_CTX_set_default_verify_paths(self._context)
if not set_result:
# TODO: This is untested.
_raise_current_error()
def use_certificate_chain_file(self, certfile):
"""
Load a certificate chain from a file
:param certfile: The name of the certificate chain file (``bytes`` or
``unicode``).
:return: None
"""
certfile = _path_string(certfile)
result = _lib.SSL_CTX_use_certificate_chain_file(self._context, certfile)
if not result:
_raise_current_error()
def use_certificate_file(self, certfile, filetype=FILETYPE_PEM):
"""
Load a certificate from a file
:param certfile: The name of the certificate file (``bytes`` or
``unicode``).
:param filetype: (optional) The encoding of the file, default is PEM
:return: None
"""
certfile = _path_string(certfile)
if not isinstance(filetype, integer_types):
raise TypeError("filetype must be an integer")
use_result = _lib.SSL_CTX_use_certificate_file(self._context, certfile, filetype)
if not use_result:
_raise_current_error()
def use_certificate(self, cert):
"""
Load a certificate from a X509 object
:param cert: The X509 object
:return: None
"""
if not isinstance(cert, X509):
raise TypeError("cert must be an X509 instance")
use_result = _lib.SSL_CTX_use_certificate(self._context, cert._x509)
if not use_result:
_raise_current_error()
def add_extra_chain_cert(self, certobj):
"""
Add certificate to chain
:param certobj: The X509 certificate object to add to the chain
:return: None
"""
if not isinstance(certobj, X509):
raise TypeError("certobj must be an X509 instance")
copy = _lib.X509_dup(certobj._x509)
add_result = _lib.SSL_CTX_add_extra_chain_cert(self._context, copy)
if not add_result:
# TODO: This is untested.
_lib.X509_free(copy)
_raise_current_error()
def _raise_passphrase_exception(self):
if self._passphrase_helper is None:
_raise_current_error()
exception = self._passphrase_helper.raise_if_problem(Error)
if exception is not None:
raise exception
def use_privatekey_file(self, keyfile, filetype=_UNSPECIFIED):
"""
Load a private key from a file
:param keyfile: The name of the key file (``bytes`` or ``unicode``)
:param filetype: (optional) The encoding of the file, default is PEM
:return: None
"""
keyfile = _path_string(keyfile)
if filetype is _UNSPECIFIED:
filetype = FILETYPE_PEM
elif not isinstance(filetype, integer_types):
raise TypeError("filetype must be an integer")
use_result = _lib.SSL_CTX_use_PrivateKey_file(
self._context, keyfile, filetype)
if not use_result:
self._raise_passphrase_exception()
def use_privatekey(self, pkey):
"""
Load a private key from a PKey object
:param pkey: The PKey object
:return: None
"""
if not isinstance(pkey, PKey):
raise TypeError("pkey must be a PKey instance")
use_result = _lib.SSL_CTX_use_PrivateKey(self._context, pkey._pkey)
if not use_result:
self._raise_passphrase_exception()
def check_privatekey(self):
"""
Check that the private key and certificate match up
:return: None (raises an exception if something's wrong)
"""
if not _lib.SSL_CTX_check_private_key(self._context):
_raise_current_error()
def load_client_ca(self, cafile):
"""
Load the trusted certificates that will be sent to the client (basically
telling the client "These are the guys I trust"). Does not actually
imply any of the certificates are trusted; that must be configured
separately.
:param cafile: The name of the certificates file
:return: None
"""
def set_session_id(self, buf):
"""
Set the session identifier. This is needed if you want to do session
resumption.
:param buf: A Python object that can be safely converted to a string
:returns: None
"""
def set_session_cache_mode(self, mode):
"""
Enable/disable session caching and specify the mode used.
:param mode: One or more of the SESS_CACHE_* flags (combine using
bitwise or)
:returns: The previously set caching mode.
"""
if not isinstance(mode, integer_types):
raise TypeError("mode must be an integer")
return _lib.SSL_CTX_set_session_cache_mode(self._context, mode)
def get_session_cache_mode(self):
"""
:returns: The currently used cache mode.
"""
return _lib.SSL_CTX_get_session_cache_mode(self._context)
def set_verify(self, mode, callback):
"""
Set the verify mode and verify callback
:param mode: The verify mode, this is either VERIFY_NONE or
VERIFY_PEER combined with possible other flags
:param callback: The Python callback to use
:return: None
See SSL_CTX_set_verify(3SSL) for further details.
"""
if not isinstance(mode, integer_types):
raise TypeError("mode must be an integer")
if not callable(callback):
raise TypeError("callback must be callable")
self._verify_helper = _VerifyHelper(callback)
self._verify_callback = self._verify_helper.callback
_lib.SSL_CTX_set_verify(self._context, mode, self._verify_callback)
def set_verify_depth(self, depth):
"""
Set the verify depth
:param depth: An integer specifying the verify depth
:return: None
"""
if not isinstance(depth, integer_types):
raise TypeError("depth must be an integer")
_lib.SSL_CTX_set_verify_depth(self._context, depth)
def get_verify_mode(self):
"""
Get the verify mode
:return: The verify mode
"""
return _lib.SSL_CTX_get_verify_mode(self._context)
def get_verify_depth(self):
"""
Get the verify depth
:return: The verify depth
"""
return _lib.SSL_CTX_get_verify_depth(self._context)
def load_tmp_dh(self, dhfile):
"""
Load parameters for Ephemeral Diffie-Hellman
:param dhfile: The file to load EDH parameters from (``bytes`` or
``unicode``).
:return: None
"""
dhfile = _path_string(dhfile)
bio = _lib.BIO_new_file(dhfile, b"r")
if bio == _ffi.NULL:
_raise_current_error()
bio = _ffi.gc(bio, _lib.BIO_free)
dh = _lib.PEM_read_bio_DHparams(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)
dh = _ffi.gc(dh, _lib.DH_free)
_lib.SSL_CTX_set_tmp_dh(self._context, dh)
def set_tmp_ecdh(self, curve):
"""
Select a curve to use for ECDHE key exchange.
:param curve: A curve object to use as returned by either
:py:meth:`OpenSSL.crypto.get_elliptic_curve` or
:py:meth:`OpenSSL.crypto.get_elliptic_curves`.
:return: None
"""
_lib.SSL_CTX_set_tmp_ecdh(self._context, curve._to_EC_KEY())
def set_cipher_list(self, cipher_list):
"""
Change the cipher list
:param cipher_list: A cipher list, see ciphers(1)
:return: None
"""
if isinstance(cipher_list, _text_type):
cipher_list = cipher_list.encode("ascii")
if not isinstance(cipher_list, bytes):
raise TypeError("cipher_list must be bytes or unicode")
result = _lib.SSL_CTX_set_cipher_list(self._context, cipher_list)
if not result:
_raise_current_error()
def set_client_ca_list(self, certificate_authorities):
"""
Set the list of preferred client certificate signers for this server context.
This list of certificate authorities will be sent to the client when the
server requests a client certificate.
:param certificate_authorities: a sequence of X509Names.
:return: None
"""
name_stack = _lib.sk_X509_NAME_new_null()
if name_stack == _ffi.NULL:
# TODO: This is untested.
_raise_current_error()
try:
for ca_name in certificate_authorities:
if not isinstance(ca_name, X509Name):
raise TypeError(
"client CAs must be X509Name objects, not %s objects" % (
type(ca_name).__name__,))
copy = _lib.X509_NAME_dup(ca_name._name)
if copy == _ffi.NULL:
# TODO: This is untested.
_raise_current_error()
push_result = _lib.sk_X509_NAME_push(name_stack, copy)
if not push_result:
_lib.X509_NAME_free(copy)
_raise_current_error()
except:
_lib.sk_X509_NAME_free(name_stack)
raise
_lib.SSL_CTX_set_client_CA_list(self._context, name_stack)
def add_client_ca(self, certificate_authority):
"""
Add the CA certificate to the list of preferred signers for this context.
The list of certificate authorities will be sent to the client when the
server requests a client certificate.
:param certificate_authority: certificate authority's X509 certificate.
:return: None
"""
if not isinstance(certificate_authority, X509):
raise TypeError("certificate_authority must be an X509 instance")
add_result = _lib.SSL_CTX_add_client_CA(
self._context, certificate_authority._x509)
if not add_result:
# TODO: This is untested.
_raise_current_error()
def set_timeout(self, timeout):
"""
Set session timeout
:param timeout: The timeout in seconds
:return: The previous session timeout
"""
if not isinstance(timeout, integer_types):
raise TypeError("timeout must be an integer")
return _lib.SSL_CTX_set_timeout(self._context, timeout)
def get_timeout(self):
"""
Get the session timeout
:return: The session timeout
"""
return _lib.SSL_CTX_get_timeout(self._context)
def set_info_callback(self, callback):
"""
Set the info callback
:param callback: The Python callback to use
:return: None
"""
@wraps(callback)
def wrapper(ssl, where, return_code):
callback(Connection._reverse_mapping[ssl], where, return_code)
self._info_callback = _ffi.callback(
"void (*)(const SSL *, int, int)", wrapper)
_lib.SSL_CTX_set_info_callback(self._context, self._info_callback)
def get_app_data(self):
"""
Get the application data (supplied via set_app_data())
:return: The application data
"""
return self._app_data
def set_app_data(self, data):
"""
Set the application data (will be returned from get_app_data())
:param data: Any Python object
:return: None
"""
self._app_data = data
def get_cert_store(self):
"""
Get the certificate store for the context.
:return: A X509Store object or None if it does not have one.
"""
store = _lib.SSL_CTX_get_cert_store(self._context)
if store == _ffi.NULL:
# TODO: This is untested.
return None
pystore = X509Store.__new__(X509Store)
pystore._store = store
return pystore
def set_options(self, options):
"""
Add options. Options set before are not cleared!
:param options: The options to add.
:return: The new option bitmask.
"""
if not isinstance(options, integer_types):
raise TypeError("options must be an integer")
return _lib.SSL_CTX_set_options(self._context, options)
def set_mode(self, mode):
"""
Add modes via bitmask. Modes set before are not cleared!
:param mode: The mode to add.
:return: The new mode bitmask.
"""
if not isinstance(mode, integer_types):
raise TypeError("mode must be an integer")
return _lib.SSL_CTX_set_mode(self._context, mode)
def set_tlsext_servername_callback(self, callback):
"""
Specify a callback function to be called when clients specify a server name.
:param callback: The callback function. It will be invoked with one
argument, the Connection instance.
"""
@wraps(callback)
def wrapper(ssl, alert, arg):
callback(Connection._reverse_mapping[ssl])
return 0
self._tlsext_servername_callback = _ffi.callback(
"int (*)(const SSL *, int *, void *)", wrapper)
_lib.SSL_CTX_set_tlsext_servername_callback(
self._context, self._tlsext_servername_callback)
@_requires_npn
def set_npn_advertise_callback(self, callback):
"""
Specify a callback function that will be called when offering `Next
Protocol Negotiation
<https://technotes.googlecode.com/git/nextprotoneg.html>`_ as a server.
:param callback: The callback function. It will be invoked with one
argument, the Connection instance. It should return a list of
bytestrings representing the advertised protocols, like
``[b'http/1.1', b'spdy/2']``.
"""
self._npn_advertise_helper = _NpnAdvertiseHelper(callback)
self._npn_advertise_callback = self._npn_advertise_helper.callback
_lib.SSL_CTX_set_next_protos_advertised_cb(
self._context, self._npn_advertise_callback, _ffi.NULL)
@_requires_npn
def set_npn_select_callback(self, callback):
"""
Specify a callback function that will be called when a server offers
Next Protocol Negotiation options.
:param callback: The callback function. It will be invoked with two
arguments: the Connection, and a list of offered protocols as
bytestrings, e.g. ``[b'http/1.1', b'spdy/2']``. It should return
one of those bytestrings, the chosen protocol.
"""
self._npn_select_helper = _NpnSelectHelper(callback)
self._npn_select_callback = self._npn_select_helper.callback
_lib.SSL_CTX_set_next_proto_select_cb(
self._context, self._npn_select_callback, _ffi.NULL)
@_requires_alpn
def set_alpn_protos(self, protos):
"""
Specify the clients ALPN protocol list.
These protocols are offered to the server during protocol negotiation.
:param protos: A list of the protocols to be offered to the server.
This list should be a Python list of bytestrings representing the
protocols to offer, e.g. ``[b'http/1.1', b'spdy/2']``.
"""
# Take the list of protocols and join them together, prefixing them
# with their lengths.
protostr = b''.join(
chain.from_iterable((int2byte(len(p)), p) for p in protos)
)
# Build a C string from the list. We don't need to save this off
# because OpenSSL immediately copies the data out.
input_str = _ffi.new("unsigned char[]", protostr)
input_str_len = _ffi.cast("unsigned", len(protostr))
_lib.SSL_CTX_set_alpn_protos(self._context, input_str, input_str_len)
@_requires_alpn
def set_alpn_select_callback(self, callback):
"""
Set the callback to handle ALPN protocol choice.
:param callback: The callback function. It will be invoked with two
arguments: the Connection, and a list of offered protocols as
bytestrings, e.g ``[b'http/1.1', b'spdy/2']``. It should return
one of those bytestrings, the chosen protocol.
"""
self._alpn_select_helper = _ALPNSelectHelper(callback)
self._alpn_select_callback = self._alpn_select_helper.callback
_lib.SSL_CTX_set_alpn_select_cb(
self._context, self._alpn_select_callback, _ffi.NULL)
ContextType = Context
class Connection(object):
"""
"""
_reverse_mapping = WeakValueDictionary()
def __init__(self, context, socket=None):
"""
Create a new Connection object, using the given OpenSSL.SSL.Context
instance and socket.
:param context: An SSL Context to use for this connection
:param socket: The socket to use for transport layer
"""
if not isinstance(context, Context):
raise TypeError("context must be a Context instance")
ssl = _lib.SSL_new(context._context)
self._ssl = _ffi.gc(ssl, _lib.SSL_free)
self._context = context
# References to strings used for Next Protocol Negotiation. OpenSSL's
# header files suggest that these might get copied at some point, but
# doesn't specify when, so we store them here to make sure they don't
# get freed before OpenSSL uses them.
self._npn_advertise_callback_args = None
self._npn_select_callback_args = None
# References to strings used for Application Layer Protocol
# Negotiation. These strings get copied at some point but it's well
# after the callback returns, so we have to hang them somewhere to
# avoid them getting freed.
self._alpn_select_callback_args = None
self._reverse_mapping[self._ssl] = self
if socket is None:
self._socket = None
# Don't set up any gc for these, SSL_free will take care of them.
self._into_ssl = _lib.BIO_new(_lib.BIO_s_mem())
self._from_ssl = _lib.BIO_new(_lib.BIO_s_mem())
if self._into_ssl == _ffi.NULL or self._from_ssl == _ffi.NULL:
# TODO: This is untested.
_raise_current_error()
_lib.SSL_set_bio(self._ssl, self._into_ssl, self._from_ssl)
else:
self._into_ssl = None
self._from_ssl = None
self._socket = socket
set_result = _lib.SSL_set_fd(self._ssl, _asFileDescriptor(self._socket))
if not set_result:
# TODO: This is untested.
_raise_current_error()
def __getattr__(self, name):
"""
Look up attributes on the wrapped socket object if they are not found on
the Connection object.
"""
return getattr(self._socket, name)
    def _raise_ssl_error(self, ssl, result):
        """
        Translate the error state of *ssl* after an I/O call that returned
        *result* into the appropriate Python exception, or return normally
        if no error occurred.
        """
        # Exceptions raised inside cffi callbacks cannot propagate through C;
        # the helper objects capture them, so re-raise any pending one first.
        if self._context._verify_helper is not None:
            self._context._verify_helper.raise_if_problem()
        if self._context._npn_advertise_helper is not None:
            self._context._npn_advertise_helper.raise_if_problem()
        if self._context._npn_select_helper is not None:
            self._context._npn_select_helper.raise_if_problem()
        if self._context._alpn_select_helper is not None:
            self._context._alpn_select_helper.raise_if_problem()
        error = _lib.SSL_get_error(ssl, result)
        if error == _lib.SSL_ERROR_WANT_READ:
            raise WantReadError()
        elif error == _lib.SSL_ERROR_WANT_WRITE:
            raise WantWriteError()
        elif error == _lib.SSL_ERROR_ZERO_RETURN:
            raise ZeroReturnError()
        elif error == _lib.SSL_ERROR_WANT_X509_LOOKUP:
            # TODO: This is untested.
            raise WantX509LookupError()
        elif error == _lib.SSL_ERROR_SYSCALL:
            if _lib.ERR_peek_error() == 0:
                if result < 0:
                    # A genuine OS-level error: fetch errno (Windows keeps it
                    # separately from the C errno).
                    if platform == "win32":
                        errno = _ffi.getwinerror()[0]
                    else:
                        errno = _ffi.errno
                    raise SysCallError(errno, errorcode.get(errno))
                else:
                    # result == 0 with an empty error queue means the peer
                    # closed the connection without sending a shutdown alert.
                    raise SysCallError(-1, "Unexpected EOF")
            else:
                # TODO: This is untested.
                _raise_current_error()
        elif error == _lib.SSL_ERROR_NONE:
            # No error: the I/O call succeeded.
            pass
        else:
            _raise_current_error()
def get_context(self):
"""
Get session context
"""
return self._context
def set_context(self, context):
"""
Switch this connection to a new session context
:param context: A :py:class:`Context` instance giving the new session
context to use.
"""
if not isinstance(context, Context):
raise TypeError("context must be a Context instance")
_lib.SSL_set_SSL_CTX(self._ssl, context._context)
self._context = context
def get_servername(self):
"""
Retrieve the servername extension value if provided in the client hello
message, or None if there wasn't one.
:return: A byte string giving the server name or :py:data:`None`.
"""
name = _lib.SSL_get_servername(self._ssl, _lib.TLSEXT_NAMETYPE_host_name)
if name == _ffi.NULL:
return None
return _ffi.string(name)
def set_tlsext_host_name(self, name):
"""
Set the value of the servername extension to send in the client hello.
:param name: A byte string giving the name.
"""
if not isinstance(name, bytes):
raise TypeError("name must be a byte string")
elif b"\0" in name:
raise TypeError("name must not contain NUL byte")
# XXX I guess this can fail sometimes?
_lib.SSL_set_tlsext_host_name(self._ssl, name)
def pending(self):
"""
Get the number of bytes that can be safely read from the connection
:return: The number of bytes available in the receive buffer.
"""
return _lib.SSL_pending(self._ssl)
    def send(self, buf, flags=0):
        """
        Send data on the connection. NOTE: If you get one of the WantRead,
        WantWrite or WantX509Lookup exceptions on this, you have to call the
        method again with the SAME buffer.

        :param buf: The string, buffer or memoryview to send
        :param flags: (optional) Included for compatibility with the socket
            API, the value is ignored
        :return: The number of bytes written
        """
        # Backward compatibility
        buf = _text_to_bytes_and_warn("buf", buf)
        # Normalize memoryview/buffer objects to bytes so len() and the
        # implicit char* conversion below behave consistently.
        if isinstance(buf, _memoryview):
            buf = buf.tobytes()
        if isinstance(buf, _buffer):
            buf = str(buf)
        if not isinstance(buf, bytes):
            raise TypeError("data must be a memoryview, buffer or byte string")
        result = _lib.SSL_write(self._ssl, buf, len(buf))
        # Raises Want*Error/ZeroReturnError/etc. on failure; no-op on success.
        self._raise_ssl_error(self._ssl, result)
        return result
    write = send
    def sendall(self, buf, flags=0):
        """
        Send "all" data on the connection. This calls send() repeatedly until
        all data is sent. If an error occurs, it's impossible to tell how much
        data has been sent.

        :param buf: The string, buffer or memoryview to send
        :param flags: (optional) Included for compatibility with the socket
            API, the value is ignored

        .. note:: NOTE(review): despite older docs claiming a byte count is
            returned, this method returns None -- confirm before relying on a
            return value.
        """
        buf = _text_to_bytes_and_warn("buf", buf)
        # Normalize memoryview/buffer objects to bytes (same as send()).
        if isinstance(buf, _memoryview):
            buf = buf.tobytes()
        if isinstance(buf, _buffer):
            buf = str(buf)
        if not isinstance(buf, bytes):
            raise TypeError("buf must be a memoryview, buffer or byte string")
        left_to_send = len(buf)
        total_sent = 0
        data = _ffi.new("char[]", buf)
        while left_to_send:
            # cffi pointer arithmetic: resume from the first unsent byte.
            result = _lib.SSL_write(self._ssl, data + total_sent, left_to_send)
            self._raise_ssl_error(self._ssl, result)
            total_sent += result
            left_to_send -= result
    def recv(self, bufsiz, flags=None):
        """
        Receive data on the connection. NOTE: If you get one of the WantRead,
        WantWrite or WantX509Lookup exceptions on this, you have to call the
        method again with the SAME buffer.

        :param bufsiz: The maximum number of bytes to read
        :param flags: (optional) The only supported flag is ``MSG_PEEK``,
            all other flags are ignored.
        :return: The string read from the Connection
        """
        buf = _ffi.new("char[]", bufsiz)
        if flags is not None and flags & socket.MSG_PEEK:
            # Peek leaves the data in OpenSSL's buffer for a later read.
            result = _lib.SSL_peek(self._ssl, buf, bufsiz)
        else:
            result = _lib.SSL_read(self._ssl, buf, bufsiz)
        self._raise_ssl_error(self._ssl, result)
        # Copy only the bytes actually read out of the C buffer.
        return _ffi.buffer(buf, result)[:]
    read = recv
def recv_into(self, buffer, nbytes=None, flags=None):
"""
Receive data on the connection and store the data into a buffer rather
than creating a new string.
:param buffer: The buffer to copy into.
:param nbytes: (optional) The maximum number of bytes to read into the
buffer. If not present, defaults to the size of the buffer. If
larger than the size of the buffer, is reduced to the size of the
buffer.
:param flags: (optional) The only supported flag is ``MSG_PEEK``,
all other flags are ignored.
:return: The number of bytes read into the buffer.
"""
if nbytes is None:
nbytes = len(buffer)
else:
nbytes = min(nbytes, len(buffer))
# We need to create a temporary buffer. This is annoying, it would be
# better if we could pass memoryviews straight into the SSL_read call,
# but right now we can't. Revisit this if CFFI gets that ability.
buf = _ffi.new("char[]", nbytes)
if flags is not None and flags & socket.MSG_PEEK:
result = _lib.SSL_peek(self._ssl, buf, nbytes)
else:
result = _lib.SSL_read(self._ssl, buf, nbytes)
self._raise_ssl_error(self._ssl, result)
# This strange line is all to avoid a memory copy. The buffer protocol
# should allow us to assign a CFFI buffer to the LHS of this line, but
# on CPython 3.3+ that segfaults. As a workaround, we can temporarily
# wrap it in a memoryview, except on Python 2.6 which doesn't have a
# memoryview type.
try:
buffer[:result] = memoryview(_ffi.buffer(buf, result))
except NameError:
buffer[:result] = _ffi.buffer(buf, result)
return result
def _handle_bio_errors(self, bio, result):
if _lib.BIO_should_retry(bio):
if _lib.BIO_should_read(bio):
raise WantReadError()
elif _lib.BIO_should_write(bio):
# TODO: This is untested.
raise WantWriteError()
elif _lib.BIO_should_io_special(bio):
# TODO: This is untested. I think io_special means the socket
# BIO has a not-yet connected socket.
raise ValueError("BIO_should_io_special")
else:
# TODO: This is untested.
raise ValueError("unknown bio failure")
else:
# TODO: This is untested.
_raise_current_error()
def bio_read(self, bufsiz):
"""
When using non-socket connections this function reads the "dirty" data
that would have traveled away on the network.
:param bufsiz: The maximum number of bytes to read
:return: The string read.
"""
if self._from_ssl is None:
raise TypeError("Connection sock was not None")
if not isinstance(bufsiz, integer_types):
raise TypeError("bufsiz must be an integer")
buf = _ffi.new("char[]", bufsiz)
result = _lib.BIO_read(self._from_ssl, buf, bufsiz)
if result <= 0:
self._handle_bio_errors(self._from_ssl, result)
return _ffi.buffer(buf, result)[:]
def bio_write(self, buf):
"""
When using non-socket connections this function sends "dirty" data that
would have traveled in on the network.
:param buf: The string to put into the memory BIO.
:return: The number of bytes written
"""
buf = _text_to_bytes_and_warn("buf", buf)
if self._into_ssl is None:
raise TypeError("Connection sock was not None")
result = _lib.BIO_write(self._into_ssl, buf, len(buf))
if result <= 0:
self._handle_bio_errors(self._into_ssl, result)
return result
def renegotiate(self):
"""
Renegotiate the session
:return: True if the renegotiation can be started, false otherwise
"""
def do_handshake(self):
"""
Perform an SSL handshake (usually called after renegotiate() or one of
set_*_state()). This can raise the same exceptions as send and recv.
:return: None.
"""
result = _lib.SSL_do_handshake(self._ssl)
self._raise_ssl_error(self._ssl, result)
def renegotiate_pending(self):
"""
Check if there's a renegotiation in progress, it will return false once
a renegotiation is finished.
:return: Whether there's a renegotiation in progress
"""
def total_renegotiations(self):
"""
Find out the total number of renegotiations.
:return: The number of renegotiations.
"""
return _lib.SSL_total_renegotiations(self._ssl)
def connect(self, addr):
"""
Connect to remote host and set up client-side SSL
:param addr: A remote address
:return: What the socket's connect method returns
"""
_lib.SSL_set_connect_state(self._ssl)
return self._socket.connect(addr)
def connect_ex(self, addr):
"""
Connect to remote host and set up client-side SSL. Note that if the socket's
connect_ex method doesn't return 0, SSL won't be initialized.
:param addr: A remove address
:return: What the socket's connect_ex method returns
"""
connect_ex = self._socket.connect_ex
self.set_connect_state()
return connect_ex(addr)
def accept(self):
"""
Accept incoming connection and set up SSL on it
:return: A (conn,addr) pair where conn is a Connection and addr is an
address
"""
client, addr = self._socket.accept()
conn = Connection(self._context, client)
conn.set_accept_state()
return (conn, addr)
def bio_shutdown(self):
"""
When using non-socket connections this function signals end of
data on the input for this connection.
:return: None
"""
if self._from_ssl is None:
raise TypeError("Connection sock was not None")
_lib.BIO_set_mem_eof_return(self._into_ssl, 0)
def shutdown(self):
"""
Send closure alert
:return: True if the shutdown completed successfully (i.e. both sides
have sent closure alerts), false otherwise (i.e. you have to
wait for a ZeroReturnError on a recv() method call
"""
result = _lib.SSL_shutdown(self._ssl)
if result < 0:
self._raise_ssl_error(self._ssl, result)
elif result > 0:
return True
else:
return False
def get_cipher_list(self):
"""
Get the session cipher list
:return: A list of cipher strings
"""
ciphers = []
for i in count():
result = _lib.SSL_get_cipher_list(self._ssl, i)
if result == _ffi.NULL:
break
ciphers.append(_native(_ffi.string(result)))
return ciphers
def get_client_ca_list(self):
"""
Get CAs whose certificates are suggested for client authentication.
:return: If this is a server connection, a list of X509Names representing
the acceptable CAs as set by :py:meth:`OpenSSL.SSL.Context.set_client_ca_list` or
:py:meth:`OpenSSL.SSL.Context.add_client_ca`. If this is a client connection,
the list of such X509Names sent by the server, or an empty list if that
has not yet happened.
"""
ca_names = _lib.SSL_get_client_CA_list(self._ssl)
if ca_names == _ffi.NULL:
# TODO: This is untested.
return []
result = []
for i in range(_lib.sk_X509_NAME_num(ca_names)):
name = _lib.sk_X509_NAME_value(ca_names, i)
copy = _lib.X509_NAME_dup(name)
if copy == _ffi.NULL:
# TODO: This is untested.
_raise_current_error()
pyname = X509Name.__new__(X509Name)
pyname._name = _ffi.gc(copy, _lib.X509_NAME_free)
result.append(pyname)
return result
def makefile(self):
"""
The makefile() method is not implemented, since there is no dup semantics
for SSL connections
:raise: NotImplementedError
"""
raise NotImplementedError("Cannot make file object of OpenSSL.SSL.Connection")
def get_app_data(self):
"""
Get application data
:return: The application data
"""
return self._app_data
def set_app_data(self, data):
"""
Set application data
:param data - The application data
:return: None
"""
self._app_data = data
def get_shutdown(self):
"""
Get shutdown state
:return: The shutdown state, a bitvector of SENT_SHUTDOWN, RECEIVED_SHUTDOWN.
"""
return _lib.SSL_get_shutdown(self._ssl)
def set_shutdown(self, state):
"""
Set shutdown state
:param state - bitvector of SENT_SHUTDOWN, RECEIVED_SHUTDOWN.
:return: None
"""
if not isinstance(state, integer_types):
raise TypeError("state must be an integer")
_lib.SSL_set_shutdown(self._ssl, state)
    def state_string(self):
        """
        Get a verbose state description

        :return: A string representing the state

        .. note:: NOTE(review): the body is empty, so this currently always
            returns None despite the documented return value.  Looks like an
            unimplemented stub -- confirm against upstream pyOpenSSL before
            relying on it.
        """
def server_random(self):
"""
Get a copy of the server hello nonce.
:return: A string representing the state
"""
if self._ssl.session == _ffi.NULL:
return None
return _ffi.buffer(
self._ssl.s3.server_random,
_lib.SSL3_RANDOM_SIZE)[:]
def client_random(self):
"""
Get a copy of the client hello nonce.
:return: A string representing the state
"""
if self._ssl.session == _ffi.NULL:
return None
return _ffi.buffer(
self._ssl.s3.client_random,
_lib.SSL3_RANDOM_SIZE)[:]
def master_key(self):
"""
Get a copy of the master key.
:return: A string representing the state
"""
if self._ssl.session == _ffi.NULL:
return None
return _ffi.buffer(
self._ssl.session.master_key,
self._ssl.session.master_key_length)[:]
def sock_shutdown(self, *args, **kwargs):
"""
See shutdown(2)
:return: What the socket's shutdown() method returns
"""
return self._socket.shutdown(*args, **kwargs)
def get_peer_certificate(self):
"""
Retrieve the other side's certificate (if any)
:return: The peer's certificate
"""
cert = _lib.SSL_get_peer_certificate(self._ssl)
if cert != _ffi.NULL:
pycert = X509.__new__(X509)
pycert._x509 = _ffi.gc(cert, _lib.X509_free)
return pycert
return None
def get_peer_cert_chain(self):
"""
Retrieve the other side's certificate (if any)
:return: A list of X509 instances giving the peer's certificate chain,
or None if it does not have one.
"""
cert_stack = _lib.SSL_get_peer_cert_chain(self._ssl)
if cert_stack == _ffi.NULL:
return None
result = []
for i in range(_lib.sk_X509_num(cert_stack)):
# TODO could incref instead of dup here
cert = _lib.X509_dup(_lib.sk_X509_value(cert_stack, i))
pycert = X509.__new__(X509)
pycert._x509 = _ffi.gc(cert, _lib.X509_free)
result.append(pycert)
return result
def want_read(self):
"""
Checks if more data has to be read from the transport layer to complete an
operation.
:return: True iff more data has to be read
"""
return _lib.SSL_want_read(self._ssl)
def want_write(self):
"""
Checks if there is data to write to the transport layer to complete an
operation.
:return: True iff there is data to write
"""
return _lib.SSL_want_write(self._ssl)
def set_accept_state(self):
"""
Set the connection to work in server mode. The handshake will be handled
automatically by read/write.
:return: None
"""
_lib.SSL_set_accept_state(self._ssl)
def set_connect_state(self):
"""
Set the connection to work in client mode. The handshake will be handled
automatically by read/write.
:return: None
"""
_lib.SSL_set_connect_state(self._ssl)
def get_session(self):
"""
Returns the Session currently used.
@return: An instance of :py:class:`OpenSSL.SSL.Session` or :py:obj:`None` if
no session exists.
"""
session = _lib.SSL_get1_session(self._ssl)
if session == _ffi.NULL:
return None
pysession = Session.__new__(Session)
pysession._session = _ffi.gc(session, _lib.SSL_SESSION_free)
return pysession
def set_session(self, session):
"""
Set the session to be used when the TLS/SSL connection is established.
:param session: A Session instance representing the session to use.
:returns: None
"""
if not isinstance(session, Session):
raise TypeError("session must be a Session instance")
result = _lib.SSL_set_session(self._ssl, session._session)
if not result:
_raise_current_error()
def _get_finished_message(self, function):
"""
Helper to implement :py:meth:`get_finished` and
:py:meth:`get_peer_finished`.
:param function: Either :py:data:`SSL_get_finished`: or
:py:data:`SSL_get_peer_finished`.
:return: :py:data:`None` if the desired message has not yet been
received, otherwise the contents of the message.
:rtype: :py:class:`bytes` or :py:class:`NoneType`
"""
# The OpenSSL documentation says nothing about what might happen if the
# count argument given is zero. Specifically, it doesn't say whether
# the output buffer may be NULL in that case or not. Inspection of the
# implementation reveals that it calls memcpy() unconditionally.
# Section 7.1.4, paragraph 1 of the C standard suggests that
# memcpy(NULL, source, 0) is not guaranteed to produce defined (let
# alone desirable) behavior (though it probably does on just about
# every implementation...)
#
# Allocate a tiny buffer to pass in (instead of just passing NULL as
# one might expect) for the initial call so as to be safe against this
# potentially undefined behavior.
empty = _ffi.new("char[]", 0)
size = function(self._ssl, empty, 0)
if size == 0:
# No Finished message so far.
return None
buf = _ffi.new("char[]", size)
function(self._ssl, buf, size)
return _ffi.buffer(buf, size)[:]
def get_finished(self):
"""
Obtain the latest `handshake finished` message sent to the peer.
:return: The contents of the message or :py:obj:`None` if the TLS
handshake has not yet completed.
:rtype: :py:class:`bytes` or :py:class:`NoneType`
"""
return self._get_finished_message(_lib.SSL_get_finished)
def get_peer_finished(self):
"""
Obtain the latest `handshake finished` message received from the peer.
:return: The contents of the message or :py:obj:`None` if the TLS
handshake has not yet completed.
:rtype: :py:class:`bytes` or :py:class:`NoneType`
"""
return self._get_finished_message(_lib.SSL_get_peer_finished)
def get_cipher_name(self):
"""
Obtain the name of the currently used cipher.
:returns: The name of the currently used cipher or :py:obj:`None`
if no connection has been established.
:rtype: :py:class:`unicode` or :py:class:`NoneType`
"""
cipher = _lib.SSL_get_current_cipher(self._ssl)
if cipher == _ffi.NULL:
return None
else:
name = _ffi.string(_lib.SSL_CIPHER_get_name(cipher))
return name.decode("utf-8")
def get_cipher_bits(self):
"""
Obtain the number of secret bits of the currently used cipher.
:returns: The number of secret bits of the currently used cipher
or :py:obj:`None` if no connection has been established.
:rtype: :py:class:`int` or :py:class:`NoneType`
"""
cipher = _lib.SSL_get_current_cipher(self._ssl)
if cipher == _ffi.NULL:
return None
else:
return _lib.SSL_CIPHER_get_bits(cipher, _ffi.NULL)
def get_cipher_version(self):
"""
Obtain the protocol version of the currently used cipher.
:returns: The protocol name of the currently used cipher
or :py:obj:`None` if no connection has been established.
:rtype: :py:class:`unicode` or :py:class:`NoneType`
"""
cipher = _lib.SSL_get_current_cipher(self._ssl)
if cipher == _ffi.NULL:
return None
else:
version =_ffi.string(_lib.SSL_CIPHER_get_version(cipher))
return version.decode("utf-8")
def get_protocol_version_name(self):
"""
Obtain the protocol version of the current connection.
:returns: The TLS version of the current connection, for example
the value for TLS 1.2 would be ``TLSv1.2``or ``Unknown``
for connections that were not successfully established.
:rtype: :py:class:`unicode`
"""
version = _ffi.string(_lib.SSL_get_version(self._ssl))
return version.decode("utf-8")
def get_protocol_version(self):
"""
Obtain the protocol version of the current connection.
:returns: The TLS version of the current connection, for example
the value for TLS 1 would be 0x769.
:rtype: :py:class:`int`
"""
version = _lib.SSL_version(self._ssl)
return version
@_requires_npn
def get_next_proto_negotiated(self):
"""
Get the protocol that was negotiated by NPN.
"""
data = _ffi.new("unsigned char **")
data_len = _ffi.new("unsigned int *")
_lib.SSL_get0_next_proto_negotiated(self._ssl, data, data_len)
return _ffi.buffer(data[0], data_len[0])[:]
@_requires_alpn
def set_alpn_protos(self, protos):
"""
Specify the client's ALPN protocol list.
These protocols are offered to the server during protocol negotiation.
:param protos: A list of the protocols to be offered to the server.
This list should be a Python list of bytestrings representing the
protocols to offer, e.g. ``[b'http/1.1', b'spdy/2']``.
"""
# Take the list of protocols and join them together, prefixing them
# with their lengths.
protostr = b''.join(
chain.from_iterable((int2byte(len(p)), p) for p in protos)
)
# Build a C string from the list. We don't need to save this off
# because OpenSSL immediately copies the data out.
input_str = _ffi.new("unsigned char[]", protostr)
input_str_len = _ffi.cast("unsigned", len(protostr))
_lib.SSL_set_alpn_protos(self._ssl, input_str, input_str_len)
def get_alpn_proto_negotiated(self):
"""
Get the protocol that was negotiated by ALPN.
"""
if not _lib.Cryptography_HAS_ALPN:
raise NotImplementedError("ALPN not available")
data = _ffi.new("unsigned char **")
data_len = _ffi.new("unsigned int *")
_lib.SSL_get0_alpn_selected(self._ssl, data, data_len)
if not data_len:
return b''
return _ffi.buffer(data[0], data_len[0])[:]
ConnectionType = Connection
# This is similar to the initialization calls at the end of OpenSSL/crypto.py
# but is exercised mostly by the Context initializer.
_lib.SSL_library_init()
| sorenh/pyopenssl | OpenSSL/SSL.py | Python | apache-2.0 | 64,003 |
# -*- coding: utf-8 -*-
import re
import time
import traceback
from module.plugins.internal.Hook import Hook
from module.utils import decode, remove_chars
class MultiHook(Hook):
__name__ = "MultiHook"
__type__ = "hook"
__version__ = "0.54"
__status__ = "testing"
__config__ = [("pluginmode" , "all;listed;unlisted", "Use for plugins" , "all"),
("pluginlist" , "str" , "Plugin list (comma separated)", "" ),
("reload" , "bool" , "Reload plugin list" , True ),
("reloadinterval", "int" , "Reload interval in hours" , 12 )]
__description__ = """Hook plugin for multi hoster/crypter"""
__license__ = "GPLv3"
__authors__ = [("pyLoad Team" , "admin@pyload.org" ),
("Walter Purcaro", "vuolter@gmail.com")]
MIN_RELOAD_INTERVAL = 1 * 60 * 60 #: 1 hour
DOMAIN_REPLACEMENTS = [(r'180upload\.com' , "hundredeightyupload.com"),
(r'bayfiles\.net' , "bayfiles.com" ),
(r'cloudnator\.com' , "shragle.com" ),
(r'dfiles\.eu' , "depositfiles.com" ),
(r'easy-share\.com' , "crocko.com" ),
(r'freakshare\.net' , "freakshare.com" ),
(r'hellshare\.com' , "hellshare.cz" ),
(r'ifile\.it' , "filecloud.io" ),
(r'nowdownload\.\w+', "nowdownload.sx" ),
(r'nowvideo\.\w+' , "nowvideo.sx" ),
(r'putlocker\.com' , "firedrive.com" ),
(r'share-?rapid\.cz', "multishare.cz" ),
(r'ul\.to' , "uploaded.to" ),
(r'uploaded\.net' , "uploaded.to" ),
(r'uploadhero\.co' , "uploadhero.com" ),
(r'zshares\.net' , "zshare.net" ),
(r'^1' , "one" ),
(r'^2' , "two" ),
(r'^3' , "three" ),
(r'^4' , "four" ),
(r'^5' , "five" ),
(r'^6' , "six" ),
(r'^7' , "seven" ),
(r'^8' , "eight" ),
(r'^9' , "nine" ),
(r'^0' , "zero" )]
def init(self):
self.plugins = []
self.supported = []
self.new_supported = []
self.account = None
self.pluginclass = None
self.pluginmodule = None
self.pluginname = None
self.plugintype = None
self.init_plugin()
    def init_plugin(self):
        """
        Resolve the hoster/crypter plugin this hook extends and cache its
        module and class; deactivate the hook if no such plugin exists.
        """
        # "FooHook" -> "Foo": the hook is named after the plugin it serves.
        self.pluginname = self.__name__.rsplit("Hook", 1)[0]
        plugin, self.plugintype = self.pyload.pluginManager.findPlugin(self.pluginname)
        if plugin:
            self.pluginmodule = self.pyload.pluginManager.loadModule(self.plugintype, self.pluginname)
            self.pluginclass = getattr(self.pluginmodule, self.pluginname)
        else:
            self.log_warning(_("Hook plugin will be deactivated due missing plugin reference"))
            self.set_config('activated', False)
    def load_account(self):
        """
        Fetch the account plugin for the associated hoster and deactivate
        this hook if the plugin requires an account but none is usable.
        """
        self.account = self.pyload.accountManager.getAccountPlugin(self.pluginname)
        if self.account and not self.account.select()[0]:
            # An account plugin exists but no usable account is selected.
            self.account = False
        if not self.account and hasattr(self.pluginclass, "LOGIN_ACCOUNT") and self.pluginclass.LOGIN_ACCOUNT:
            self.log_warning(_("Hook plugin will be deactivated due missing account reference"))
            self.set_config('activated', False)
def activate(self):
self.init_periodical(threaded=True)
    def plugins_cached(self):
        """
        Return the (cached) list of domains this multi-hoster supports.

        Retries the remote hoster-list fetch up to 5 times (one minute
        apart), then applies the user's listed/unlisted plugin filter.

        :return: List of normalized domain names; empty list on total failure.
        """
        if self.plugins:
            return self.plugins
        for _i in xrange(5):
            try:
                pluginset = self._plugin_set(self.get_hosters())
                break
            except Exception, e:
                self.log_warning(e, _("Waiting 1 minute and retry"))
                time.sleep(60)
        else:
            # All 5 attempts failed: schedule the next retry at the minimum
            # reload interval and report no hosters for now.
            self.log_error(_("No hoster list retrieved"))
            self.interval = self.MIN_RELOAD_INTERVAL
            return list()
        try:
            configmode = self.get_config('pluginmode', 'all')
            if configmode in ("listed", "unlisted"):
                # The user list accepts ',', ';' or '|' as separators.
                pluginlist = self.get_config('pluginlist', '').replace('|', ',').replace(';', ',').split(',')
                configset = self._plugin_set(pluginlist)
                if configmode == "listed":
                    pluginset &= configset
                else:
                    pluginset -= configset
        except Exception, e:
            self.log_error(e)
        self.plugins = list(pluginset)
        return self.plugins
def _plugin_set(self, plugins):
regexp = re.compile(r'^[\w\-.^_]{3,63}\.[a-zA-Z]{2,}$', re.U)
plugins = [decode(p.strip()).lower() for p in plugins if regexp.match(p.strip())]
for r in self.DOMAIN_REPLACEMENTS:
rf, rt = r
repr = re.compile(rf, re.I|re.U)
plugins = [re.sub(rf, rt, p) if repr.match(p) else p for p in plugins]
return set(plugins)
def get_hosters(self):
"""
Load list of supported hoster
:return: List of domain names
"""
raise NotImplementedError
    def periodical(self):
        """
        Reload plugin list periodically
        """
        self.load_account()
        if self.get_config('reload', True):
            # Respect the configured interval but never poll faster than the
            # hard minimum of one hour.
            self.interval = max(self.get_config('reloadinterval', 12) * 60 * 60, self.MIN_RELOAD_INTERVAL)
        else:
            # Reload disabled: cancel the scheduled job so this runs once.
            self.pyload.scheduler.removeJob(self.cb)
            self.cb = None
        self.log_info(_("Reloading supported %s list") % self.plugintype)
        old_supported = self.supported
        self.supported = []
        self.new_supported = []
        self.plugins = []
        self.override_plugins()
        # Plugins that were supported before but no longer are must have
        # their override removed.
        old_supported = [plugin for plugin in old_supported if plugin not in self.supported]
        if old_supported:
            self.log_debug("Unload: %s" % ", ".join(old_supported))
            for plugin in old_supported:
                self.unload_plugin(plugin)
    def override_plugins(self):
        """
        Redirect matching hoster/crypter plugins to this multi-hoster plugin.

        Known plugins are overridden in place (``self.supported``); unknown
        domains are added to this plugin's URL pattern (``self.new_supported``).
        Hosters for which the user has a valid premium account are excluded,
        so their own plugin keeps handling them.
        """
        excludedList = []
        if self.plugintype == "hoster":
            pluginMap = dict((name.lower(), name) for name in self.pyload.pluginManager.hosterPlugins.keys())
            accountList = [account.type.lower() for account in self.pyload.api.getAccounts(False) if account.valid and account.premium]
        else:
            pluginMap = {}
            # Strip a trailing "Folder" suffix from crypter names: reverse the
            # string, drop the first (i.e. last) occurrence, reverse back.
            accountList = [name[::-1].replace("Folder"[::-1], "", 1).lower()[::-1] for name in self.pyload.pluginManager.crypterPlugins.keys()]
        for plugin in self.plugins_cached():
            # Plugin class names cannot contain '-' or '.'.
            name = remove_chars(plugin, "-.")
            if name in accountList:
                excludedList.append(plugin)
            else:
                if name in pluginMap:
                    self.supported.append(pluginMap[name])
                else:
                    self.new_supported.append(plugin)
        if not self.supported and not self.new_supported:
            self.log_error(_("No %s loaded") % self.plugintype)
            return
        #: Point every supported plugin's module/name at this multi-hoster.
        self.log_debug("Overwritten %ss: %s" % (self.plugintype, ", ".join(sorted(self.supported))))
        for plugin in self.supported:
            hdict = self.pyload.pluginManager.plugins[self.plugintype][plugin]
            hdict['new_module'] = self.pluginmodule
            hdict['new_name'] = self.pluginname
        if excludedList:
            self.log_info(_("%ss not overwritten: %s") % (self.plugintype.capitalize(), ", ".join(sorted(excludedList))))
        if self.new_supported:
            plugins = sorted(self.new_supported)
            self.log_debug("New %ss: %s" % (self.plugintype, ", ".join(plugins)))
            #: Extend this plugin's own URL pattern to match the new domains.
            regexp = r'.*(?P<DOMAIN>%s).*' % "|".join(x.replace('.', '\.') for x in plugins)
            if hasattr(self.pluginclass, "__pattern__") and isinstance(self.pluginclass.__pattern__, basestring) and "://" in self.pluginclass.__pattern__:
                regexp = r'%s|%s' % (self.pluginclass.__pattern__, regexp)
            self.log_debug("Regexp: %s" % regexp)
            hdict = self.pyload.pluginManager.plugins[self.plugintype][self.pluginname]
            hdict['pattern'] = regexp
            hdict['re'] = re.compile(regexp)
def unload_plugin(self, plugin):
hdict = self.pyload.pluginManager.plugins[self.plugintype][plugin]
if "module" in hdict:
hdict.pop('module', None)
if "new_module" in hdict:
hdict.pop('new_module', None)
hdict.pop('new_name', None)
def deactivate(self):
"""
Remove override for all plugins. Scheduler job is removed by hookmanager
"""
for plugin in self.supported:
self.unload_plugin(plugin)
#: Reset pattern
hdict = self.pyload.pluginManager.plugins[self.plugintype][self.pluginname]
hdict['pattern'] = getattr(self.pluginclass, "__pattern__", r'^unmatchable$')
hdict['re'] = re.compile(hdict['pattern'])
| fayf/pyload | module/plugins/internal/MultiHook.py | Python | gpl-3.0 | 9,986 |
from pythonforandroid.toolchain import Recipe, shprint, get_directory, current_directory, ArchAndroid
from os.path import exists, join
from os import uname
import glob
import sh
class Python3Recipe(Recipe):
    """python-for-android recipe that cross-compiles CPython 3.4 for ARM.

    The recipe patches the upstream tarball for Android, configures it for
    cross compilation with a previously built host python, and copies the
    resulting shared libraries into the distribution's libs dir.
    """

    version = '3.4.2'
    url = 'http://python.org/ftp/python/{version}/Python-{version}.tgz'
    name = 'python3'

    depends = ['hostpython3']
    conflicts = ['python2']

    def prebuild_armeabi(self):
        """Apply the Android patch set once, guarded by a ``.patched`` marker."""
        build_dir = self.get_build_container_dir('armeabi')
        if exists(join(build_dir, '.patched')):
            print('Python3 already patched, skipping.')
            return
        self.apply_patch(join('patches', 'python-{version}-android-libmpdec.patch'.format(version=self.version)))
        self.apply_patch(join('patches', 'python-{version}-android-locale.patch'.format(version=self.version)))
        self.apply_patch(join('patches', 'python-{version}-android-misc.patch'.format(version=self.version)))
        # self.apply_patch(join('patches', 'python-{version}-android-missing-getdents64-definition.patch'.format(version=self.version)))
        self.apply_patch(join('patches', 'python-{version}-cross-compile.patch'.format(version=self.version)))
        self.apply_patch(join('patches', 'python-{version}-python-misc.patch'.format(version=self.version)))
        # Marker so the patches are never applied twice.
        shprint(sh.touch, join(build_dir, '.patched'))

    def build_armeabi(self):
        """Configure, build and install CPython for the armeabi target."""
        if 'sqlite' in self.ctx.recipe_build_order or 'openssl' in self.ctx.recipe_build_order:
            print('sqlite or openssl support not yet enabled in python recipe')
            exit(1)
        hostpython_recipe = Recipe.get_recipe('hostpython3', self.ctx)
        shprint(sh.cp, self.ctx.hostpython, self.get_build_dir('armeabi'))
        shprint(sh.cp, self.ctx.hostpgen, self.get_build_dir('armeabi'))
        hostpython = join(self.get_build_dir('armeabi'), 'hostpython')
        # NOTE(review): this points at the copied 'hostpython' binary, not a
        # separate 'hostpgen' one — looks suspicious, confirm the pgen path.
        hostpgen = join(self.get_build_dir('armeabi'), 'hostpython')
        if exists(join(self.get_build_dir('armeabi'), 'libpython3.4m.so')):
            print('libpython3.4m.so already exists, skipping python build.')
            self.ctx.hostpython = join(self.ctx.build_dir, 'python-install',
                                       'bin', 'python.host')
            return
        with current_directory(self.get_build_dir('armeabi')):
            hostpython_recipe = Recipe.get_recipe('hostpython3', self.ctx)
            # shprint(sh.cp, join(hostpython_recipe.get_recipe_dir(), 'Setup'), 'Modules')
            env = ArchAndroid(self.ctx).get_env()

            # AND: Should probably move these to get_recipe_env for
            # neatness, but the whole recipe needs tidying along these
            # lines
            env['HOSTARCH'] = 'arm-eabi'
            env['BUILDARCH'] = shprint(sh.gcc, '-dumpmachine').stdout
            # env['CFLAGS'] = ' '.join([env['CFLAGS'], '-DNO_MALLINFO'])
            # The values below overwrite the two assignments above.
            env['HOSTARCH'] = 'arm-linux-androideabi'
            env['BUILDARCH'] = 'x86_64-pc-linux-gnu'

            configure = sh.Command('./configure')
            # AND: OFLAG isn't actually set, should it be?
            # shprint(configure,
            #         '--host={}'.format(env['HOSTARCH']),
            #         '--build={}'.format(env['BUILDARCH']),
            #         # 'OPT={}'.format(env['OFLAG']),
            #         '--prefix={}'.format(join(self.ctx.build_dir, 'python-install')),
            #         '--enable-shared',
            #         '--disable-toolbox-glue',
            #         '--disable-framefork',
            #         _env=env)
            # Pre-answer configure checks that cannot run when cross compiling.
            with open('config.site', 'w') as fileh:
                fileh.write('''
ac_cv_file__dev_ptmx=no
ac_cv_file__dev_ptc=no
''')
            shprint(configure,
                    'CROSS_COMPILE_TARGET=yes',
                    'HOSTPYTHON={}'.format(hostpython),
                    'CONFIG_SITE=config.site',
                    '--prefix={}'.format(join(self.ctx.build_dir, 'python-install')),
                    '--host={}'.format(env['HOSTARCH']),
                    '--build={}'.format(env['BUILDARCH']),
                    '--disable-ipv6',
                    '--enable-shared',
                    '--without-ensurepip',
                    _env=env)

            # The python2 build always fails the first time, but python3 seems to work.
            make = sh.Command(env['MAKE'].split(' ')[0])
            # print('First install (expected to fail...')
            # try:
            #     shprint(make, '-j5', 'install', 'HOSTPYTHON={}'.format(hostpython),
            #             'HOSTPGEN={}'.format(hostpgen),
            #             'CROSS_COMPILE_TARGET=yes',
            #             'INSTSONAME=libpython3.7.so',
            #             _env=env)
            # except sh.ErrorReturnCode_2:
            #     print('First python3 make failed. This is expected, trying again.')

            # print('Second install (expected to work)')
            # Dummy files so make does not try to rebuild the host binaries.
            shprint(sh.touch, 'python.exe', 'python')
            # shprint(make, '-j5', 'install', 'HOSTPYTHON={}'.format(hostpython),
            #         'HOSTPGEN={}'.format(hostpgen),
            #         'CROSS_COMPILE_TARGET=yes',
            #         'INSTSONAME=libpython3.7.so',
            #         _env=env)
            shprint(make, '-j5',
                    'CROSS_COMPILE_TARGET=yes',
                    'HOSTPYTHON={}'.format(hostpython),
                    'HOSTPGEN={}'.format(hostpgen),
                    _env=env)
            shprint(make, '-j5', 'install',
                    'CROSS_COMPILE_TARGET=yes',
                    'HOSTPYTHON={}'.format(hostpython),
                    'HOSTPGEN={}'.format(hostpgen),
                    _env=env)

            # if uname()[0] == 'Darwin':
            #     shprint(sh.cp, join(self.get_recipe_dir(), 'patches', '_scproxy.py'),
            #             join(self.get_build_dir(), 'Lib'))
            #     shprint(sh.cp, join(self.get_recipe_dir(), 'patches', '_scproxy.py'),
            #             join(self.ctx.build_dir, 'python-install', 'lib', 'python3.7'))

            print('Ready to copy .so for python arm')
            shprint(sh.cp, 'libpython3.4m.so', self.ctx.libs_dir)
            shprint(sh.cp, 'libpython3.so', self.ctx.libs_dir)

            print('Copying hostpython binary to targetpython folder')
            shprint(sh.cp, self.ctx.hostpython,
                    join(self.ctx.build_dir, 'python-install', 'bin',
                         'python.host'))
            self.ctx.hostpython = join(self.ctx.build_dir, 'python-install',
                                       'bin', 'python.host')

            # Reduce the installed stdlib by removing test suites.
            # NOTE(review): cleanup targets 'python3.7' paths while the build
            # checks for libpython3.4m.so above — version mismatch, confirm.
            # BUG FIX: 'unittest' and 'test' were fused into the single string
            # 'unittest, test' instead of join('unittest', 'test'), so that
            # directory was never removed.
            for dir_name in ('test', join('json', 'tests'), 'lib-tk',
                             join('sqlite3', 'test'), join('unittest', 'test'),
                             join('lib2to3', 'tests'), join('bsddb', 'tests'),
                             join('distutils', 'tests'), join('email', 'test'),
                             'curses'):
                shprint(sh.rm, '-rf', join(self.ctx.build_dir, 'python-install',
                                           'lib', 'python3.7', dir_name))

        # print('python3 build done, exiting for debug')
        # exit(1)
recipe = Python3Recipe()
| lc-soft/python-for-android | pythonforandroid/recipes/python3/__init__.py | Python | mit | 7,303 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
    """Enum metaclass that makes member lookup case-insensitive.

    Both item access (``Cls["name"]``) and attribute access (``Cls.name``)
    are folded to the canonical upper-case member name.
    """

    def __getitem__(self, name):
        # Delegate to EnumMeta after normalizing the key.
        return super().__getitem__(name.upper())

    def __getattr__(cls, name):
        """Return the enum member matching `name`.

        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.
        """
        member = cls._member_map_.get(name.upper())
        if member is None:
            raise AttributeError(name)
        return member
class RoleAssignmentApprovalActorIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The identity type : user/servicePrincipal
    """

    # Azure AD user identity
    USER = "user"
    # Azure AD service principal (application) identity
    SERVICE_PRINCIPAL = "servicePrincipal"
class RoleAssignmentApprovalStepReviewResult(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The decision on the approval stage. This value is initially set to NotReviewed. Approvers can
    take action of Approve/Deny
    """

    APPROVE = "Approve"
    DENY = "Deny"
    # Initial state before any approver has acted
    NOT_REVIEWED = "NotReviewed"
class RoleAssignmentApprovalStepStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """This read-only field specifies the status of an approval.
    """

    NOT_STARTED = "NotStarted"
    IN_PROGRESS = "InProgress"
    COMPLETED = "Completed"
    EXPIRED = "Expired"
    INITIALIZING = "Initializing"
    ESCALATING = "Escalating"
    COMPLETING = "Completing"
    ESCALATED = "Escalated"
| Azure/azure-sdk-for-python | sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2021_01_01_preview/models/_authorization_management_client_enums.py | Python | mit | 2,080 |
"""Support for the (unofficial) Tado API."""
import asyncio
from datetime import timedelta
import logging
from PyTado.interface import Tado
from requests import RequestException
import requests.exceptions
from homeassistant.components.climate.const import PRESET_AWAY, PRESET_HOME
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import Throttle
from .const import (
CONF_FALLBACK,
DATA,
DOMAIN,
INSIDE_TEMPERATURE_MEASUREMENT,
SIGNAL_TADO_UPDATE_RECEIVED,
TEMP_OFFSET,
UPDATE_LISTENER,
UPDATE_TRACK,
)
# Module-level logger for this integration.
_LOGGER = logging.getLogger(__name__)

# Entity platforms forwarded for each config entry.
PLATFORMS = ["binary_sensor", "sensor", "climate", "water_heater"]

# Throttle window for TadoConnector.update; polling itself runs every
# SCAN_INTERVAL, so each poll is allowed to hit the cloud API.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=4)
SCAN_INTERVAL = timedelta(minutes=5)

# YAML configuration is deprecated; setup happens via config entries.
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Tado component."""
    # Make sure the integration's storage bucket exists before entries load.
    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = {}
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Tado from a config entry.

    Logs in to the Tado cloud API, performs an initial data refresh and
    starts background polling, then forwards the entry to all platforms.

    Returns False on invalid credentials and raises ConfigEntryNotReady on
    transient errors so Home Assistant retries later.
    """
    _async_import_options_from_data_if_missing(hass, entry)

    username = entry.data[CONF_USERNAME]
    password = entry.data[CONF_PASSWORD]
    fallback = entry.options.get(CONF_FALLBACK, True)

    tadoconnector = TadoConnector(hass, username, password, fallback)

    try:
        await hass.async_add_executor_job(tadoconnector.setup)
    except KeyError:
        _LOGGER.error("Failed to login to tado")
        return False
    except RuntimeError as exc:
        _LOGGER.error("Failed to setup tado: %s", exc)
        # BUG FIX: this previously `return`ed the ConfigEntryNotReady class
        # object, which is truthy, so a failed setup was reported as success
        # and never retried. Raising matches the branches below.
        raise ConfigEntryNotReady from exc
    except requests.exceptions.Timeout as ex:
        raise ConfigEntryNotReady from ex
    except requests.exceptions.HTTPError as ex:
        if ex.response.status_code > 400 and ex.response.status_code < 500:
            # Client error (bad credentials etc.): retrying will not help.
            _LOGGER.error("Failed to login to tado: %s", ex)
            return False
        raise ConfigEntryNotReady from ex

    # Do first update
    await hass.async_add_executor_job(tadoconnector.update)

    # Poll for updates in the background
    update_track = async_track_time_interval(
        hass,
        lambda now: tadoconnector.update(),
        SCAN_INTERVAL,
    )

    update_listener = entry.add_update_listener(_async_update_listener)

    hass.data[DOMAIN][entry.entry_id] = {
        DATA: tadoconnector,
        UPDATE_TRACK: update_track,
        UPDATE_LISTENER: update_listener,
    }

    for platform in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, platform)
        )

    return True
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
    """Migrate the legacy fallback setting from entry.data into entry.options."""
    if CONF_FALLBACK in entry.options:
        return
    new_options = {**entry.options, CONF_FALLBACK: entry.data.get(CONF_FALLBACK, True)}
    hass.config_entries.async_update_entry(entry, options=new_options)
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry):
    """Handle options update."""
    # Reload the entry so changed options (e.g. fallback) take effect.
    await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    # Unload every platform in parallel; success only if all succeed.
    results = await asyncio.gather(
        *(
            hass.config_entries.async_forward_entry_unload(entry, platform)
            for platform in PLATFORMS
        )
    )
    unload_ok = all(results)

    # Cancel the polling timer and the options-update listener.
    entry_data = hass.data[DOMAIN][entry.entry_id]
    entry_data[UPDATE_TRACK]()
    entry_data[UPDATE_LISTENER]()

    if unload_ok:
        hass.data[DOMAIN].pop(entry.entry_id)

    return unload_ok
class TadoConnector:
    """An object to store the Tado data.

    Wraps the (blocking) PyTado client; all methods here are expected to be
    called from an executor, never directly on the event loop.
    """

    def __init__(self, hass, username, password, fallback):
        """Initialize Tado Connector."""
        self.hass = hass
        self._username = username
        self._password = password
        self._fallback = fallback

        self.home_id = None
        self.tado = None
        self.zones = None
        self.devices = None
        # Latest raw API payloads, keyed first by kind then by id/serial.
        self.data = {
            "device": {},
            "zone": {},
        }

    @property
    def fallback(self):
        """Return fallback flag to Smart Schedule."""
        return self._fallback

    def setup(self):
        """Connect to Tado and fetch the zones."""
        self.tado = Tado(self._username, self._password)
        # NOTE(review): debug mode is always enabled here — confirm intended.
        self.tado.setDebugging(True)
        # Load zones and devices
        self.zones = self.tado.getZones()
        self.devices = self.tado.getDevices()
        self.home_id = self.tado.getMe()["homes"][0]["id"]

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Update the registered zones."""
        for device in self.devices:
            self.update_sensor("device", device["shortSerialNo"])
        for zone in self.zones:
            self.update_sensor("zone", zone["id"])

    def update_sensor(self, sensor_type, sensor):
        """Update the internal data from Tado.

        sensor_type is "device" or "zone"; sensor is the device serial or
        zone id respectively. Dispatches a per-sensor update signal.
        """
        _LOGGER.debug("Updating %s %s", sensor_type, sensor)
        try:
            if sensor_type == "device":
                data = self.tado.getDeviceInfo(sensor)
                if (
                    INSIDE_TEMPERATURE_MEASUREMENT
                    in data["characteristics"]["capabilities"]
                ):
                    # Only temperature-capable devices expose an offset.
                    data[TEMP_OFFSET] = self.tado.getDeviceInfo(sensor, TEMP_OFFSET)
            elif sensor_type == "zone":
                data = self.tado.getZoneState(sensor)
            else:
                _LOGGER.debug("Unknown sensor: %s", sensor_type)
                return
        except RuntimeError:
            _LOGGER.error(
                "Unable to connect to Tado while updating %s %s",
                sensor_type,
                sensor,
            )
            return

        self.data[sensor_type][sensor] = data

        _LOGGER.debug(
            "Dispatching update to %s %s %s: %s",
            self.home_id,
            sensor_type,
            sensor,
            data,
        )
        dispatcher_send(
            self.hass,
            SIGNAL_TADO_UPDATE_RECEIVED.format(self.home_id, sensor_type, sensor),
        )

    def get_capabilities(self, zone_id):
        """Return the capabilities of the devices."""
        return self.tado.getCapabilities(zone_id)

    def reset_zone_overlay(self, zone_id):
        """Reset the zone back to the default operation."""
        self.tado.resetZoneOverlay(zone_id)
        self.update_sensor("zone", zone_id)

    def set_presence(
        self,
        presence=PRESET_HOME,
    ):
        """Set the presence to home or away."""
        if presence == PRESET_AWAY:
            self.tado.setAway()
        elif presence == PRESET_HOME:
            self.tado.setHome()

    def set_zone_overlay(
        self,
        zone_id=None,
        overlay_mode=None,
        temperature=None,
        duration=None,
        device_type="HEATING",
        mode=None,
        fan_speed=None,
        swing=None,
    ):
        """Set a zone overlay.

        Errors from the API are logged, not raised; the zone state is
        refreshed afterwards either way.
        """
        _LOGGER.debug(
            "Set overlay for zone %s: overlay_mode=%s, temp=%s, duration=%s, type=%s, mode=%s fan_speed=%s swing=%s",
            zone_id,
            overlay_mode,
            temperature,
            duration,
            device_type,
            mode,
            fan_speed,
            swing,
        )
        try:
            self.tado.setZoneOverlay(
                zone_id,
                overlay_mode,
                temperature,
                duration,
                device_type,
                "ON",
                mode,
                fanSpeed=fan_speed,
                swing=swing,
            )
        except RequestException as exc:
            _LOGGER.error("Could not set zone overlay: %s", exc)

        self.update_sensor("zone", zone_id)

    def set_zone_off(self, zone_id, overlay_mode, device_type="HEATING"):
        """Set a zone to off."""
        try:
            self.tado.setZoneOverlay(
                zone_id, overlay_mode, None, None, device_type, "OFF"
            )
        except RequestException as exc:
            _LOGGER.error("Could not set zone overlay: %s", exc)

        self.update_sensor("zone", zone_id)

    def set_temperature_offset(self, device_id, offset):
        """Set temperature offset of device."""
        try:
            self.tado.setTempOffset(device_id, offset)
        except RequestException as exc:
            _LOGGER.error("Could not set temperature offset: %s", exc)
| partofthething/home-assistant | homeassistant/components/tado/__init__.py | Python | apache-2.0 | 8,846 |
from django.conf.urls.defaults import *
from .views import *
from .views_misc import ServerInfoView
from .views_auth import LogoutDeviceView, ClientLoginTokenView
from .endpoints.dir_shared_items import DirSharedItemsEndpoint
# URL routing for the Seahub API v2 (Django 1.x `patterns` style).
urlpatterns = patterns('',
    # Connectivity / authentication
    url(r'^ping/$', Ping.as_view()),
    url(r'^auth/ping/$', AuthPing.as_view()),
    url(r'^auth-token/', ObtainAuthToken.as_view()),
    url(r'^server-info/$', ServerInfoView.as_view()),
    url(r'^logout-device/$', LogoutDeviceView.as_view()),
    url(r'^client-login/$', ClientLoginTokenView.as_view()),

    # RESTful API
    # Accounts and search
    url(r'^accounts/$', Accounts.as_view(), name="accounts"),
    url(r'^accounts/(?P<email>\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+)/$', Account.as_view(), name="api2-account"),
    url(r'^account/info/$', AccountInfo.as_view()),
    url(r'^regdevice/$', RegDevice.as_view(), name="regdevice"),
    url(r'^search/$', Search.as_view(), name='api_search'),
    url(r'^search-user/$', SearchUser.as_view(), name='search-user'),
    # Library (repo) management
    url(r'^repos/$', Repos.as_view(), name="api2-repos"),
    url(r'^repos/public/$', PubRepos.as_view(), name="api2-pub-repos"),
    url(r'^repos/(?P<repo_id>[-0-9a-f]{36})/$', Repo.as_view(), name="api2-repo"),
    url(r'^repos/(?P<repo_id>[-0-9a-f]{36})/history/$', RepoHistory.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9a-f]{36})/download-info/$', DownloadRepo.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9a-f]{36})/owner/$', RepoOwner.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9a-f]{36})/public/$', RepoPublic.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9a-f]{36})/upload-link/$', UploadLinkView.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9a-f]{36})/update-link/$', UpdateLinkView.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9a-f]{36})/upload-blks-link/$', UploadBlksLinkView.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9a-f]{36})/update-blks-link/$', UpdateBlksLinkView.as_view()),
    # Files and directories inside a repo
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/file/$', FileView.as_view(), name='FileView'),
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/file/detail/$', FileDetailView.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/file/history/$', FileHistory.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/file/revision/$', FileRevision.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/file/revert/$', FileRevert.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/file/shared-link/$', FileSharedLinkView.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/dir/$', DirView.as_view(), name='DirView'),
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/dir/sub_repo/$', DirSubRepoView.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/dir/share/$', DirShareView.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/dir/shared_items/$', DirSharedItemsEndpoint.as_view(), name="api2-dir-shared-items"),
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/dir/download/$', DirDownloadView.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/thumbnail/$', ThumbnailView.as_view(), name='api2-thumbnail'),
    url(r'^starredfiles/', StarredFileView.as_view(), name='starredfiles'),
    # Sharing
    url(r'^shared-repos/$', SharedRepos.as_view(), name='sharedrepos'),
    url(r'^shared-repos/(?P<repo_id>[-0-9-a-f]{36})/$', SharedRepo.as_view(), name='sharedrepo'),
    url(r'^beshared-repos/$', BeShared.as_view(), name='beshared'),
    url(r'^default-repo/$', DefaultRepoView.as_view(), name='api2-defaultrepo'),
    url(r'^shared-links/$', SharedLinksView.as_view()),
    url(r'^shared-files/$', SharedFilesView.as_view()),
    url(r'^virtual-repos/$', VirtualRepos.as_view()),
    url(r'^repo-tokens/$', RepoTokensView.as_view()),

    url(r'^organization/$', OrganizationView.as_view()),

    # Public share-token access
    url(r'^s/f/(?P<token>[a-f0-9]{10})/$', PrivateSharedFileView.as_view()),
    url(r'^s/f/(?P<token>[a-f0-9]{10})/detail/$', PrivateSharedFileDetailView.as_view()),
    url(r'^f/(?P<token>[a-f0-9]{10})/$', SharedFileView.as_view()),
    url(r'^f/(?P<token>[a-f0-9]{10})/detail/$', SharedFileDetailView.as_view()),
    url(r'^d/(?P<token>[a-f0-9]{10})/dir/$', SharedDirView.as_view()),

    # Groups, contacts, events and messages
    url(r'^groupandcontacts/$', GroupAndContacts.as_view()),

    url(r'^events/$', EventsView.as_view()),
    url(r'^repo_history_changes/(?P<repo_id>[-0-9a-f]{36})/$', RepoHistoryChange.as_view()),
    url(r'^unseen_messages/$', UnseenMessagesCountView.as_view()),
    url(r'^group/msgs/(?P<group_id>\d+)/$', GroupMsgsView.as_view()),
    url(r'^group/(?P<group_id>\d+)/msg/(?P<msg_id>\d+)/$', GroupMsgView.as_view()),
    url(r'^user/msgs/(?P<id_or_email>[^/]+)/$', UserMsgsView.as_view()),
    url(r'^new_replies/$', NewRepliesView.as_view()),

    # Avatars
    url(r'^avatars/user/(?P<user>\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+)/resized/(?P<size>[0-9]+)/$', UserAvatarView.as_view()),
    url(r'^avatars/group/(?P<group_id>\d+)/resized/(?P<size>[0-9]+)/$', GroupAvatarView.as_view()),

    url(r'^groups/$', Groups.as_view()),
    url(r'^groups/(?P<group_id>\d+)/$', Groups.as_view()),
    url(r'^groups/(?P<group_id>\d+)/members/$', GroupMembers.as_view()),
    url(r'^groups/(?P<group_id>\d+)/changes/$', GroupChanges.as_view(), name="api2-group-changes"),
    url(r'^groups/(?P<group_id>\d+)/repos/$', GroupRepos.as_view(), name="api2-grouprepos"),
    url(r'^groups/(?P<group_id>\d+)/repos/(?P<repo_id>[-0-9a-f]{36})/$', GroupRepo.as_view(), name="api2-grouprepo"),

    # HTML fragment endpoints consumed by clients
    url(r'^html/events/$', EventsHtml.as_view()),
    url(r'^html/more_events/$', AjaxEvents.as_view(), name="more_events"),
    url(r'^html/repo_history_changes/(?P<repo_id>[-0-9a-f]{36})/$', RepoHistoryChangeHtml.as_view(), name='api_repo_history_changes'),
    url(r'^html/discussions/(?P<group_id>\d+)/$', DiscussionsHtml.as_view(), name="api_discussions"),
    url(r'^html/discussion/(?P<msg_id>\d+)/$', DiscussionHtml.as_view(), name="api_discussion"),
    url(r'^html/more_discussions/(?P<group_id>\d+)/$', AjaxDiscussions.as_view(), name="more_discussions"),
    url(r'^html/newreply/$', NewReplyHtml.as_view()),
    url(r'^html/usermsgs/(?P<id_or_email>[^/]+)/$', UserMsgsHtml.as_view()),
    url(r'^html/more_usermsgs/(?P<id_or_email>[^/]+)/$', AjaxUserMsgs.as_view(), name="api_more_usermsgs"),

    # Folowing is only for debug, will be removed
    #url(r'^html/newreply2/$', api_new_replies),
    #url(r'^html/events2/$', activity2),
    #url(r'^html/more_events/$', events2, name="more_events"),
    #url(r'^html/repo_history_changes/(?P<repo_id>[-0-9a-f]{36})/$', api_repo_history_changes, name='api_repo_history_changes'),
    #url(r'^html/discussions2/(?P<group_id>\d+)/$', discussions2, name="api_discussions2"),
    #url(r'^html/discussion/(?P<msg_id>\d+)/$', discussion2, name="api_discussion2"),
    #url(r'^html/more_discussions/(?P<group_id>\d+)/$', more_discussions2, name="more_discussions"),
    #url(r'^html/usermsgs2/(?P<id_or_email>[^/]+)/$', api_usermsgs),
    #url(r'^html/more_usermsgs/(?P<id_or_email>[^/]+)/$', api_more_usermsgs, name="api_more_usermsgs"),

    # Deprecated
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/fileops/delete/$', OpDeleteView.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/fileops/copy/$', OpCopyView.as_view()),
    url(r'^repos/(?P<repo_id>[-0-9-a-f]{36})/fileops/move/$', OpMoveView.as_view()),
)
# serve office converter static files
# These routes are only registered when the office converter feature is
# compiled in; otherwise the import below reports it as unavailable.
from seahub.utils import HAS_OFFICE_CONVERTER
if HAS_OFFICE_CONVERTER:
    from seahub.utils import OFFICE_HTML_DIR
    urlpatterns += patterns('',
        url(r'^office-convert/static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': OFFICE_HTML_DIR}, name='api_office_convert_static'),
    )
    urlpatterns += patterns('',
        url(r'^office-convert/status/$', OfficeConvertQueryStatus.as_view()),
    )
    urlpatterns += patterns('',
        url(r'^office-convert/generate/repos/(?P<repo_id>[-0-9-a-f]{36})/$', OfficeGenerateView.as_view()),
    )
| madflow/seahub | seahub/api2/urls.py | Python | apache-2.0 | 7,834 |
import _plotly_utils.basevalidators
class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``heatmap.colorbar.borderwidth`` attribute."""

    def __init__(
        self, plotly_name="borderwidth", parent_name="heatmap.colorbar", **kwargs
    ):
        # Fill in defaults only when the caller did not override them, then
        # forward everything to the generic NumberValidator.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 0)
        kwargs.setdefault("role", "style")
        super(BorderwidthValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable, ln_bounded
from variable_functions import my_attribute_label
class ln_employment_within_DDD_minutes_travel_time_hbw_am_transit_walk(Variable):
    """Natural log of employment_within_DDD_minutes_travel_time_hbw_am_transit_walk"""

    _return_type = "float32"

    def __init__(self, number):
        # DDD in the variable name is replaced by the concrete travel time.
        self.variable_name = 'employment_within_%s_minutes_travel_time_hbw_am_transit_walk' % number
        Variable.__init__(self)

    def dependencies(self):
        # Depends only on the underlying (un-logged) accessibility attribute.
        return [my_attribute_label(self.variable_name)]

    def compute(self, dataset_pool):
        raw_values = self.get_dataset().get_attribute(self.variable_name)
        return ln_bounded(raw_values)
#this is a special case of commercial_sqft_within_walking_distance, so the unit test lives there
#the ln_bounded function is tested in ln_commercial_sqft | christianurich/VIBe2UrbanSim | 3rdparty/opus/src/psrc/zone/ln_employment_within_DDD_minutes_travel_time_hbw_am_transit_walk.py | Python | gpl-2.0 | 984 |
#!/usr/bin/python
from sys import argv
import random
def main():
    """Read node count, edge count and output basename from argv, then generate."""
    node_count = int(argv[1])
    edge_count = int(argv[2])
    out_file = "{}.gra".format(argv[-1])
    gengnm(node_count, edge_count, out_file)
def gengnm(n, m, fn):
    """Generate a random DAG with ``n`` nodes and ``m`` distinct edges.

    A random topological order of the nodes is drawn, and every edge is
    oriented from the earlier to the later node in that order, so the
    result is acyclic. The graph is written to ``fn`` in 'greach' format:
    a magic header line, the node count, then one ``"<src>: <t> ... #"``
    line per node.

    Args:
        n: number of nodes.
        m: number of edges; must not exceed n*(n-1)/2.
        fn: output file name.

    Raises:
        ValueError: if ``m`` exceeds the number of distinct edges a simple
            DAG on ``n`` nodes can hold (the previous code looped forever).
    """
    if m > n * (n - 1) // 2:
        raise ValueError(
            "cannot place {} distinct edges on {} nodes".format(m, n))
    # Seed from OS entropy *before* drawing anything (previously seed() was
    # called after the shuffle, so the shuffle used the import-time seed).
    random.seed()
    adjacency = [[] for _ in range(n)]
    order = list(range(n))
    random.shuffle(order)
    edges = set()  # O(1) duplicate test instead of scanning adjacency lists
    for _ in range(m):
        while True:
            # Redraw *both* endpoints on every retry; the old code kept the
            # source fixed and could spin forever once it was saturated.
            s = random.randrange(0, n)
            t = random.randrange(0, n)
            if t == s:
                continue
            lo, hi = (s, t) if s < t else (t, s)
            edge = (order[lo], order[hi])
            if edge not in edges:
                edges.add(edge)
                adjacency[edge[0]].append(edge[1])
                break
    with open(fn, "w") as f:
        f.write("graph_for_greach\n")
        f.write("{}\n".format(n))
        for src, targets in enumerate(adjacency):
            f.write("{}:".format(src))
            for t in targets:
                f.write(" {}".format(t))
            f.write(" #\n")
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
| fiji-flo/preach | gendag.py | Python | mit | 890 |
# flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Drop the M2M join table for the removed NewsEntry.placeholders field."""
        # Removing M2M table for field placeholders on 'NewsEntry'
        db.delete_table('multilingual_news_newsentry_placeholders')
    def backwards(self, orm):
        """Recreate the NewsEntry.placeholders M2M join table (reverse migration)."""
        # Adding M2M table for field placeholders on 'NewsEntry'
        db.create_table(u'multilingual_news_newsentry_placeholders', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('newsentry', models.ForeignKey(orm[u'multilingual_news.newsentry'], null=False)),
            ('placeholder', models.ForeignKey(orm['cms.placeholder'], null=False))
        ))
        db.create_unique(u'multilingual_news_newsentry_placeholders', ['newsentry_id', 'placeholder_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'multilingual_news.newsentry': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'NewsEntry'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'multilingual_news_contents'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'excerpt': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'multilingual_news_excerpts'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'image_float': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'image_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'image_source_text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'image_source_url': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'image_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'multilingual_news.newsentrytitle': {
'Meta': {'object_name': 'NewsEntryTitle'},
'entry': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['multilingual_news.NewsEntry']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '512'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
u'multilingual_news.recentplugin': {
'Meta': {'object_name': 'RecentPlugin', 'db_table': "u'cmsplugin_recentplugin'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'current_language_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'limit': ('django.db.models.fields.PositiveIntegerField', [], {})
}
}
# South bookkeeping: lists the apps whose frozen model definitions in the
# `models` dict above are complete for this migration.
complete_apps = ['multilingual_news']
| bitmazk/django-multilingual-news | multilingual_news/south_migrations/0007_auto.py | Python | mit | 13,164 |
from .private import cffi
def bisect_to_tolerance(initial_mesh, tolerance):
    """Refine *initial_mesh* by bisection until *tolerance* is met.

    Thin public wrapper: both arguments are forwarded unchanged to the
    private ``cffi`` helper module (presumably a CFFI binding to the
    native crest library — confirm) and its result is returned as-is.
    """
    backend = cffi.bisect_to_tolerance
    return backend(initial_mesh, tolerance)
def threshold(initial_mesh, tolerance, corner_indices, corner_radians):
    """Forward a threshold-refinement request to the private ``cffi`` module.

    Pure delegation: all four arguments are passed through positionally,
    unchanged, and the backend's return value is handed straight back.
    """
    forwarded = (initial_mesh, tolerance, corner_indices, corner_radians)
    return cffi.threshold(*forwarded)
| Andlon/crest | pycrest/refinement.py | Python | mit | 296 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.