repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
tnotstar/pycalcstats | src/stats/utils.py | Python | mit | 3,781 | 0.00238 | #!/usr/bin/env python3
## Copyright (c) 2011 Steven D'Aprano.
## See the file __init__.py for the licence terms for this software.
"""
General utilities used by the stats package.
"""
__all__ = ['add_partial', 'coroutine','minmax']
import collections
import functools
import itertools
import math
# === Exceptions ===
class StatsError(ValueError):
pass
# === Helper functions ===
def sorted_data(func):
"""Decorator to sort data passed to stats functions."""
@functools.wraps(func)
def inner(data, *args, **kwargs):
data = sorted(data)
return func(data, *args, **kwargs)
return inner
def as_sequence(iterable):
"""Helper function to convert iterable arguments into sequences."""
if isinstance(iterable, (list, tuple)): return iterable
else: return list(iterable)
def _generalised_sum(data, func):
"""_generalised_sum(data, func) -> len(data), sum(func(items of data))
Return a two-tuple of the length of data and the sum of func() of the
items of data. If func is None, use just the sum of items of data.
"""
# Try fast path.
try:
count = len(data)
except TypeError:
# Slow path for iterables without len.
# We want to support BIG data streams, so avoid converting to a
# list. Since we need both a count and a sum, we iterate over the
# items and emulate math.fsum ourselves.
ap = add_partial
partials = []
count = 0
if func is None:
# Note: we could check for func is None inside the loop. That
# is much slower. We could also say func = lambda x: x, which
# isn't as bad but still somewhat expensive.
for count, x in enumerate(data, 1):
ap(x, partials)
else:
for count, x in enumerate(data, 1):
ap(func(x), partials)
total = math.fsum(partials)
else: # Fast path continues.
if func is None:
# See comment above.
total = math.fsum(data)
else:
total = math.fsum(func(x) for x in data)
return count, total
# FIXME this may not be accurate enough for 2nd moments (x-m)**2
# A more accurate algorithm may be the compensated version:
# sum2 = sum(x-m)**2) as above
# sumc = sum(x-m) # Should be zero, but may not be.
# total = sum2 - sumc**2/n
def _sum_sq_deviations(data, m):
"""Returns the sum of square deviations (SS).
Helper function for calculating variance.
"""
if m is None:
# Two pass algorithm.
data = as_sequence(data)
n, total = _generalised_sum(data, None)
if n == 0:
return (0, total)
m = total/n
return _generalised_sum(data, lambda x: (x-m)**2)
def _sum_prod_deviations(xydata, mx, my):
"""Returns the sum of the product of deviations (SP).
Helper function for calculating covariance.
"""
if mx is None:
# Two pass algorithm.
xydata = as_sequence(xydata)
nx, sumx = _generalised_sum((t[0] for t in xydata), None)
if nx == 0:
raise StatsError('no data items')
mx = sumx/nx
if my is None:
| # Two pass algorithm.
xydata = as_sequence(xydata)
ny, sumy = _generalised_sum((t[1] for t in xydata), None)
if ny == 0:
raise StatsError('no data items')
my = sumy/ny
return _generalised_sum(xydata, lambda t: (t[0]-mx)*(t[1]-my))
def _validate_int(n):
# This will raise TypeError, Overflow | Error (for infinities) or
# ValueError (for NANs or non-integer numbers).
if n != int(n):
raise ValueError('requires integer value')
# === Generic utilities ===
from stats import minmax, add_partial
|
Shashank95/Intellifarm | logdata.py | Python | mit | 1,310 | 0.010687 | import RPi.GPIO as GPIO
import os
import time
import datetime
import glob
import MySQLdb
from time import strftime
import serial
ser = serial.Serial(
port='/dev/ttyACM0',
baudrate = 9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
counter = 0
GPIO.setmode(GPIO.BOARD)
GPIO.setup(15,GPIO.IN)
# Variables for MySQL
db = MySQLdb.connect(host="localhost", user="root",passwd="deb280794", db="temp_database")
cur = db.cursor()
while True:
x = ser.readline()
f=x.split()
moisture = f[1]
humidity = f[4]
temp = f[7]
print("Moisture: ")
print moisture
print("Humidity: ")
print humidity
print("Temperature: ")
print temp
datetimeWrite = (time.strftime("%Y-%m-%d ") + time.strftime("%H:%M:%S"))
sql = ("""INSERT INTO tempLog (datetime,temperature,humidity,m | oisture) VALUES (%s,%s,%s,%s)""",(datetimeWrite,temp,humidity,moisture))
try:
print "Writing to database..."
# Execute the SQL command
cur.execute(*sql) |
# Commit your changes in the database
db.commit()
print "Write Complete"
except:
# Rollback in case there is any error
db.rollback()
print "Failed writing to database"
time.sleep(0.5)
|
binghongcha08/pyQMD | setup.py | Python | gpl-3.0 | 324 | 0 | from setuptools | import setup
setup(name='scitools',
version='0.1',
description='The funniest joke in the world',
url='http://github.com/storborg/funniest',
author='Flying Circus',
author_email='flyingcircus@example.com',
license='MIT',
packages= | ['scitools'],
zip_safe=False)
|
debugger06/MiroX | osx/build/bdist.macosx-10.5-fat/lib.macosx-10.5-fat-2.7/miro/importmedia.py | Python | gpl-2.0 | 4,213 | 0.000949 | # Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
"""``miro.importmedia`` -- functions for importing from other music jukeboxes.
"""
import os
import urllib
import xml.sax
import xml.sax.handler
import xml.sax.saxutils
ITUNES_XML_FILE = "iTunes Music Library.xml"
class iTunesMusicLibraryContentHandler(xml.sax.handler.ContentHandler):
"""A very specific content handler for handling the iTunes music
library file.
We only care about snarfing the music path then we bail out.
This is done by getting the value <key>Music Folder</key>
which will appear as <string>...</string>.
"""
MUSIC_FOLDER_KEYNAME = "Music Folder"
def __init__(self):
self.element_name = None
self.in_music_folder_key = False
self.music_path = None
def startElement(self, name, attrs):
self.element_name = name
def characters(self, content):
if self.element_name == 'key' and content == self.MUSIC_FOLDER_KEYNAME:
self.in_music_folder_key = True
return
if self.in_music_folder_key and self.element_name == 'string':
# Must convert to string content - otherwise we get into unicode
# troubles when we unquote the URI escapes.
self.music_path = str(content)
self.in_music_folder_key = False
return
# Wasn't followed with a <string> as expected...
self.in_music_folder_key = False
def file_path_xlat(path):
"""Convert iTunes path to what we can handle."""
try:
file_url_bits = "file://localhost"
if not path.startswith(file_url_bits):
return None
path = urllib.url2pathname(path[len(file_url_bits):])
if isinstance(path, str):
return path.decode('utf-8')
return path
# bad path catchall
except StandardError:
return None
def import_itunes_path(path):
"""Look for a specified iTunes Music Library.xml file from the specified
path. Returns the path of the music library as sp | ecified in the
iTunes settings or None if it cannot find the xml file, or does not
contain the path for some reason."""
music_path = None
try:
parser = xml.sax.make_parser()
| handler = iTunesMusicLibraryContentHandler()
parser.setContentHandler(handler)
# Tell xml.sax that we don't want to parse external entities, as it
# will stall when no network is installed.
parser.setFeature(xml.sax.handler.feature_external_ges, False)
parser.setFeature(xml.sax.handler.feature_external_pes, False)
parser.parse(os.path.join(path, ITUNES_XML_FILE))
music_path = file_path_xlat(handler.music_path)
return music_path
except (IOError, xml.sax.SAXParseException):
pass
|
znoland3/zachdemo | venvdir/lib/python3.4/site-packages/mako/ext/autohandler.py | Python | mit | 1,829 | 0 | # ext/autohandler.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""adds autohandler functionality to Mako templates.
requires t | hat the TemplateLookup class is used with templates.
usage:
<%!
from mako.ext.autohandler import autohandler
%>
<%inherit file="${autohandler(template, context)}"/>
or with custom autohandler filename:
<%!
from mako.ext.autohandler import autohandler
%>
<%inherit file="${autohan | dler(template, context, name='somefilename')}"/>
"""
import posixpath
import os
import re
def autohandler(template, context, name='autohandler'):
lookup = context.lookup
_template_uri = template.module._template_uri
if not lookup.filesystem_checks:
try:
return lookup._uri_cache[(autohandler, _template_uri, name)]
except KeyError:
pass
tokens = re.findall(r'([^/]+)', posixpath.dirname(_template_uri)) + [name]
while len(tokens):
path = '/' + '/'.join(tokens)
if path != _template_uri and _file_exists(lookup, path):
if not lookup.filesystem_checks:
return lookup._uri_cache.setdefault(
(autohandler, _template_uri, name), path)
else:
return path
if len(tokens) == 1:
break
tokens[-2:] = [name]
if not lookup.filesystem_checks:
return lookup._uri_cache.setdefault(
(autohandler, _template_uri, name), None)
else:
return None
def _file_exists(lookup, path):
psub = re.sub(r'^/', '', path)
for d in lookup.directories:
if os.path.exists(d + '/' + psub):
return True
else:
return False
|
GBlomqvist/blender-Wirebomb | src/ops.py | Python | gpl-3.0 | 4,735 | 0.001901 | # Copyright (C) 2020 Gustaf Blomqvist
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# <pep8 compliant>
from operator import attrgetter
from time import time
import bpy
from . import utils
from . import wirebomb
class WIREBOMB_OT_set_up(bpy.types.Operator):
"""Set up scene"""
bl_label = "Set Up"
bl_idname = 'wirebomb.set_up'
def execute(self, context):
start = time()
wirebomb_scene = wirebomb.Wirebomb(context.scene)
error_msg = wirebomb_scene.set_up_new()
if error_msg:
self.report({'ERROR'}, error_msg)
return {'CANCELLED'}
self.report({'INFO'}, "Setup done in {} seconds!".format(round(time() - start, 3)))
return {'FINISHED'}
def list_add_collection(scene, list_prop, collection):
"""
Adds a collection to a list in the addon's UI.
:param scene: The Blender scene.
:param list_prop: Name of the property for the UI list, e.g. 'collections_affected'.
:param collection: The collection to add to the list.
"""
master_coll = scene.collection
ui_list = getattr(scene.wirebomb, list_prop)
if collection not in map(attrgetter('value'), ui_list):
item = ui_list.add()
# FIXME: See FIXME in ui.py
if collection == master_coll:
item.value = master_coll
item.name = master_coll.name
else:
# not using item.name since it's not necessary, and hard to update when changed elsewhere
item.value = collection
setattr(scene.wirebomb, list_prop + '_active', len(ui_list) - 1)
def list_remove_collection(sce | ne, list_prop, collection_index):
"""
Removes a collection from one of the lists in the UI.
:param scene: The Blender scene.
:param list_prop: Name of the property for the UI list, e.g. 'collections_affected'.
:param | collection_index: Index of the collection to remove from the list.
"""
ui_list = getattr(scene.wirebomb, list_prop)
ui_list.remove(collection_index)
# decrement active index if last was removed
attr_active_index = list_prop + '_active'
active_index = getattr(scene.wirebomb, attr_active_index)
if len(ui_list) == active_index:
setattr(scene.wirebomb, attr_active_index, active_index - 1)
# TODO: Handle warning in API:
# There is a known bug with using a callback,
# Python must keep a reference to the strings returned by the callback or Blender will misbehave or even crash.
def get_collections(_self, context):
# according to docs, context may be None
if not context:
return ()
master_coll = context.scene.collection
scene_collections = utils.get_collection_hierarchy(master_coll)
next(scene_collections)
collection_tuples = [(master_coll.name, utils.SCENE_COLL_NAME, '', 'GROUP', 0)]
collection_tuples.extend([(col.name, col.name, '', 'GROUP', i) for i, col in enumerate(scene_collections, 1)])
return collection_tuples
class WIREBOMB_OT_add_collection(bpy.types.Operator):
"""Add a collection from this scene"""
bl_label = "Add Collection"
bl_idname = 'wirebomb.add_collection'
bl_property = 'collection'
list: bpy.props.StringProperty()
collection: bpy.props.EnumProperty(items=get_collections)
def invoke(self, context, _event):
context.window_manager.invoke_search_popup(self)
return {'FINISHED'}
def execute(self, context):
list_add_collection(context.scene, self.list, utils.collection_from_name(context.scene, self.collection))
return {'FINISHED'}
class WIREBOMB_OT_remove_collection(bpy.types.Operator):
"""Remove the selected collection"""
bl_label = "Remove Collection"
bl_idname = 'wirebomb.remove_collection'
list: bpy.props.StringProperty()
def execute(self, context):
list_remove_collection(context.scene, self.list, getattr(context.scene.wirebomb, self.list + '_active'))
return {'FINISHED'}
classes = (
WIREBOMB_OT_set_up,
WIREBOMB_OT_add_collection,
WIREBOMB_OT_remove_collection,
)
register, unregister = bpy.utils.register_classes_factory(classes)
|
gabobert/climin | climin/__init__.py | Python | bsd-3-clause | 899 | 0 | from __future__ imp | ort absolute_import
# What follows is part of a hack to make control breaking work on windows even
# if scipy.stats ims imported. See:
# http://stackoverflow.com/questions/15457786/ctrl-c-crashes-python-after-importing-scipy-stats
import sys
import os
import imp
import ctypes
if sys.platform == 'win32':
basepath = imp.find_module('numpy')[1]
ctypes.CDLL(os.path.join(basepath, 'core', 'libmmd.dll'))
ctypes.CDLL(os.path.join(basepath, 'core', 'libif | coremd.dll'))
from .adadelta import Adadelta
from .adam import Adam
from .asgd import Asgd
from .bfgs import Bfgs, Lbfgs, Sbfgs
from .cg import ConjugateGradient, NonlinearConjugateGradient
from .gd import GradientDescent
from .nes import Xnes
from .rmsprop import RmsProp
from .rprop import Rprop
from .smd import Smd
from radagrad import Radagrad
from adagrad import Adagrad
from adagrad_full import AdagradFull
|
ashaarunkumar/spark-tk | python/sparktk/dicom/ops/drop_rows_by_keywords.py | Python | apache-2.0 | 4,404 | 0.003005 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def drop_rows_by_keywords(self, keywords_values_dict):
"""
Drop the rows based on dictionary of {"keyword":"value"}(applying 'and' operation on dictionary) from column holding xml string.
Ex: keywords_values_dict -> {"SOPInstanceUID":"1.3.6.1.4.1.14519.5.2.1.7308.2101.234736319276602547946349519685", "Manufacturer":"SIEMENS", "StudyDate":"20030315"}
Parameters
----------
:param keywords_values_dict: (dict(str, str)) dictionary of keywords and values from xml string in metadata
Examples
--------
>>> dicom_path = "../datasets/dicom_uncompressed"
>>> dicom = tc.dicom.import_dcm(dicom_path)
>>> dicom.metadata.count()
3
<skip>
>>> dicom.metadata.inspect(truncate=30)
[#] id metadata
=======================================
[0] 0 <?xml version="1.0" encodin...
[1] 1 <?xml version="1.0" encodin...
[2] 2 <?xml version="1.0" encodin...
</skip>
#Part of xml string looks as below
<?xml version="1.0" encoding="UTF-8"?>
<NativeDicomModel xml:space="preserve">
<DicomAttribute keyword="FileMetaInformationVersion" tag="00020001" vr="OB"><InlineBinary>AAE=</InlineBinary></DicomAttribute>
<DicomAttribute keyword="MediaStorageSOPClassUID" tag="00020002" vr="UI"><Value number="1">1.2.840.10008.5.1.4.1.1.4</Value></ | DicomAttribute>
<DicomAttribute keyword="MediaStorageSOPInstanceUID" tag="00020003" vr="UI"><Value number="1">1.3.6.1.4.1.14519.5.2.1.7308.2101.234736319276602547946349519685</Value></DicomAttribute>
...
>>> keywords_values_dict = {"SOPInstanceUID":"1.3.6.1.4.1.14519.5.2.1.7308.2101.234736319276602547946349519685", "Manufacturer":"SIEMENS", "StudyDate":"20030315"}
| >>> dicom.drop_rows_by_keywords(keywords_values_dict)
>>> dicom.metadata.count()
2
<skip>
#After drop_rows
>>> dicom.metadata.inspect(truncate=30)
[#] id metadata
=======================================
[0] 1 <?xml version="1.0" encodin...
[1] 2 <?xml version="1.0" encodin...
>>> dicom.pixeldata.inspect(truncate=30)
[#] id imagematrix
===========================================================
[0] 1 [[ 0. 0. 0. ..., 0. 0. 0.]
[ 0. 70. 85. ..., 215. 288. 337.]
[ 0. 63. 72. ..., 228. 269. 317.]
...,
[ 0. 42. 40. ..., 966. 919. 871.]
[ 0. 42. 33. ..., 988. 887. 860.]
[ 0. 46. 38. ..., 983. 876. 885.]]
[1] 2 [[ 0. 0. 0. ..., 0. 0. 0.]
[ 0. 111. 117. ..., 159. 148. 135.]
[ 0. 116. 111. ..., 152. 138. 139.]
...,
[ 0. 49. 18. ..., 1057. 965. 853.]
[ 0. 42. 20. ..., 1046. 973. 891.]
[ 0. 48. 26. ..., 1041. 969. 930.]]
</skip>
"""
if not isinstance(keywords_values_dict, dict):
raise TypeError("keywords_values_dict should be a type of dict, but found type as %" % type(keywords_values_dict))
for key, value in keywords_values_dict.iteritems():
if not isinstance(key, basestring) or not isinstance(value, basestring):
raise TypeError("both keyword and value should be of <type 'str'>")
#Always scala dicom is invoked, as python joins are expensive compared to serailizations.
def f(scala_dicom):
scala_dicom.dropRowsByKeywords(self._tc.jutils.convert.to_scala_map(keywords_values_dict))
self._call_scala(f) |
redbo/swift | releasenotes/source/conf.py | Python | apache-2.0 | 10,523 | 0.000095 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# swift documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 3 17:01:55 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import datetime
from swift import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'oslosphinx',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Swift Release Notes'
copyright = u'%d, OpenStack Foundation' % datetime.datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places througho | ut the
# built documents.
#
# The short X.Y version.
version = __version__.rsplit(' | .', 1)[0]
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
# todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'swift v2.10.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'SwiftReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
# latex_elements = {
# # The paper size ('letterpaper' or 'a4paper').
# #
# # 'papersize': 'letterpaper',
# # The font size ('10pt', '11pt' or '12pt').
# #
# # 'pointsize': '10pt',
# # Additional s |
tryton/calendar_classification | calendar_.py | Python | gpl-3.0 | 5,284 | 0.001893 | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import vobject
from trytond.tools import reduce_ids, grouped_slice
from trytond.transaction import Transaction
from trytond.pool import Pool, PoolMeta
__all__ = ['Event']
__metaclass__ = PoolMeta
class Event:
__name__ = 'calendar.event'
@classmethod
def __setup__(cls):
super(Event, cls).__setup__()
cls._error_messages.update({
'transparent': 'Free',
'opaque': 'Busy',
})
@classmethod
def search(cls, domain, offset=0, limit=None, order=None, count=False,
query=False):
if Transaction().user:
domain = domain[:]
domain = [domain,
['OR',
[
('classification', '=', 'private'),
['OR',
('calendar.owner', '=', Transaction().user),
('calendar.write_users', '=', Transaction().user),
],
],
('classification', '!=', 'private'),
],
]
records = super(Event, cls).search(domain, offset=offset, limit=limit,
order=order, count=count, query=query)
if Transaction().user:
# Clear the cache as it was not cleaned for confidential
cache = Transaction().get_cache()
cache.pop(cls.__name__, None)
return records
@classmethod
def create(cls, vlist):
events = super(Event, cls).create(vlist)
if (cls.search([('id', 'in', [x.id for x in events])], count=True)
!= len(events)):
cls.raise_user_error('access_error', cls.__doc__)
return events
@classmethod
def _clean_confidential(cls, record, transp):
'''
Clean confidential record
'''
summary = cls.raise_user_error(transp, raise_exception=False)
if 'summary' in record:
record['summary'] = summary
vevent = None
if 'vevent' in record:
vevent = record['vevent']
if vevent:
vevent = vobject.readOne(str(vevent))
if hasattr(vevent, 'summary'):
vevent.summary.value = summary
for field, value in (
('description', ''),
('categories', []),
('location', None),
('status', ''),
('organizer', ''),
('attendees', []),
('alarms', [])):
if field in record:
record[field] = value
if field + '.rec_name' in record:
record[field + '.rec_name'] = ''
if vevent:
if hasattr(vevent, field):
delattr(vevent, field)
if vevent:
record['vevent'] = vevent.serialize()
@classmethod
def read(cls, ids, fields_names=None):
Rule = Pool().get('ir.rule')
cursor = Transaction().connection.cursor()
table = cls.__table__()
if len(set(ids)) != cls.search([('id', 'in', ids)],
count=True):
cls.raise_user_error('access_error', cls.__doc__)
writable_ids = []
domain = Rule.query_get(cls.__name__, mode='write')
if domain:
for sub_ids in grouped_slice(ids):
red_sql = reduce_ids(table.id, sub_ids)
cursor.execute(*table.select(table.id,
where=red_sql & table.id.in_(domain)))
writable_ids.extend(x[0] for x in cursor.fetchall())
else:
writable_ids = ids
writable_ids = set(writable_ids | )
if fields_names is None:
fields_names = []
fields_names = fields_names[:]
to_remove = set()
for field in ('classification', 'calendar', 'transp'):
if field not in fields_names:
f | ields_names.append(field)
to_remove.add(field)
res = super(Event, cls).read(ids, fields_names=fields_names)
for record in res:
if record['classification'] == 'confidential' \
and record['id'] not in writable_ids:
cls._clean_confidential(record, record['transp'])
for field in to_remove:
del record[field]
return res
@classmethod
def write(cls, *args):
for events in args[::2]:
if len(set(events)) != cls.search([('id', 'in', map(int, events))],
count=True):
cls.raise_user_error('access_error', cls.__doc__)
super(Event, cls).write(*args)
for events in args[::2]:
if len(set(events)) != cls.search([('id', 'in', map(int, events))],
count=True):
cls.raise_user_error('access_error', cls.__doc__)
@classmethod
def delete(cls, events):
if len(set(events)) != cls.search([('id', 'in', map(int, events))],
count=True):
cls.raise_user_error('access_error', cls.__doc__)
super(Event, cls).delete(events)
|
jasonwee/asus-rt-n14uhp-mrtg | src/lesson_text/textwrap_fill.py | Python | apache-2.0 | 102 | 0 | import textwrap |
from textwrap_example import sample_te | xt
print(textwrap.fill(sample_text, width=50))
|
cdcq/jzyzj | test/test_controller.py | Python | mit | 932 | 0.001073 | import unittest
import syzoj
import hashlib
from random import randint
class TestRegister(unittest.TestCase):
def md5_pass(self, password):
md5 | = hashlib.md5()
md5.update(password)
return md5.hexdigest()
def test_register(self):
user = "tester_%d" % randint(1, int(1e9))
pw = self.md5_pass("123_%d" % randint(1, 100))
email = "84%d@qq.com" % randint(1, 10000)
print user, pw, email
self.assertEqual(syzoj.c | ontroller.register(user, pw, email), 1)
self.assertNotEqual(syzoj.controller.register(user, pw, email), 1)
def test_multiple_register(self):
rid = randint(1, 10000)
for i in range(1, 2):
pw = self.md5_pass("123_%d_%d" % (rid, i))
print i, pw
self.assertEqual(syzoj.controller.register("hello_%d_%d" % (rid, i), pw, "%d@qq.com" % i), 1)
if __name__ == "__main__":
unittest.main()
|
telegraphic/fits2hdf | fits2hdf/unit_conversion.py | Python | mit | 2,314 | 0.000432 | # -*- coding: utf-8 -*-
"""
unit_conversion.py
==================
Functions for checking and sanitizing units that do not follow the FITS specification.
This uses functions from ``astropy.unit`` to parse and handle units.
"""
import warnings
from astropy.units import Unit, UnrecognizedUnit
from astropy.io.fits.verify import VerifyWarning
class UnitWarning(VerifyWarning):
"""
Unit warning class
Used when units do not parse or parse oddly
"""
def fits_to_units(unit_str):
""" Do a lookup from a astropy unit and return a fits unit string
unit_str (str): a FITS unit string
returns an astropy.units.Unit(), or UnrecognizedUnit()
Notes
-----
This will attempt to correct some common mistakes in the FITS format.
"""
unit_lookup = {
'meters': 'm',
'meter': 'm',
'degrees': 'deg',
'degree': 'deg',
'hz': 'Hz',
'hertz': 'Hz',
'second': 's',
'sec': 's',
'secs' | : ' | s',
'days': 'd',
'day': 'd',
'steradians': 'sr',
'steradian': 'sr',
'radians': 'rad',
'radian': 'rad',
'jy': 'Jy',
'au': 'AU',
}
try:
new_units = ""
if unit_str is None:
unit_str = ''
unit_str = unit_str.lower()
unit_list = unit_str.split("/")
for uu in unit_list:
if uu.endswith("s") and len(uu) > 1:
uu = uu[:-1]
corrected_unit = unit_lookup.get(uu, uu)
new_units += corrected_unit
new_units += " / "
new_units = new_units[:-3]
unit = Unit(new_units)
return unit
except ValueError:
warnings.warn("Unknown unit: %s" % new_units, UnitWarning)
return UnrecognizedUnit(unit_str)
def units_to_fits(unit):
""" Convert an astropy unit to a FITS format string.
uses the to_string() method built-in to astropy Unit()
Notes
-----
The output will be the format defined in the FITS standard:
http://fits.gsfc.nasa.gov/fits_standard.html
A roundtrip from fits_to_units -> units_to_fits may not return
the original string, as people often don't follow the standard.
"""
if unit is None:
unit = Unit('')
return unit.to_string("fits").upper()
|
actframework/FrameworkBenchmarks | frameworks/Python/api_hour/yocto_http/etc/hello/api_hour/gunicorn_conf.py | Python | bsd-3-clause | 309 | 0.003236 | import multiprocessing
import os
_is_travis = os.environ.get('TRAVIS') == 'true'
workers = multiprocessing.cpu_count()
if _is_travis:
workers = 2
bind = ['0.0.0.0:8080', '0.0.0.0:8081', '0.0.0.0:8082']
keepalive = 120
errorlog = '-'
pidfile = '/t | mp/api_hour. | pid'
pythonpath = 'hello'
backlog = 10240000 |
rafallo/p2c | gui/desktop/main.py | Python | mit | 428 | 0.004673 | # -*- coding: utf-8 -*-
import sys
import logging
from PyQt5.QtWidgets import QApplication
from p | 2c.app import P2CDaemon
from gui.desktop.mainwindow import MainWindow
logging.basicConfig(level=logging.DEBUG | )
def main():
app = QApplication(sys.argv)
ui = MainWindow()
ui.setupUi(ui)
logic = P2CDaemon()
ui.connect_app(logic)
# ui.play()
sys.exit(app.exec_())
if __name__ == '__main__':
main() |
roboime/pyroboime | roboime/clients/iris.py | Python | agpl-3.0 | 1,711 | 0.001169 | #
# Copyright (C) 2013-2015 RoboIME
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
import IPython
from .cli import CLI
class IRIS(CLI):
def __init__(self, *args, **kwargs):
super(IRIS, self).__init__(*args, **kwargs)
globals()['world'] = self.world
for cmd, func in self.cmd_dict.iteritems():
# XXX: creating a closure to avoid sharing references
# Basically what's being done here is: we create a function
# that's essentially the original one | , (same name, call and
# documentation, and inserting that into the closure.
def _closure_hack():
cmd_func = func.im_func
global_func = lambda *args: cmd_func(self, *args)
global_func.orig_func = cmd_func
| global_func.func_name = cmd_func.func_name
global_func.func_doc = cmd_func.func_doc
globals()[cmd] = global_func
_closure_hack()
def cli_loop(self):
print 'Welcome to IRIS (Interactive RoboIME Intelligence Shell)'
#IPython.start_ipython()
IPython.embed()
# quit after ipython exits
self.quit = True
def write(self, text, ok=True):
print text
|
ustroetz/python-osrm | osrm/extra.py | Python | mit | 9,281 | 0.000215 | # -*- coding: utf-8 -*-
"""
@author: mthh
"""
import matplotlib
import numpy as np
from geopandas import GeoDataFrame, pd
from shapely.geometry import MultiPolygon, Polygon, Point
from . import RequestConfig, Point as _Point
from .core import table
if not matplotlib.ge | t_backend():
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
def contour_poly(gdf, field_name, n_class):
"""
Interpolate the time values (stored in the column ` | field_name`)
from the points contained in `gdf` and compute the contour polygons
in `n_class`.
Parameters
----------
gdf : :py:obj:`geopandas.GeoDataFrame`
The GeoDataFrame containing points and associated values.
field_name : str
The name of the column of *gdf* containing the value to use.
n_class : int
The number of class to use for contour polygons if levels is an
integer (exemple: levels=8).
Returns
-------
collection_polygons : :py:obj:matplotlib.contour.QuadContourSet
The shape of the computed polygons.
levels : list of ints/floats
The levels actually used when making the contours, excluding
the minimum (should be a list of `n_class` values).
"""
# Dont take point without value :
gdf = gdf.iloc[gdf[field_name].to_numpy().nonzero()[0]][:]
# Try to avoid unvalid geom :
if len(gdf.geometry.valid()) != len(gdf):
# Invalid geoms have been encountered :
valid_geoms = gdf.geometry.valid()
valid_geoms = valid_geoms.reset_index()
valid_geoms['idx'] = valid_geoms['index']
del valid_geoms['index']
valid_geoms[field_name] = \
valid_geoms.idx.apply(lambda x: gdf[field_name][x])
else:
valid_geoms = gdf[['geometry', field_name]][:]
# Always in order to avoid invalid value which will cause the fail
# of the griddata function :
try: # Normal way (fails if a non valid geom is encountered)
x = np.array([geom.coords.xy[0][0] for geom in valid_geoms.geometry])
y = np.array([geom.coords.xy[1][0] for geom in valid_geoms.geometry])
z = valid_geoms[field_name].values
except: # Taking the long way to load the value... :
x = np.array([])
y = np.array([])
z = np.array([], dtype=float)
for idx, geom, val in gdf[['geometry', field_name]].itertuples():
try:
x = np.append(x, geom.coords.xy[0][0])
y = np.append(y, geom.coords.xy[1][0])
z = np.append(z, val)
except Exception as err:
print(err)
# # compute min and max and values :
minx = np.nanmin(x)
miny = np.nanmin(y)
maxx = np.nanmax(x)
maxy = np.nanmax(y)
# Assuming we want a square grid for the interpolation
xi = np.linspace(minx, maxx, 200)
yi = np.linspace(miny, maxy, 200)
zi = griddata(x, y, z, xi, yi, interp='linear')
interval_time = int(round(np.nanmax(z) / n_class))
nb_inter = n_class + 1
# jmp = int(round((np.nanmax(z) - np.nanmin(z)) / 15))
# levels = [nb for nb in range(0, int(round(np.nanmax(z))+1)+jmp, jmp)]
levels = tuple([nb for nb in range(0, int(
np.nanmax(z) + 1) + interval_time, interval_time)][:nb_inter+1])
collec_poly = plt.contourf(
xi, yi, zi, levels, cmap=plt.cm.rainbow,
vmax=abs(zi).max(), vmin=-abs(zi).max(), alpha=0.35
)
return collec_poly, levels[1:]
def isopoly_to_gdf(collec_poly, field_name, levels):
"""
Transform a collection of matplotlib polygons (:py:obj:`QuadContourSet`)
to a :py:obj:`GeoDataFrame` with a columns (`field_name`) filled by the
values contained in `levels`.
Parameters
----------
collec_poly : :py:obj:matplotlib.contour.QuadContourSet
The previously retrieved collections of contour polygons.
field_name : str
The name of the column to create which will contain values from `levels`.
levels : list of ints/floats
The values to be used when creating the `GeoDataFrame` of polygons,
likely the values corresponding to the bins values
used to create the polygons in the contourf function.
Returns
-------
gdf_polygons : :py:obj:`GeoDataFrame`
The contour polygons as a GeoDataFrame, with a column filled
with the corresponding levels.
"""
polygons, data = [], []
for i, polygon in enumerate(collec_poly.collections):
mpoly = []
for path in polygon.get_paths():
path.should_simplify = False
poly = path.to_polygons()
exterior, holes = [], []
if len(poly) > 0 and len(poly[0]) > 3:
exterior = poly[0]
if len(poly) > 1: # There's some holes
holes = [h for h in poly[1:] if len(h) > 3]
mpoly.append(Polygon(exterior, holes))
if len(mpoly) > 1:
mpoly = MultiPolygon(mpoly)
polygons.append(mpoly)
if levels:
data.append(levels[i])
elif len(mpoly) == 1:
polygons.append(mpoly[0])
if levels:
data.append(levels[i])
if len(data) == len(polygons):
return GeoDataFrame(geometry=polygons,
data=data,
columns=[field_name])
else:
return GeoDataFrame(geometry=polygons)
def make_grid(gdf, nb_points):
"""
Return a grid, based on the shape of *gdf* and on a *height* value (in
units of *gdf*).
Parameters
----------
gdf : GeoDataFrame
The collection of polygons to be covered by the grid.
nb_points : int
The number of expected points of the grid.
Returns
-------
grid : GeoDataFrame
A collection of polygons.
"""
xmin, ymin, xmax, ymax = gdf.total_bounds
rows = int(nb_points**0.5)
cols = int(nb_points**0.5)
height = (ymax-ymin) / rows
width = (xmax-xmin) / cols
x_left_origin = xmin
x_right_origin = xmin + width
y_top_origin = ymax
y_bottom_origin = ymax - height
res_geoms = []
for countcols in range(cols):
y_top = y_top_origin
y_bottom = y_bottom_origin
for countrows in range(rows):
res_geoms.append((
(x_left_origin + x_right_origin) / 2, (y_top + y_bottom) / 2
))
y_top = y_top - height
y_bottom = y_bottom - height
x_left_origin = x_left_origin + width
x_right_origin = x_right_origin + width
return GeoDataFrame(
geometry=pd.Series(res_geoms).apply(lambda x: Point(x)),
crs=gdf.crs
)
class AccessIsochrone:
"""
Object allowing to query an OSRM instance for a matrix of distance within
a defined radius, store the distance (to avoid making the same query again
when not needed), interpolate time values on a grid and render the contour
polygons.
Parameters
----------
point_origin : 2-floats tuple
The coordinates of the center point to use as (x, y).
points_grid : int
The number of points of the underlying grid to use.
size : float
Search radius (in degree).
url_config : osrm.RequestConfig
The OSRM url to be requested.
Attributes
----------
center_point : collections.namedtuple
The coordinates of the point used a center (potentially moved from the
original point in order to be on the network).
grid : geopandas.GeoDataFrame
The point locations retrieved from OSRM (ie. potentially moved
to be on the routable network).
times : numpy.ndarray
The time-distance table retrieved from OSRM.
Methods
-------
render_contour(nb_class)
Render the contour polygon according to the choosen number of class.
"""
def __init__(self, point_origin, points_grid=250,
size=0.4, url_config=RequestConfig):
gdf = GeoDataFrame(geometry=[Point(point_origin).buffer(size)])
grid = make_grid(gdf, points_grid)
coords_grid = \
|
poiesisconsulting/openerp-restaurant | portal_project/tests/test_access_rights.py | Python | agpl-3.0 | 14,973 | 0.004809 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.project.tests.test_project_base import TestProjectBase
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestPortalProjectBase(TestProjectBase):
def setUp(self):
super(TestPortalProjectBase, self).setUp()
cr, uid = self.cr, self.uid
# Find Portal group
group_portal_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_portal')
self.group_portal_id = group_portal_ref and group_portal_ref[1] or False
# Find Public group
group_public_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_public')
self.group_public_id = group_public_ref and group_public_ref[1] or False
# # Test users to use through the various tests
self.user_portal_id = self.res_users.create(cr, uid, {
'name': 'Chell Portal',
'login': 'chell',
'alias_name': 'chell',
'groups_id': [(6, 0, [self.group_portal_id])]
})
self.user_public_id = self.res_users.create(cr, uid, {
'name': 'Donovan Public',
'login': 'donovan',
'alias_name': 'donovan',
'groups_id': [(6, 0, [self.group_public_id])]
})
self.user_manager_id = self.res_users.create(cr, uid, {
'name': 'Eustache Manager',
'login': 'eustache',
'alias_name': 'eustache',
'groups_id': [(6, 0, [self.group_project_manager_id])]
})
# Test 'Pigs' project
self.project_pigs_id = self.project_project.create(cr, uid, {
'name': 'Pigs', 'privacy_visibility': 'public'}, {'mail_create_nolog': True})
# Various test tasks
self.task_1_id = self.project_task.create(cr, uid, {
'name': 'Test1', 'user_id': False, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
self.task_2_id = self.project_task.create(cr, uid, {
'name': 'Test2', 'user_id': False, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
self.task_3_id = self.project_task.create(cr, uid, {
'name': 'Test3', 'user_id': False, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
self.task_4_id = self.project_task.create(cr, uid, {
'name': 'Test4', 'user_id': self.user_projectuser_id, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
self.task_5_id = self.project_task.create(cr, uid, {
'name': 'Test5', 'user_id': self.user_portal_id, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
self.task_6_id = self.project_task.create(cr, uid, {
'name': 'Test6', 'user_id': self.user_public_id, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
class TestPortalProject(TestPortalProjectBase):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_00_project_access_rights(self):
""" Test basic project access rights, for project and portal_project """
cr, uid, pigs_id = self.cr, self.uid, self.project_pigs_id
# ----------------------------------------
# CASE1: public project
# ----------------------------------------
# Do: Alfred reads project -> ok (employee ok public)
self.project_project.read(cr, self.user_projectuser_id, pigs_id, ['name'])
# Test: all project tasks visible
task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)])
test_task_ids = set([self.task_1_id, self.task_2_id, self.task_3_id, self.task_4_id, self.task_5_id, self.task_6_id])
self.assertEqual(set(task_ids), test_task_ids,
'access rights: project user cannot see all tasks of a public project')
# Test: all project tasks readable
self.project_task.read(cr, self.user_projectuser_id, task_ids, ['name'])
# Test: all project tasks writable
self.project_task.write(cr, self.user_projectuser_id, task_ids, {'description': 'TestDescription'})
# Do: Bert reads project -> crash, no group
self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, pigs_id, ['name'])
# Test: no project task visible
self.assertRaises(AccessError, self.project_task.search, cr, self.user_none_id, [('project_id', '=', pigs_id)])
# Test: no project task readable
self.assertRaises(AccessError, self.project_task.read, cr, self.user_none_id, task_ids, ['name'])
# Test: no project task writable
self.assertRaises(AccessError, self.project_task.write, cr, self.user_none_id, task_ids, {'description': 'TestDescription'})
# Do: Chell reads project -> ok (portal ok public)
self.project_project.read(cr, self.user_portal_id, pigs_id, ['name'])
# Test: all project tasks visible
task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)])
self.assertEqual(set(task_ids), test_task_ids,
'access rights: project user cannot see all tasks of a public project')
# Test: all project tasks readable
self.project_task.read(cr, self.user_portal_id, task_ids, ['name'])
# Test: no project task writable
self.assertRaises(AccessError, self.project_task.write, cr, self.user_portal_id, task_ids, {'description': 'TestDescription'})
# Do: Donovan reads project -> ok (public)
self.project_project.read(cr, self.user_public_id, pigs_id, ['name'])
# Test: all proj | ect tasks visible
task_ids = self.project_task.search(cr, self.user_public_id, [('project_id', '=', pigs_id)])
self.assertEqual(set(task_ids), test_task_ids,
'access rights: public user cannot see all tasks of a public project')
# Test: all project tasks readable
self.project_task.read(cr, self.user_public_id, task_ids, ['name'])
# Test: no project task writable
self.assertRaises(AccessError, self.project_t | ask.write, cr, self.user_public_id, task_ids, {'description': 'TestDescription'})
# ----------------------------------------
# CASE2: portal project
# ----------------------------------------
self.project_project.write(cr, uid, [pigs_id], {'privacy_visibility': 'portal'})
# Do: Alfred reads project -> ok (employee ok public)
self.project_project.read(cr, self.user_projectuser_id, pigs_id, ['name'])
# Test: all project tasks visible
task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)])
self.assertEqual(set(task_ids), test_task_ids,
'access rights: project user cannot see all tasks of a portal project')
# Do: Bert reads project -> crash, no group
self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, pigs_id, ['name'])
# Test: no project task searchable
self.assertRaises(AccessError, self.pr |
mlavin/django | tests/ordering/tests.py | Python | bsd-3-clause | 12,431 | 0.001368 | from datetime import datetime
from operator import attrgetter
from django.db.models import F
from django.db.models.functions import Upper
from django.test import TestCase
from .models import Article, Author, Reference
class OrderingTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Article.objects.create(headline="Article 1", pub_date=datetime(2005, 7, 2 | 6))
cls.a2 = Article.objects.create(headline="Article 2", pub_date=datetime(2005, 7, 27))
cls.a3 = Article.objects.create(headline="Article 3", pub_date=datetime(2005, 7, 27))
cls.a4 = Article.objects.create(headline="Article 4", pub_date=datetime(2005, 7, 28))
cls.author_1 = | Author.objects.create(name="Name 1")
cls.author_2 = Author.objects.create(name="Name 2")
for i in range(2):
Author.objects.create()
def test_default_ordering(self):
"""
By default, Article.objects.all() orders by pub_date descending, then
headline ascending.
"""
self.assertQuerysetEqual(
Article.objects.all(), [
"Article 4",
"Article 2",
"Article 3",
"Article 1",
],
attrgetter("headline")
)
# Getting a single item should work too:
self.assertEqual(Article.objects.all()[0], self.a4)
def test_default_ordering_override(self):
"""
Override ordering with order_by, which is in the same format as the
ordering attribute in models.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline"), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.order_by("pub_date", "-headline"), [
"Article 1",
"Article 3",
"Article 2",
"Article 4",
],
attrgetter("headline")
)
def test_order_by_override(self):
"""
Only the last order_by has any effect (since they each override any
previous ordering).
"""
self.assertQuerysetEqual(
Article.objects.order_by("id"), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.order_by("id").order_by("-headline"), [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_order_by_nulls_first_and_last(self):
msg = "nulls_first and nulls_last are mutually exclusive"
with self.assertRaisesMessage(ValueError, msg):
Article.objects.order_by(F("author").desc(nulls_last=True, nulls_first=True))
def test_order_by_nulls_last(self):
Article.objects.filter(headline="Article 3").update(author=self.author_1)
Article.objects.filter(headline="Article 4").update(author=self.author_2)
# asc and desc are chainable with nulls_last.
self.assertSequenceEqual(
Article.objects.order_by(F("author").desc(nulls_last=True)),
[self.a4, self.a3, self.a1, self.a2],
)
self.assertSequenceEqual(
Article.objects.order_by(F("author").asc(nulls_last=True)),
[self.a3, self.a4, self.a1, self.a2],
)
self.assertSequenceEqual(
Article.objects.order_by(Upper("author__name").desc(nulls_last=True)),
[self.a4, self.a3, self.a1, self.a2],
)
self.assertSequenceEqual(
Article.objects.order_by(Upper("author__name").asc(nulls_last=True)),
[self.a3, self.a4, self.a1, self.a2],
)
def test_order_by_nulls_first(self):
Article.objects.filter(headline="Article 3").update(author=self.author_1)
Article.objects.filter(headline="Article 4").update(author=self.author_2)
# asc and desc are chainable with nulls_first.
self.assertSequenceEqual(
Article.objects.order_by(F("author").asc(nulls_first=True)),
[self.a1, self.a2, self.a3, self.a4],
)
self.assertSequenceEqual(
Article.objects.order_by(F("author").desc(nulls_first=True)),
[self.a1, self.a2, self.a4, self.a3],
)
self.assertSequenceEqual(
Article.objects.order_by(Upper("author__name").asc(nulls_first=True)),
[self.a1, self.a2, self.a3, self.a4],
)
self.assertSequenceEqual(
Article.objects.order_by(Upper("author__name").desc(nulls_first=True)),
[self.a1, self.a2, self.a4, self.a3],
)
def test_stop_slicing(self):
"""
Use the 'stop' part of slicing notation to limit the results.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline")[:2], [
"Article 1",
"Article 2",
],
attrgetter("headline")
)
def test_stop_start_slicing(self):
"""
Use the 'stop' and 'start' parts of slicing notation to offset the
result list.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline")[1:3], [
"Article 2",
"Article 3",
],
attrgetter("headline")
)
def test_random_ordering(self):
"""
Use '?' to order randomly.
"""
self.assertEqual(
len(list(Article.objects.order_by("?"))), 4
)
def test_reversed_ordering(self):
"""
Ordering can be reversed using the reverse() method on a queryset.
This allows you to extract things like "the last two items" (reverse
and then take the first two).
"""
self.assertQuerysetEqual(
Article.objects.all().reverse()[:2], [
"Article 1",
"Article 3",
],
attrgetter("headline")
)
def test_reverse_ordering_pure(self):
qs1 = Article.objects.order_by(F('headline').asc())
qs2 = qs1.reverse()
self.assertQuerysetEqual(
qs1, [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
qs2, [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_no_reordering_after_slicing(self):
msg = 'Cannot reverse a query once a slice has been taken.'
qs = Article.objects.all()[0:2]
with self.assertRaisesMessage(TypeError, msg):
qs.reverse()
with self.assertRaisesMessage(TypeError, msg):
qs.last()
def test_extra_ordering(self):
"""
Ordering can be based on fields included from an 'extra' clause
"""
self.assertQuerysetEqual(
Article.objects.extra(select={"foo": "pub_date"}, order_by=["foo", "headline"]), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
def test_extra_ordering_quoting(self):
"""
If the extra clause uses an SQL keyword for a name, it will be
protected by quoting.
"""
self.assertQuerysetEqual(
Article.objects.extra(select={"order": "pub_date"}, order_by=["order", "headline"]), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
def test_extra_ordering_with_table_name(sel |
chrisjluc/ProjectEuler | Power.py | Python | mit | 409 | 0.01467 | class Power:
def __init__(self, base, exponent):
self.base = base
self.exponent = exponent
def addexponent(self):
self.exponent += 1
def getTotal(self):
return self.base**self.exponent
def getBase | (self):
return self.base
def getexponent(self):
return self. | exponent
def printSelf(self):
print self.base
print self.exponent |
paulnurkkala/comm-slackbot | plugins/get_source/get_source.py | Python | gpl-2.0 | 297 | 0.030303 | bot_user = 'U041TJU13'
irc_channel = 'C040NNZHT'
outputs = []
def process_message(data):
message_text = data.get('text')
if message_t | ext:
if (bot_user in message_text) and ('source' in message_text):
outputs.append([irc_channel, '`htt | ps://github.com/paulnurkkala/comm-slackbot`'])
|
OmniLayer/omnicore | test/functional/wallet_groups.py | Python | mit | 3,669 | 0.001363 | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet group functionality."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import CTransaction, FromHex, ToHex
from test_framework.util import (
assert_approx,
assert_equal,
)
class WalletGroupTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], [], ['-avoidpartialspends']]
self.rpc_timeout = 480
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Mine some coins
self.nodes[0].generate(110)
# Get some addresses from the two nodes
addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
addrs = addr1 + addr2
# Send 1 + 0.5 coin to each address
[self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs]
[self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs]
self.nodes[0].generate(1)
self.sync_all()
# For each node, send 0.2 coins back to 0;
# - node[1] should pick one 0.5 UTXO and leave the rest
# - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a
# given address, and leave the rest
txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx1 = self.nodes[1].getrawtransaction(txid1, True)
# txid1 should have 1 input and 2 outputs
assert_equal(1, len(tx1["vin"]))
assert_equal(2, len(tx1["vout"]))
# one output should be 0.2, the other should be ~0.3
v = [vout["value"] for vout in tx1["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 0.3, 0.0001)
txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx2 = self.nodes[2].getrawtransaction(txid2, True)
# txid2 should have 2 inputs and 2 outputs
assert_equal(2, len(tx2["vin"]))
assert_equal(2, len(tx2["vout"]))
# one output should be 0.2, the other should be ~1.3
v = [vout["value"] for vout in tx2["vout"]]
| v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 1.3, 0.0001)
# Empty out node2's wallet
self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
# Fill node2's wallet with 10000 outputs corresponding to the same
# scriptPubKey
for | i in range(5):
raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
tx = FromHex(CTransaction(), raw_tx)
tx.vin = []
tx.vout = [tx.vout[0]] * 2000
funded_tx = self.nodes[0].fundrawtransaction(ToHex(tx))
signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex'])
self.nodes[0].sendrawtransaction(signed_tx['hex'])
self.nodes[0].generate(1)
self.sync_all()
# Check that we can create a transaction that only requires ~100 of our
# utxos, without pulling in all outputs and creating a transaction that
# is way too big.
assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5)
if __name__ == '__main__':
WalletGroupTest().main()
|
amlyj/pythonStudy | 2.7/standard_library/study_filter.py | Python | mit | 516 | 0.005587 | #!/usr/bin/env python
# -*- coding: ut | f-8 -*-
# @Time : 17-7-22 上午1:18
# @Author : tom.lee
# @Site :
# @File : study_filter.py
# @Software: PyCharm
"""
按照某种规则过滤掉一些元素
接收一个 boolean返回值的函数,可用时lambda,可以是自定义的函数,
迭代传入的可迭代对象的每个元素进行过滤
"""
lst = [1, 2, 3, 4, 5, 6]
# 所有奇数都会返回True, 偶数会返回False被过滤掉
print filter(lambda x: x % 2 != 0, lst)
# 输出结果 [1, 3, 5 | ]
|
Jgarcia-IAS/SITE | addons/website_forum/models/forum.py | Python | agpl-3.0 | 36,003 | 0.004444 | # -*- coding: utf-8 -*-
from datetime import datetime
import uuid
from werkzeug.exceptions import Forbidden
import logging
import openerp
from openerp import api, tools
from openerp import SUPERUSER_ID
from openerp.addons.website.models.website import slug
from openerp.exceptions import Warning
from openerp.osv import osv, fields
from openerp.tools import html2plaintext
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class KarmaError(Forbidden):
""" Karma-related error, used for forum and posts. """
pass
class Forum(osv.Model):
    """Forum definition: karma rewards per event and karma thresholds
    gating each user action.

    TDE TODO: set karma values for actions dynamic for a given forum
    """
    _name = 'forum.forum'
    _description = 'Forums'
    _inherit = ['mail.thread', 'website.seo.metadata']
    def init(self, cr):
        """ Add forum uuid for user email validation. """
        forum_uuids = self.pool['ir.config_parameter'].search(cr, SUPERUSER_ID, [('key', '=', 'website_forum.uuid')])
        if not forum_uuids:
            self.pool['ir.config_parameter'].set_param(cr, SUPERUSER_ID, 'website_forum.uuid', str(uuid.uuid4()), ['base.group_system'])
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'faq': fields.html('Guidelines'),
        'description': fields.html('Description'),
        # karma generation: karma earned/lost when these events happen
        'karma_gen_question_new': fields.integer('Asking a question'),
        'karma_gen_question_upvote': fields.integer('Question upvoted'),
        'karma_gen_question_downvote': fields.integer('Question downvoted'),
        'karma_gen_answer_upvote': fields.integer('Answer upvoted'),
        'karma_gen_answer_downvote': fields.integer('Answer downvoted'),
        'karma_gen_answer_accept': fields.integer('Accepting an answer'),
        'karma_gen_answer_accepted': fields.integer('Answer accepted'),
        'karma_gen_answer_flagged': fields.integer('Answer flagged'),
        # karma-based actions: minimum karma required to perform each action
        'karma_ask': fields.integer('Ask a question'),
        'karma_answer': fields.integer('Answer a question'),
        'karma_edit_own': fields.integer('Edit its own posts'),
        'karma_edit_all': fields.integer('Edit all posts'),
        'karma_close_own': fields.integer('Close its own posts'),
        'karma_close_all': fields.integer('Close all posts'),
        'karma_unlink_own': fields.integer('Delete its own posts'),
        'karma_unlink_all': fields.integer('Delete all posts'),
        'karma_upvote': fields.integer('Upvote'),
        'karma_downvote': fields.integer('Downvote'),
        'karma_answer_accept_own': fields.integer('Accept an answer on its own questions'),
        'karma_answer_accept_all': fields.integer('Accept an answer to all questions'),
        'karma_editor_link_files': fields.integer('Linking files (Editor)'),
        'karma_editor_clickable_link': fields.integer('Clickable links (Editor)'),
        'karma_comment_own': fields.integer('Comment its own posts'),
        'karma_comment_all': fields.integer('Comment all posts'),
        'karma_comment_convert_own': fields.integer('Convert its own answers to comments and vice versa'),
        'karma_comment_convert_all': fields.integer('Convert all answers to comments and vice versa'),
        'karma_comment_unlink_own': fields.integer('Unlink its own comments'),
        'karma_comment_unlink_all': fields.integer('Unlink all comments'),
        'karma_retag': fields.integer('Change question tags'),
        'karma_flag': fields.integer('Flag a post as offensive'),
    }
    def _get_default_faq(self, cr, uid, context=None):
        """Return the default FAQ HTML shipped with the module.

        Fix: the original had an unreachable ``return False`` after the
        ``return f.read()`` inside the ``with`` block; removed.
        """
        fname = openerp.modules.get_module_resource('website_forum', 'data', 'forum_default_faq.html')
        with open(fname, 'r') as f:
            return f.read()
    _defaults = {
        'description': 'This community is for professionals and enthusiasts of our products and services.',
        'faq': _get_default_faq,
        'karma_gen_question_new': 0,  # set to null for anti spam protection
        'karma_gen_question_upvote': 5,
        'karma_gen_question_downvote': -2,
        'karma_gen_answer_upvote': 10,
        'karma_gen_answer_downvote': -2,
        'karma_gen_answer_accept': 2,
        'karma_gen_answer_accepted': 15,
        'karma_gen_answer_flagged': -100,
        'karma_ask': 3,  # set to not null for anti spam protection
        'karma_answer': 3,  # set to not null for anti spam protection
        'karma_edit_own': 1,
        'karma_edit_all': 300,
        'karma_close_own': 100,
        'karma_close_all': 500,
        'karma_unlink_own': 500,
        'karma_unlink_all': 1000,
        'karma_upvote': 5,
        'karma_downvote': 50,
        'karma_answer_accept_own': 20,
        'karma_answer_accept_all': 500,
        'karma_editor_link_files': 20,
        'karma_editor_clickable_link': 20,
        'karma_comment_own': 3,
        'karma_comment_all': 5,
        'karma_comment_convert_own': 50,
        'karma_comment_convert_all': 500,
        'karma_comment_unlink_own': 50,
        'karma_comment_unlink_all': 500,
        'karma_retag': 75,
        'karma_flag': 500,
    }
    def create(self, cr, uid, values, context=None):
        """Create a forum without posting a creation log in the chatter."""
        if context is None:
            context = {}
        create_context = dict(context, mail_create_nolog=True)
        return super(Forum, self).create(cr, uid, values, context=create_context)
class Post(osv.Model):
_name = 'forum.post'
_description = 'Forum Post'
_inherit = ['mail.thread', 'website.seo.metadata']
_order = "is_correct DESC, vote_count DESC, write_date DESC"
def _get_user_vote(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
vote_ids = self.pool['forum.post.vote'].search(cr, uid, [('post_id', 'in', ids), ('user_id', '=', uid)], context=context)
for vote in self.pool['forum.post.vote'].browse(cr, uid, vote_ids, context=context):
res[vote.post_id.id] = vote.vote
return res
def _get_vote_count(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
for vote in post.vote_ids:
res[post.id] += int(vote.vote)
return res
def _get_post_from_vote(self, cr, uid, ids, context=None):
result = {}
for vote in self.pool['forum.post.vote'].browse(cr, uid, ids, context=context):
result[vote.post_id.id] = True
return result.keys()
def _get_user_favourite(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
if uid in [f.id for f in post.favourite_ids]:
res[post.id] = True
return res
def _get_favorite_count(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] += len(post.favourite_ids)
return res
def _ | get_post_from_hierarchy(self, cr, uid, ids, context=None):
post_ids = set(ids)
for post in self.browse(cr, SUPERUSER_ID, ids, context=context):
if post.parent_id:
post_ids.add(post.parent_id.id)
return list(post_ids)
def _get_child_count(self, cr, uid, ids, field_name=False, arg={}, context=None):
res = dict.fromkeys(ids, 0)
for post in self.browse | (cr, uid, ids, context=context):
if post.parent_id:
res[post.parent_id.id] = len(post.parent_id.child_ids)
else:
res[post.id] = len(post.child_ids)
return res
    def _get_uid_answered(self, cr, uid, ids, field_name, arg, context=None):
        # True for each question the current user has already answered
        # (i.e. authored at least one child post of).
        res = dict.fromkeys(ids, False)
        for post in self.browse(cr, uid, ids, context=context):
            res[post.id] = any(answer.create_uid.id == uid for answer in post.child_ids)
        return res
def _get_has_validated_answer(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, False)
ans_ids = self.search(cr, uid, [('parent_id', 'in', ids), ('is_corr |
uvchik/pvlib-python | pvlib/test/test_spa.py | Python | bsd-3-clause | 16,534 | 0.002722 | import os
import datetime as dt
try:
from importlib import reload
except ImportError:
try:
from imp import reload
except ImportError:
pass
import numpy as np
from numpy.testing import assert_almost_equal
import pandas as pd
import unittest
import pytest
from pvlib.location import Location
try:
from numba import __version__ as numba_version
numba_version_int = int(numba_version.split('.')[0] +
numba_version.split('.')[1])
except ImportError:
numba_version_int = 0
# ---------------------------------------------------------------------------
# Shared reference fixture for the SPA tests below: one timestamp near
# Golden, CO (2003-10-17 12:30:30 MST) plus the expected intermediate value
# of every step of the solar position algorithm for that input.
# ---------------------------------------------------------------------------
times = (pd.date_range('2003-10-17 12:30:30', periods=1, freq='D')
         .tz_localize('MST'))
unixtimes = np.array(times.tz_convert('UTC').astype(np.int64)*1.0/10**9)
# Observer location and atmospheric conditions.
lat = 39.742476
lon = -105.1786
elev = 1830.14
pressure = 820
temp = 11
delta_t = 67.0
atmos_refract= 0.5667
# Julian day / century / millennium values.
JD = 2452930.312847
JC = 0.0379277986858
JDE = 2452930.313623
JCE = 0.037927819916852
JME = 0.003792781991685
# Heliocentric (L, B, R) and geocentric (Theta, beta) coordinates.
L = 24.0182616917
B = -0.0001011219
R = 0.9965422974
Theta = 204.0182616917
beta = 0.0001011219
# Nutation/obliquity terms.
X0 = 17185.861179
X1 = 1722.893218
X2 = 18234.075703
X3 = 18420.071012
X4 = 51.686951
dPsi = -0.00399840
dEpsilon = 0.00166657
epsilon0 = 84379.672625
epsilon = 23.440465
# Aberration, apparent sun longitude and sidereal time.
dTau = -0.005711
lamd = 204.0085519281
v0 = 318.515579
v = 318.511910
# Geocentric and topocentric sun coordinates / angles.
alpha = 202.227408
delta = -9.31434
H = 11.10590
xi = 0.002451
dAlpha = -0.000369
alpha_prime = 202.22704
delta_prime = -9.316179
H_prime = 11.10627
e0 = 39.872046
de = 0.016332
e = 39.888378
theta = 50.11162
theta0 = 90 - e0
Gamma = 14.340241
Phi = 194.340241
# Inputs and expected values for the delta-t tests (presumably used by
# tests further down this file -- TODO confirm against full module).
year = 1985
month = 2
year_array = np.array([-499, 500, 1000, 1500, 1800, 1900, 1950, 1970, 1985, 1990, 2000, 2005])
month_array = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
dt_actual = 54.413442486
dt_actual_array = np.array([1.7184831e+04, 5.7088051e+03, 1.5730419e+03,
                            1.9801820e+02, 1.3596506e+01, -2.1171894e+00,
                            2.9289261e+01, 4.0824887e+01, 5.4724581e+01,
                            5.7426651e+01, 6.4108015e+01, 6.5038015e+01])
mix_year_array = np.full((10), year)
mix_month_array = np.full((10), month)
mix_year_actual = np.full((10), dt_actual)
mix_month_actual = mix_year_actual
class SpaBase(object):
"""Test functions common to numpy and numba spa"""
    # Each test below checks one step of the SPA pipeline against the module
    # level reference constants defined above; ``self.spa`` is bound by the
    # concrete (numpy/numba) subclass.
    def test_julian_day_dt(self):
        dt = times.tz_convert('UTC')[0]
        year = dt.year
        month = dt.month
        day = dt.day
        hour = dt.hour
        minute = dt.minute
        second = dt.second
        microsecond = dt.microsecond
        assert_almost_equal(JD,
                            self.spa.julian_day_dt(year, month, day, hour,
                                                   minute, second, microsecond), 6)
    def test_julian_ephemeris_day(self):
        assert_almost_equal(JDE, self.spa.julian_ephemeris_day(JD, delta_t), 5)
    def test_julian_century(self):
        assert_almost_equal(JC, self.spa.julian_century(JD), 6)
    def test_julian_ephemeris_century(self):
        assert_almost_equal(JCE, self.spa.julian_ephemeris_century(JDE), 10)
    # NOTE(review): "millenium" is a historical misspelling of the test name,
    # kept so external test selection by name keeps working.
    def test_julian_ephemeris_millenium(self):
        assert_almost_equal(JME, self.spa.julian_ephemeris_millennium(JCE), 10)
    # Heliocentric / geocentric coordinates.
    def test_heliocentric_longitude(self):
        assert_almost_equal(L, self.spa.heliocentric_longitude(JME), 6)
    def test_heliocentric_latitude(self):
        assert_almost_equal(B, self.spa.heliocentric_latitude(JME), 6)
    def test_heliocentric_radius_vector(self):
        assert_almost_equal(R, self.spa.heliocentric_radius_vector(JME), 6)
    def test_geocentric_longitude(self):
        assert_almost_equal(Theta, self.spa.geocentric_longitude(L), 6)
    def test_geocentric_latitude(self):
        assert_almost_equal(beta, self.spa.geocentric_latitude(B), 6)
    # Nutation and obliquity.
    def test_mean_elongation(self):
        assert_almost_equal(X0, self.spa.mean_elongation(JCE), 5)
    def test_mean_anomaly_sun(self):
        assert_almost_equal(X1, self.spa.mean_anomaly_sun(JCE), 5)
    def test_mean_anomaly_moon(self):
        assert_almost_equal(X2, self.spa.mean_anomaly_moon(JCE), 5)
    def test_moon_argument_latitude(self):
        assert_almost_equal(X3, self.spa.moon_argument_latitude(JCE), 5)
    def test_moon_ascending_longitude(self):
        assert_almost_equal(X4, self.spa.moon_ascending_longitude(JCE), 6)
    def test_longitude_nutation(self):
        assert_almost_equal(dPsi, self.spa.longitude_nutation(JCE, X0, X1, X2,
                                                              X3, X4), 6)
    def test_obliquity_nutation(self):
        assert_almost_equal(dEpsilon, self.spa.obliquity_nutation(JCE, X0, X1,
                                                                  X2, X3, X4),
                            6)
    def test_mean_ecliptic_obliquity(self):
        assert_almost_equal(epsilon0, self.spa.mean_ecliptic_obliquity(JME), 6)
    def test_true_ecliptic_obliquity(self):
        assert_almost_equal(epsilon, self.spa.true_ecliptic_obliquity(
            epsilon0, dEpsilon), 6)
    # Apparent sun position and sidereal time.
    def test_aberration_correction(self):
        assert_almost_equal(dTau, self.spa.aberration_correction(R), 6)
    def test_apparent_sun_longitude(self):
        assert_almost_equal(lamd, self.spa.apparent_sun_longitude(
            Theta, dPsi, dTau), 6)
    def test_mean_sidereal_time(self):
        assert_almost_equal(v0, self.spa.mean_sidereal_time(JD, JC), 3)
    def test_apparent_sidereal_time(self):
        assert_almost_equal(v, self.spa.apparent_sidereal_time(
            v0, dPsi, epsilon), 5)
    def test_geocentric_sun_right_ascension(self):
        assert_almost_equal(alpha, self.spa.geocentric_sun_right_ascension(
            lamd, epsilon, beta), 6)
    def test_geocentric_sun_declination(self):
        assert_almost_equal(delta, self.spa.geocentric_sun_declination(
            lamd, epsilon, beta), 6)
    # Topocentric corrections.
    def test_local_hour_angle(self):
        assert_almost_equal(H, self.spa.local_hour_angle(v, lon, alpha), 4)
    def test_equatorial_horizontal_parallax(self):
        assert_almost_equal(xi, self.spa.equatorial_horizontal_parallax(R), 6)
    def test_parallax_sun_right_ascension(self):
        u = self.spa.uterm(lat)
        x = self.spa.xterm(u, lat, elev)
        y = self.spa.yterm(u, lat, elev)
        assert_almost_equal(dAlpha, self.spa.parallax_sun_right_ascension(
            x, xi, H, delta), 4)
    def test_topocentric_sun_right_ascension(self):
        assert_almost_equal(alpha_prime,
                            self.spa.topocentric_sun_right_ascension(
                                alpha, dAlpha), 5)
    def test_topocentric_sun_declination(self):
        u = self.spa.uterm(lat)
        x = self.spa.xterm(u, lat, elev)
        y = self.spa.yterm(u, lat, elev)
        assert_almost_equal(delta_prime, self.spa.topocentric_sun_declination(
            delta, x, y, xi, dAlpha,H), 5)
    def test_topocentric_local_hour_angle(self):
        assert_almost_equal(H_prime, self.spa.topocentric_local_hour_angle(
            H, dAlpha), 5)
    # Elevation, refraction and azimuth.
    def test_topocentric_elevation_angle_without_atmosphere(self):
        assert_almost_equal(
            e0, self.spa.topocentric_elevation_angle_without_atmosphere(
                lat, delta_prime, H_prime), 6)
    def test_atmospheric_refraction_correction(self):
        assert_almost_equal(de, self.spa.atmospheric_refraction_correction(
            pressure, temp, e0, atmos_refract), 6)
    def test_topocentric_elevation_angle(self):
        assert_almost_equal(e, self.spa.topocentric_elevation_angle(e0, de), 6)
    def test_topocentric_zenith_angle(self):
        assert_almost_equal(theta, self.spa.topocentric_zenith_angle(e), 5)
    def test_topocentric_astronomers_azimuth(self):
        assert_almost_equal(Gamma, self.spa.topocentric_astronomers_azimuth(
            H_prime, delta_prime, lat), 5)
    def test_topocentric_azimuth_angle(self):
        assert_almost_equal(Phi, self.spa.topocentric_azimuth_angle(Gamma), 5)
def test_solar_position(self):
assert_almost_equal(
np.array([[theta, theta0, e, e0, Phi]]).T, self.spa.solar_position(
unixtimes, lat, lon, elev, pressure, temp, delta_ |
450586509/DLNLP | src/dataUtils.py | Python | apache-2.0 | 5,682 | 0.012172 | #-*- coding: utf-8 -*-
#2016年 03月 03日 星期四 11:01:05 CST by Demobin
#code from:http://www.jianshu.com/p/7e233ef57cb6
import json
import h5py
import codecs
# 4-tag labelling scheme: Single-char word / Begin / Middle / End of word.
corpus_tags = ['S', 'B', 'M', 'E']
def saveCwsInfo(path, cwsInfo):
    '''Save the CWS (Chinese word segmentation) model info to *path*.

    File layout: the first line is a JSON pair (initProb, tranProb);
    each following line holds a character, a tab, and its vocab index.
    '''
    print('save cws info to %s'%path)
    (initProb, tranProb), (vocab, indexVocab) = cwsInfo
    # 'with' guarantees the file is closed even if a write fails
    # (the original leaked the handle on error).
    with open(path, 'w') as fd:
        fd.write(json.dumps((initProb, tranProb)) + '\n')
        for char in vocab:
            fd.write(char.encode('utf-8') + '\t' + str(vocab[char]) + '\n')
def loadCwsInfo(path):
    '''Load the CWS model info written by saveCwsInfo().

    Returns ((initProb, tranProb), (vocab, indexVocab)).
    '''
    print('load cws info from %s'%path)
    # 'with' guarantees the file is closed even if parsing fails.
    with open(path, 'r') as fd:
        # First line: JSON-encoded (initial, transition) probabilities.
        j = json.loads(fd.readline().strip())
        initProb, tranProb = j[0], j[1]
        lines = fd.readlines()
    vocab = {}
    indexVocab = [0 for i in range(len(lines))]
    for line in lines:
        rst = line.strip().split('\t')
        if len(rst) < 2: continue
        char, index = rst[0].decode('utf-8'), int(rst[1])
        vocab[char] = index
        indexVocab[index] = char
    return (initProb, tranProb), (vocab, indexVocab)
def saveCwsData(path, cwsData):
    '''Save the segmentation training samples (X, y) to an HDF5 file.'''
    print('save cws data to %s'%path)
    # HDF5 is the most efficient format for persisting large matrices.
    fd = h5py.File(path,'w')
    (X, y) = cwsData
    fd.create_dataset('X', data = X)
    fd.create_dataset('y', data = y)
    fd.close()
def loadCwsData(path):
    '''Load the segmentation training samples (X, y) from an HDF5 file.'''
    print('load cws data from %s'%path)
    fd = h5py.File(path,'r')
    # [:] materialises each dataset as an in-memory array.
    X = fd['X'][:]
    y = fd['y'][:]
    fd.close()
    return (X, y)
def sent2vec2(sent, vocab, ctxWindows = 5):
    '''Map each character of *sent* to a context window of vocab indices.

    Unknown characters map to vocab['retain-unknown'].  Both ends are
    padded with vocab['retain-padding'] so that every character gets a
    full window of ctxWindows indices.  Returns one window per character.
    '''
    unknown = vocab['retain-unknown']
    indices = [vocab.get(char, unknown) for char in sent]
    num = len(indices)
    # Pad both ends so the first/last characters still get full windows.
    pad = int((ctxWindows - 1)/2)
    padding = [vocab['retain-padding']] * pad
    indices = padding + indices + padding
    return [indices[i:i + ctxWindows] for i in range(num)]
def sent2vec(sent, vocab, ctxWindows = 5):
    '''Convenience wrapper: split *sent* into characters and delegate.'''
    return sent2vec2(list(sent), vocab, ctxWindows = ctxWindows)
def doc2vec(fname, vocab):
    '''Vectorise a whitespace-segmented corpus file.

    Returns (X, y, initProb, tranProb): the context-window samples, their
    S/B/M/E tag labels, and the tag initial/transition probabilities
    estimated from the corpus.
    '''
    # Read the whole file at once -- beware memory usage on large corpora.
    with codecs.open(fname, 'r', 'utf-8') as fd:
        lines = fd.readlines()
    # Sample set
    X = []
    y = []
    # Tag statistics
    tagSize = len(corpus_tags)
    tagCnt = [0 for i in range(tagSize)]
    tagTranCnt = [[0 for i in range(tagSize)] for j in range(tagSize)]
    # Iterate over lines
    for line in lines:
        # Split on whitespace
        words = line.strip('\n').split()
        # Per-line characters and their tags
        chars = []
        tags = []
        # Iterate over words
        for word in words:
            # Words of two or more characters
            if len(word) > 1:
                # First character of the word
                chars.append(word[0])
                tags.append(corpus_tags.index('B'))
                # Middle characters of the word
                for char in word[1:(len(word) - 1)]:
                    chars.append(char)
                    tags.append(corpus_tags.index('M'))
                # Last character of the word
                chars.append(word[-1])
                tags.append(corpus_tags.index('E'))
            # Single-character word
            else:
                chars.append(word)
                tags.append(corpus_tags.index('S'))
        #
        lineVecX = sent2vec2(chars, vocab, ctxWindows = 7)
        # Accumulate tag statistics for this line
        lineVecY = []
        lastTag = -1
        for tag in tags:
            # Label vector
            lineVecY.append(tag)
            #lineVecY.append(corpus_tags[tag])
            # Tag frequency
            tagCnt[tag] += 1
            # Tag transition frequency
            if lastTag != -1:
                tagTranCnt[lastTag][tag] += 1
            # Remember the previous tag
            lastTag = tag
        X.extend(lineVecX)
        y.extend(lineVecY)
    # Total character count
    charCnt = sum(tagCnt)
    # Total transition count
    tranCnt = sum([sum(tag) for tag in tagTranCnt])
    # Tag initial probabilities
    initProb = []
    for i in range(tagSize):
        initProb.append(tagCnt[i]/float(charCnt))
    # Tag transition probabilities
    tranProb = []
    for i in range(tagSize):
        p = []
        for j in range(tagSize):
            p.append(tagTranCnt[i][j]/float(tranCnt))
        tranProb.append(p)
    return X, y, initProb, tranProb
def genVocab(fname, delimiters = (' ', '\n')):
    '''Build a character-to-index vocabulary from a UTF-8 corpus file.

    Characters listed in *delimiters* are excluded.  Two reserved entries,
    'retain-unknown' and 'retain-padding', are appended for out-of-vocab
    characters and window padding.  Returns (vocab, indexVocab).

    Fix: the delimiters default was a mutable list; a tuple is safe and
    membership semantics are unchanged (callers may still pass a list).
    '''
    # Read the whole file at once -- beware memory usage on large corpora.
    with codecs.open(fname, 'r', 'utf-8') as f:
        data = f.read()
    vocab = {}
    indexVocab = []
    index = 0
    for char in data:
        # Delimiters are not part of the vocabulary.
        if char not in delimiters and char not in vocab:
            vocab[char] = index
            indexVocab.append(char)
            index += 1
    # Reserved entries for unknown (OOV) characters and padding.
    vocab['retain-unknown'] = len(vocab)
    vocab['retain-padding'] = len(vocab)
    indexVocab.append('retain-unknown')
    indexVocab.append('retain-padding')
    return vocab, indexVocab
def load(fname):
    # Build the vocabulary, vectorise the corpus and print summary stats.
    print 'train from file', fname
    # NOTE(review): 'delims' is never used; genVocab falls back to its own
    # default delimiters.
    delims = [' ', '\n']
    vocab, indexVocab = genVocab(fname)
    X, y, initProb, tranProb = doc2vec(fname, vocab)
    print len(X), len(y), len(vocab), len(indexVocab)
    print initProb
    print tranProb
    return (X, y), (initProb, tranProb), (vocab, indexVocab)
# Manual smoke test against the MSR segmentation corpus (icwb2 bakeoff).
# NOTE(review): '~' is not expanded by open(); os.path.expanduser would be
# needed for this path to resolve.
if __name__ == '__main__':
    load('~/work/corpus/icwb2/training/msr_training.utf8')
abhikeshav/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ncs1k_mxp_lldp_oper.py | Python | apache-2.0 | 5,511 | 0.022138 | """ Cisco_IOS_XR_ncs1k_mxp_lldp_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR ncs1k\-mxp\-lldp package operational data.
This module contains definitions
for the following management objects\:
lldp\-snoop\-data\: Information related to LLDP Snoop
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
# NOTE: this class is auto-generated from the Cisco-IOS-XR-ncs1k-mxp-lldp-oper
# YANG model; regenerate rather than hand-editing the logic.
class LldpSnoopData(object):
    """
    Information related to LLDP Snoop

    .. attribute:: ethernet_controller_names

        Ethernet controller snoop data
        **type**\: :py:class:`EthernetControllerNames <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_lldp_oper.LldpSnoopData.EthernetControllerNames>`

    """
    _prefix = 'ncs1k-mxp-lldp-oper'
    _revision = '2015-11-09'
    def __init__(self):
        self.ethernet_controller_names = LldpSnoopData.EthernetControllerNames()
        self.ethernet_controller_names.parent = self
    class EthernetControllerNames(object):
        """
        Ethernet controller snoop data

        .. attribute:: ethernet_controller_name

            port Name
            **type**\: list of :py:class:`EthernetControllerName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_lldp_oper.LldpSnoopData.EthernetControllerNames.EthernetControllerName>`

        """
        _prefix = 'ncs1k-mxp-lldp-oper'
        _revision = '2015-11-09'
        def __init__(self):
            self.parent = None
            self.ethernet_controller_name = YList()
            self.ethernet_controller_name.parent = self
            self.ethernet_controller_name.name = 'ethernet_controller_name'
        class EthernetControllerName(object):
            """
            port Name

            .. attribute:: name <key>

                Port name
                **type**\: str
                **pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)

            .. attribute:: lldp_neighbor

                LldpNeighbor
                **type**\: str
                **range:** 0..40

            """
            _prefix = 'ncs1k-mxp-lldp-oper'
            _revision = '2015-11-09'
            def __init__(self):
                self.parent = None
                self.name = None
                self.lldp_neighbor = None
            @property
            def _common_path(self):
                # The XPath of this list entry requires its key to be set.
                if self.name is None:
                    raise YPYModelError('Key property name is None')
                return '/Cisco-IOS-XR-ncs1k-mxp-lldp-oper:lldp-snoop-data/Cisco-IOS-XR-ncs1k-mxp-lldp-oper:ethernet-controller-names/Cisco-IOS-XR-ncs1k-mxp-lldp-oper:ethernet-controller-name[Cisco-IOS-XR-ncs1k-mxp-lldp-oper:name = ' + str(self.name) + ']'
            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False
            def _has_data(self):
                if not self.is_config():
                    return False
                if self.name is not None:
                    return True
                if self.lldp_neighbor is not None:
                    return True
                return False
            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_lldp_oper as meta
                return meta._meta_table['LldpSnoopData.EthernetControllerNames.EthernetControllerName']['meta_info']
        @property
        def _common_path(self):
            return '/Cisco-IOS-XR-ncs1k-mxp-lldp-oper:lldp-snoop-data/Cisco-IOS-XR-ncs1k-mxp-lldp-oper:ethernet-controller-names'
        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False
        def _has_data(self):
            if not self.is_config():
                return False
            if self.ethernet_controller_name is not None:
                for child_ref in self.ethernet_controller_name:
                    if child_ref._has_data():
                        return True
            return False
        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_lldp_oper as meta
            return meta._meta_table['LldpSnoopData.EthernetControllerNames']['meta_info']
    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-ncs1k-mxp-lldp-oper:lldp-snoop-data'
    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False
    def _has_data(self):
        if not self.is_config():
            return False
        if self.ethernet_controller_names is not None and self.ethernet_controller_names._has_data():
            return True
        return False
    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_lldp_oper as meta
        return meta._meta_table['LldpSnoopData']['meta_info']
|
gobstones/PyGobstones | pygobstones/gui/editor.py | Python | gpl-3.0 | 5,582 | 0.002866 | from views.viewEditor import *
from views.boardPrint.board import *
from views.boardPrint.boardEditor import *
import views.resources
import boardOption
from helpOption import *
from pygobstones.commons.i18n import *
from pygobstones.commons.qt_utils import saveFileName
class Editor(QtGui.QWidget):
    """Stand-alone board editor: load a board, edit it, and persist it
    back to the main window, to disk (.gbb) or to a .png image."""
    def __init__(self, parent, generator):
        super(Editor, self).__init__()
        self.parent = parent
        self.ui = Ui_editor()
        self.ui.setupUi(self)
        icon = QtGui.QIcon(':/logoGobstones.png')
        self.setWindowIcon(icon)
        self.initcomboBoxLoad()
        self.init_combo_box_persist()
        self.init_combo_box_options()
        self.boardOption = boardOption.BoardOption(self)
        self.boardGenerator = generator
        self.getInitialBoardFromMainWindow()
        # Dispatch tables: untranslated combo-box labels -> bound methods.
        # (Replaces the previous exec()-on-string dispatch, which was
        # fragile and unsafe.)
        self.dictionary_load = {
            "Load Initial Board": self.getInitialBoardFromMainWindow,
            "Load from disk": self.loadBoardFromDisk,
            'Load from ...': self.nothing,
        }
        self.dictionary_persist = {
            'Persist board': self.nothing,
            'Set as initial board': self.setInitialBoardToMainWindow,
            'Save board to disk': self.saveBoardFromDisk,
            'Save board to image': self.saveBoardToImage,
        }
        self.dictionary_options = {
            'Options': self.nothing,
            'Options Board': self.openBoardOptionWindow,
            'User Options': self.openUserOptionsWindow,
        }
    def init_combo_box_options(self):
        """Populate the options combo and wire its activation signal."""
        self.connect(self.ui.combo_box_options, QtCore.SIGNAL('activated(QString)'), self.combo_box_options_chosen)
        self.ui.combo_box_options.addItem(i18n('Options'))
        self.ui.combo_box_options.addItem(i18n('Options Board'))
        self.ui.combo_box_options.addItem(i18n('User Options'))
    def initcomboBoxLoad(self):
        """Populate the load combo and wire its activation signal."""
        self.connect(self.ui.comboBoxLoad, QtCore.SIGNAL('activated(QString)'), self.comboBoxLoad_chosen)
        self.ui.comboBoxLoad.addItem(i18n('Load from ...'))
        self.ui.comboBoxLoad.addItem(i18n("Load Initial Board"))
        self.ui.comboBoxLoad.addItem(i18n("Load from disk"))
    def init_combo_box_persist(self):
        """Populate the persist combo and wire its activation signal."""
        self.connect(self.ui.combo_box_persist, QtCore.SIGNAL('activated(QString)'), self.combo_box_persist_chosen)
        self.ui.combo_box_persist.addItem(i18n('Persist board'))
        self.ui.combo_box_persist.addItem(i18n('Set as initial board'))
        self.ui.combo_box_persist.addItem(i18n('Save board to disk'))
        self.ui.combo_box_persist.addItem(i18n('Save board to image'))
    def combo_box_persist_chosen(self, string):
        # Map the (possibly translated) label back to English, then invoke
        # the handler bound to it.
        self.dictionary_persist[getEnglishTraduction(string)]()
    def comboBoxLoad_chosen(self, string):
        self.dictionary_load[getEnglishTraduction(string)]()
    def combo_box_options_chosen(self, string):
        self.dictionary_options[getEnglishTraduction(string)]()
    def nothing(self):
        """Handler for the placeholder first entry of each combo box."""
        pass
    def setInitialBoard(self, board):
        """Show *board* (a board string) in the editor widget."""
        self.board = board
        self.boardGenerator.setInitialBoard(board)
        self.ui.boardEditor.setBoard(self.board)
        self.ui.boardEditor.populate()
    def setInitialBoardToMainWindow(self):
        """Push the edited board to the main window as the initial board."""
        self.board = boardToString(self.ui.boardEditor.getEditedBoard())
        self.parent.setInitialBoard(self.board)
        self.parent.setAtNothingBoardOptions()
        self.reset_combo_persist()
        self.reset_combo_options()
    def getInitialBoardFromMainWindow(self):
        """Load the main window's current initial board into the editor."""
        board = self.boardGenerator.getStringBoard()
        self.setInitialBoard(board)
        self.reset_combo_load()
    def openUserOptionsWindow(self):
        self.command = CommandHelpWidget()
        self.command.show()
        self.reset_combo_options()
    def openBoardOptionWindow(self):
        self.boardOption.openBoardOptionWindow(self.parent.initialBoardGenerator)
        self.reset_combo_options()
    def update(self):
        """Re-read the generator's board and repaint the editor."""
        board = self.boardGenerator.getInitialBoard()
        self.setInitialBoard(board)
        self.ui.boardEditor.populate()
        self.ui.boardEditor.update()
    def loadBoardFromDisk(self):
        self.boardOption.loadBoard()
        self.reset_combo_load()
    def saveBoardFromDisk(self):
        """Save the edited board as a .gbb file chosen by the user."""
        self.board = boardToString(self.ui.boardEditor.getEditedBoard())
        filename = saveFileName(self, '*.gbb')
        if not filename == QtCore.QString(''):
            filename = str(filename)
            if not filename[-4:] == '.gbb':
                filename = filename + '.gbb'
            # 'with' closes the file even if the write fails; the original
            # also computed os.path.split() and never used the result.
            with open(filename, 'w') as board_file:
                board_file.write(self.board)
        self.reset_combo_persist()
    def saveBoardToImage(self):
        """Export the edited board as a .png image."""
        filename = saveFileName(self, '*.png')
        if not filename == QtCore.QString(''):
            self.ui.boardEditor.save_to_image(filename)
        self.reset_combo_persist()
    def reset_combo_load(self):
        self.ui.comboBoxLoad.setCurrentIndex(0)
    def reset_combo_persist(self):
        self.ui.combo_box_persist.setCurrentIndex(0)
    def reset_combo_options(self):
        self.ui.combo_box_options.setCurrentIndex(0)
    def paintEvent(self, event):
        """Draw the shared background image behind the widget."""
        painter = QtGui.QPainter()
        # NOTE(review): 'painter.Antialiasing' is a no-op attribute access;
        # setRenderHint(QtGui.QPainter.Antialiasing) was probably intended.
        painter.Antialiasing
        painter.begin(self)
        rect = QtCore.QRect(0, 0, 1920, 1080)
        img = QtGui.QImage(':/backgroundWidget.png')
        painter.drawImage(rect, img)
        painter.end()
|
abonaca/gary | gary/coordinates/__init__.py | Python | mit | 124 | 0 | fr | om .core import *
from .sgr import *
from .orphan import *
from .propermotion import *
from .velocity_transforms import | *
|
shubhamdipt/django-autocomplete-light | test_project/select2_taggit/migrations/0001_initial.py | Python | mit | 1,006 | 0.002982 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-30 15:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
# NOTE: auto-generated schema migration (Django 1.9); historical migrations
# should stay frozen once deployed -- add a new migration instead of editing.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        ('taggit', '0002_auto_20150616_2121'),
    ]
    operations = [
        migrations.CreateModel(
            name='TModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('for_inline', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inline_test_models', to='select2_taggit.TModel')),
                ('test', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
            ],
        ),
    ]
|
Bitka0/serenade | homepage/usermanag/views.py | Python | mit | 1,656 | 0.027174 | # coding: utf-8
# Copyright (c) 2011 Lukas Martini, Phillip Thelen.
# This file may be used and distributed under the terms found in the
# file COPYING, which you should have received along with this
# program. If you haven't, please refer to bofh@junge-piraten.de.
from django.utils.translation import ugettext_lazy as _
from django.template import Context, loader
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, ge | t_object_or_404, get_list_or_404, redirect
import util
from django.contrib.auth import authenticate, login, logout
def userlogin(request):
    """Authenticate the POSTed credentials and log the user in.

    Renders the login template with a status title/message describing
    success, a disabled account, or bad credentials.
    """
    username = request.POST['username']
    password = request.POST['password']
    user = authenticate(username=username, password=password)
    if user is not None:
        if user.is_active:
            login(request, user)
            title = _("Login was successful")
            # Fix: translate first, then interpolate.  The original called
            # .format() *inside* _(), so the catalog was asked to translate
            # the already-formatted string ("Welcome bob"), which can never
            # match a translation entry.
            message = _("Welcome {0}").format(username)
        else:
            title = _("Login failed")
            message = _("The account is disabled")
    else:
        title = _("Login failed")
        message = _("Username and/or password wrong")
    context = util.generateContext(request, contextType = 'RequestContext', title = title, message = message)
    return render_to_response('user/login.html', context)
def userlogout(request):
    """Log the user out and render the login page with a confirmation."""
    logout(request)
    ctx = util.generateContext(
        request,
        contextType = 'RequestContext',
        title = _("Logout was successful"),
        message = _("You were successfully logged out"),
    )
    return render_to_response('user/login.html', ctx)
def showlogin(request):
    """Render the plain login form."""
    ctx = util.generateContext(request, contextType = 'RequestContext', title = _("Login"))
    return render_to_response('user/login.html', ctx)
|
hughperkins/kgsgo-dataset-preprocessor | thirdparty/future/tests/test_future/test_pasteurize.py | Python | mpl-2.0 | 7,644 | 0.001177 | # -*- coding: utf-8 -*-
"""
This module contains snippets of Python 3 code (invalid Python 2) and
tests for whether they can be passed to ``pasteurize`` and
immediately run under both Python 2 and Python 3.
"""
from __future__ import print_function, absolute_import
import pprint
from subprocess import Popen, PIPE
import tempfile
import os
from future.tests.base import CodeHandler, unittest, skip26
class TestPasteurize(CodeHandler):
    """
    After running ``pasteurize``, these Python 3 code snippets should run
    on both Py3 and Py2.
    """
    @skip26    # Python 2.6's lib2to3 causes the "from builtins import
               # range" line to be stuck at the bottom of the module!
    def test_range_slice(self):
        """
        After running ``pasteurize``, this Python 3 code should run
        quickly on both Py3 and Py2 without a MemoryError
        """
        code = '''
        for i in range(10**15)[:10]:
            pass
        '''
        self.unchanged(code, from3=True)
    def test_print(self):
        """
        This Python 3-only code is a SyntaxError on Py2 without the
        print_function import from __future__.
        """
        code = '''
        import sys
        print('Hello', file=sys.stderr)
        '''
        self.unchanged(code, from3=True)
    def test_division(self):
        """
        True division should not be screwed up by conversion from 3 to both
        """
        code = '''
        x = 3 / 2
        assert x == 1.5
        '''
        self.unchanged(code, from3=True)
    # TODO: write / fix the raise_ fixer so that it uses the raise_ function
    @unittest.expectedFailure
    def test_exception_indentation(self):
        """
        As of v0.11.2, pasteurize broke the indentation of ``raise`` statements
        using with_traceback. Test for this.
        """
        before = '''
        import sys
        if True:
            try:
                'string' + 1
            except TypeError:
                ty, va, tb = sys.exc_info()
                raise TypeError("can't do that!").with_traceback(tb)
        '''
        after = '''
        import sys
        from future.utils import raise_with_traceback
        if True:
            try:
                'string' + 1
            except TypeError:
                ty, va, tb = sys.exc_info()
                raise_with_traceback(TypeError("can't do that!"), tb)
        '''
        self.convert_check(before, after, from3=True)
    # TODO: fix and test this test
    @unittest.expectedFailure
    def test_urllib_request(self):
        """
        Example Python 3 code using the new urllib.request module.
        Does the ``pasteurize`` script handle this?
        """
        before = """
        import pprint
        import urllib.request
        URL = 'http://pypi.python.org/pypi/{}/json'
        package = 'future'
        r = urllib.request.urlopen(URL.format(package))
        pprint.pprint(r.read())
        """
        after = """
        import pprint
        import future.standard_library.urllib.request as urllib_request
        URL = 'http://pypi.python.org/pypi/{}/json'
        package = 'future'
        r = urllib_request.urlopen(URL.format(package))
        pprint.pprint(r.read())
        """
        self.convert_check(before, after, from3=True)
    # NOTE(review): this test defines 'before'/'after' but never calls
    # convert_check, so it currently asserts nothing.
    def test_urllib_refactor2(self):
        before = """
        import urllib.request, urllib.parse
        f = urllib.request.urlopen(url, timeout=15)
        filename = urllib.parse.urlparse(url)[2].split('/')[-1]
        """
        after = """
        from future.standard_library.urllib import request as urllib_request
        from future.standard_library.urllib import parse as urllib_parse
        f = urllib_request.urlopen(url, timeout=15)
        filename = urllib_parse.urlparse(url)[2].split('/')[-1]
        """
    @skip26    # mysterious sporadic UnicodeDecodeError raised by lib2to3 ...
    def test_correct_exit_status(self):
        """
        Issue #119: futurize and pasteurize were not exiting with the correct
        status code. This is because the status code returned from
        libfuturize.main.main() etc. was a ``newint``, which sys.exit() always
        translates into 1!
        """
        from libpasteurize.main import main
        # Try pasteurizing this test script:
        retcode = main([__file__])
        self.assertTrue(isinstance(retcode, int))  # i.e. Py2 builtin int
class TestFuturizeAnnotations(CodeHandler):
@unittest.expectedFailure
def test_return_annotations_alone(self):
before = "def foo() -> 'bar': pass"
after = """
def foo(): pass
foo.__annotations__ = {'return': 'bar'}
"""
self.convert_check(before, after, from3=True)
b = """
def foo() -> "bar":
print "baz"
print "what's next, again?"
"""
a = """
def foo():
print "baz"
print "what's next, again?"
"""
self.convert_check(b, a, from3=True)
@unittest.expectedFailure
def test_single_param_annotations(self):
b = "def foo(bar:'baz'): pass"
a = """
def foo(bar): pass
foo.__annotations__ = {'bar': 'baz'}
"""
self.convert_check(b, a, from3=True)
b = """
def foo(bar:"baz"="spam"):
print("what's next, again?")
print("whatever.")
"""
a = """
def foo(bar="spam"):
print("what's next, again?")
print("whatever.")
foo.__annotations__ = {'bar': 'baz'}
"""
self.convert_check(b, a, from3=True)
def test_multiple_param_annotations(self):
b = "def foo(bar:'spam'=False, baz:'eggs'=True, ham:False='spaghetti'): pass"
a = "def foo(bar=False, baz=True, ham='spaghetti'): pass"
self.convert_check(b, a, from3=True)
b = """
def foo(bar:"spam"=False, baz:"eggs"=True, ham:False="spam"):
print("this is filler, just doing a suite")
print("suites require multiple lines.")
"""
a = """
def foo(bar=False, baz=True, ham="spam"):
print("this is filler, just doing a suite")
print("suites require multiple lines.")
"""
self.convert_check(b, a, from3=True)
def test_mixed_annotations(self):
b = "def foo(bar=False, baz:'eggs'=True, ham:False='spaghetti') -> 'zombies': pass"
a = "def foo(bar=False, baz=True, ham='spaghetti'): pass"
self.convert_check(b, a, from3=True)
b = """
def foo(bar:"spam"=False, baz=True, ham:False="spam") -> 'air':
print("this is filler, just doing a suite")
print("suites require multiple lines.")
"""
a = """
def foo(bar=False, baz=True, ham="spam"):
print("this is filler, just doing a suite")
print("suites require multiple lines.")
"""
self.convert_check(b, a, from3=True)
b = "def foo(bar) -> 'brains': pass"
a = "def foo(bar): pass"
self.convert_check(b, a, from3=True)
def test_functions_unchanged(self):
s = "def foo(): pass"
self.unchanged(s, from3=True)
s = """
def foo():
pass
pass
"""
self.unchanged(s, from3=True)
s = """
def foo(bar='baz'):
pass
pass
"""
self.unchanged(s, from3=True)
if __name__ == '__main__':
unittest.main()
|
henglinyang/Pace | TrainWeeks.py | Python | bsd-2-clause | 1,931 | 0.001554 | from datetime import date, timedelta
from sys import argv
class TrainWeeks(object):
def __init__(self, month, day, year, nr_weeks=16, nr_days=0):
self._race = date(year, month, day)
self._duration = timedelta(weeks=nr_weeks, days=nr_days)
self._start = self._race - self._duration + timedelta(1)
@property
def race_day(self):
return '{m}/{d}/{y}'.format(
m=self._race.month, d=self._race.day, y=self._race.year)
@property
def training_start_day(self):
return '{m}/{d}/{y}'.format(
m=self._start.month, d=self._start.day, y=self._start.year)
@property
def duration(self):
return self._duration.days
def __str__(self):
return 'Training of {d} days starts at {s} for race day {r} '.format(
r=self.race_day, s=self.training_start_day, d=self.duration)
def __repr__(self):
return 'TrainWeeks({m}, {d}, {y}, nr_days={u})'.format(
m=self._race.month,
d= | self._race.day,
y=self._race.year,
u=self._duration.days)
if __name__ == '__main__':
def usage_and_exit():
print('Today is {t}.\nUsage: {c} <mm/dd/yy> <number of weeks>'.format(
t=dat | e.today(), c=argv[0].split('/')[-1]))
exit()
argc = len(argv)
if argc == 1:
usage_and_exit()
date_minutes = argv[1].split('/')
if len(date_minutes) < 2:
usage_and_exit()
if len(date_minutes) < 3:
date_minutes.append(str(date.today().year))
if argc < 3:
tw = TrainWeeks(int(date_minutes[0]),
int(date_minutes[1]),
int(date_minutes[2]))
else:
tw = TrainWeeks(int(date_minutes[0]),
int(date_minutes[1]),
int(date_minutes[2]),
nr_weeks=int(argv[2]))
print(tw)
|
AKFourSeven/antoinekougblenou | old/wiki/includes/zhtable/Makefile.py | Python | mit | 13,221 | 0.037138 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# @author Philip
import tarfile as tf
import zipfile as zf
import os, re, shutil, sys, platform
pyversion = platform.python_version()
islinux = platform.system().lower() == 'linux'
if pyversion[:3] in ['2.6', '2.7']:
import urllib as urllib_request
import codecs
open = codecs.open
_unichr = unichr
if sys.maxunicode < 0x10000:
def unichr(i):
if i < 0x10000:
return _unichr(i)
else:
return _unichr( 0xD7C0 + ( i>>10 ) ) + _unichr( 0xDC00 + ( i & 0x3FF ) )
elif pyversion[:2] == '3.':
import urllib.request as urllib_request
unichr = chr
def unichr2( *args ):
return [unichr( int( i.split('<')[0][2:], 16 ) ) for i in args]
def unichr3( *args ):
return [unichr( int( i[2:7], 16 ) ) for i in args if i[2:7]]
# DEFINE
UNIHAN_VER = '5.2.0'
SF_MIRROR = 'cdnetworks-kr-2'
SCIM_TABLES_VER = '0.5.10'
SCIM_PINYIN_VER = '0.5.91'
LIBTABE_VER = '0.2.3'
# END OF DEFINE
def download( url, dest ):
if os.path.isfile( dest ):
print( 'File %s up to date.' % dest )
return
global islinux
if islinux:
# we use wget instead urlretrieve under Linux,
# because wget could display details like download progress
os.system( 'wget %s -O %s' % ( url, dest ) )
else:
print( 'Downloading from [%s] ...' % url )
urllib_request.urlretrieve( url, dest )
print( 'Download complete.\n' )
return
def uncompress( fp, member, encoding = 'U8' ):
name = member.rsplit( '/', 1 )[-1]
print( 'Extracting %s ...' % name )
fp.extract( member )
shutil.move( member, name )
if '/' in member:
shutil.rmtree( member.split( '/', 1 )[0] )
return open( name, 'rb', encoding, 'ignore' )
unzip = lambda path, member, encoding = 'U8': \
uncompress( zf.ZipFile( path ), member, encoding )
untargz = lambda path, member, encoding = 'U8': \
uncompress( tf.open( path, 'r:gz' ), member, encoding )
def parserCore( fp, pos, beginmark = None, endmark = None ):
if beginmark and endmark:
start = False
else: start = True
mlist = set()
for line in fp:
if beginmark and line.startswith( beginmark ):
start = True
continue
elif endmark and line.startswith( endmark ):
break
if start and not line.startswith( '#' ):
elems = line.split()
if len( elems ) < 2:
continue
elif len( elems[0] ) > 1 and \
len( elems[pos] ) > 1: # words only
mlist.add( elems[pos] )
return mlist
def tablesParser( path, name ):
""" Read file from scim-tables and parse it. """
global SCIM_TABLES_VER
src = 'scim-tables-%s/tables/zh/%s' % ( SCIM_TABLES_VER, name )
fp = untargz( path, src, 'U8' )
return parserCore( fp, 1, 'BEGIN_TABLE', 'END_TABLE' )
ezbigParser = lambda path: tablesParser( path, 'EZ-Big.txt.in' )
wubiParser = lambda path: tablesParser( path, 'Wubi.txt.in' )
zrmParser = lambda path: tablesParser( path, 'Ziranma.txt.in' )
def phraseParser( path ):
""" Read phrase_lib.txt and parse it. """
global SCIM_PINYIN_VER
src = 'scim-pinyin-%s/data/phrase_lib.txt' % SCIM_PINYIN_VER
dst = 'phrase_lib.txt'
fp = untargz( path, src, 'U8' )
return parserCore( fp, 0 )
def tsiParser( path ):
""" Read tsi.src and parse it. """
src = 'libtabe/tsi-src/tsi.src'
dst = 'tsi.src'
fp = untargz( path, src, 'big5hkscs' )
return parserCore( fp, 0 )
def unihanParser( path ):
""" Read Unihan_Variants.txt and parse it. """
fp = unzip( path, 'Unihan_Variants.txt', 'U8' )
t2s = dict()
s2t = dict()
for line in fp:
if line.startswith( '#' ):
continue
else:
elems = line.split()
if len( elems ) < 3:
continue
type = elems.pop( 1 )
elems = unichr2( *elems )
if type == 'kTraditionalVariant':
s2t[elems[0]] = elems[1:]
elif type == 'kSimplifiedVariant':
t2s[elems[0]] = elems[1:]
fp.close()
return ( t2s, s2t )
def applyExcludes( mlist, path ):
""" Apply exclude rules from path to mlist. """
excludes = open( path, 'rb', 'U8' ).read().split()
excludes = [word.split( '#' )[0].strip() for word in excludes]
excludes = '|'.join( excludes )
excptn = re.compile( '.*(?:%s).*' % excludes )
diff = [mword for mword in mlist if excptn.search( mword )]
mlist.difference_update( diff )
return mlist
def charManualTable( path ):
fp = open( path, 'rb', 'U8' )
ret = {}
for line in fp:
elems = line.split( '#' )[0].split( '|' )
elems = unichr3( *elems )
if len( elems ) > 1:
ret[elems[0]] = elems[1:]
return ret
def toManyRules( src_table ):
tomany = set()
for ( f, t ) in src_table.iteritems():
for i in range( 1, len( t ) ):
tomany.add( t[i] )
return tomany
def removeRules( path, table ):
fp = open( path, 'rb', 'U8' )
texc = list()
for line in fp:
elems = line.split( '=>' )
f = t = elems[0].strip()
if len( elems ) == 2:
t = elems[1].strip()
f = f.strip('"').strip("'")
t = t.strip('"').strip("'")
if f:
try:
table.pop( f )
except:
pass
if t:
texc.append( t )
texcptn = re.compile( '^(?:%s)$' % '|'.join( texc ) )
for (tmp_f, tmp_t) in table.copy().iteritems():
if texcptn.match( tmp_t ):
table.pop( tmp_f )
return table
def customRules( path ):
fp = open( path, 'rb', 'U8' )
ret = dict()
for line in fp:
elems = line.split( '#' )[0].split()
if len( elems ) > 1:
ret[elems[0]] = elems[1]
return | ret
def dictToSortedList( src_table, pos ):
return sorted( src_table.it | ems(), key = lambda m: m[pos] )
def translate( text, conv_table ):
i = 0
while i < len( text ):
for j in range( len( text ) - i, 0, -1 ):
f = text[i:][:j]
t = conv_table.get( f )
if t:
text = text[:i] + t + text[i:][j:]
i += len(t) - 1
break
i += 1
return text
def manualWordsTable( path, conv_table, reconv_table ):
fp = open( path, 'rb', 'U8' )
reconv_table = {}
wordlist = [line.split( '#' )[0].strip() for line in fp]
wordlist = list( set( wordlist ) )
wordlist.sort( key = len, reverse = True )
while wordlist:
word = wordlist.pop()
new_word = translate( word, conv_table )
rcv_word = translate( word, reconv_table )
if word != rcv_word:
reconv_table[word] = word
reconv_table[new_word] = word
return reconv_table
def defaultWordsTable( src_wordlist, src_tomany, char_conv_table, char_reconv_table ):
wordlist = list( src_wordlist )
wordlist.sort( key = len, reverse = True )
word_conv_table = {}
word_reconv_table = {}
conv_table = char_conv_table.copy()
reconv_table = char_reconv_table.copy()
tomanyptn = re.compile( '(?:%s)' % '|'.join( src_tomany ) )
while wordlist:
conv_table.update( word_conv_table )
reconv_table.update( word_reconv_table )
word = wordlist.pop()
new_word_len = word_len = len( word )
while new_word_len == word_len:
add = False
test_word = translate( word, reconv_table )
new_word = translate( word, conv_table )
if not reconv_table.get( new_word ) \
and ( test_word != word \
or ( tomanyptn.search( word ) \
and word != translate( new_word, reconv_table ) ) ):
word_conv_table[word] = new_word
word_reconv_table[new_word] = word
try:
word = wordlist.pop()
except IndexError:
break
new_word_len = len(word)
return word_reconv_table
def PHPArray( table ):
|
jnidzwetzki/bboxdb | bin/experiments/experiment_sampling_calculate_std.py | Python | apache-2.0 | 2,322 | 0.030146 | #!/usr/bin/python
#
# Calculate the standard deviation of the
# sampling size experiment
#
############################################
import sys
import re
import math
# Check args
if len(sys.argv) < 2:
print "Usage:", sys.argv[0], "<filename>"
sys.exit(0)
# Regex
samplingSizePattern = re.compile("^Simulating with sample size: | ([\d\.]+)");
experimentPattern = re.compile("^(\d+)\s(\d+)\s(\d+)\s\d+\s\d+")
class Experiment(object):
samplingSize = -1
leftRegion = []
totalElements = -1
def __init__(self, samplingSize):
self.samplingSize = sa | mplingSize
self.leftRegion = []
self. totalElements = -1
def get_std_str(self):
'''Error in STD'''
totalDiff = 0;
for result in self.leftRegion:
diff = abs(self.totalElements/2 - result)
totalDiff = totalDiff + math.pow(diff, 2)
std = math.sqrt(totalDiff / len(self.leftRegion))
stdPer = float(std) / float(self.totalElements) * 100.0
'''Error on AVG'''
totalDiff = 0;
for result in self.leftRegion:
totalDiff = totalDiff + abs(self.totalElements/2 - result)
average = totalDiff / len(self.leftRegion)
averagePer = float(average) / float(self.totalElements) * 100.0
return self.samplingSize + "\t" + str(stdPer) + "\t" + str(averagePer)
def __str__(self):
return self.get_std_str()
def set_total_elements(self, totalElements):
self.totalElements = int(totalElements)
def append_experiment_result(self, result):
self.leftRegion.append(int(result))
# Global variables
experiment = None
''' Handle a line of the input file'''
def handleLine(line):
global experiment
experimentMatcher = experimentPattern.match(line)
if experimentMatcher:
experimentRun = experimentMatcher.group(1)
experimentTotal = experimentMatcher.group(2)
experimentLeft = experimentMatcher.group(3)
experiment.set_total_elements(experimentTotal)
experiment.append_experiment_result(experimentLeft)
# print line,
sampleMatcher = samplingSizePattern.match(line)
if sampleMatcher:
if not experiment is None:
print experiment
experiment = Experiment(sampleMatcher.group(1))
print "#Sampling size STD error AVG errror"
''' Read file '''
filename = sys.argv[1]
fh = open(filename, "r")
for line in fh:
handleLine(line)
fh.close();
''' Print last experiment '''
if not experiment is None:
print experiment
|
sejust/pykit | zkutil/__init__.py | Python | mit | 1,117 | 0 | from .exceptions imp | ort (
ZKWaitTimeout,
)
from .zkacid import (
cas_loop,
)
from .zkconf import (
KazooClientExt,
ZKConf,
kazoo_client_ext,
)
from .zkutil import (
PermTypeError,
close_zk,
init_hierarchy,
export_hierarchy,
is_backward_locking,
lock_id,
make_acl_entry,
make_digest,
make_kazoo_digest | _acl,
parse_kazoo_acl,
parse_lock_id,
perm_to_long,
perm_to_short,
wait_absent,
get_next,
)
from .zklock import (
LockTimeout,
ZKLock,
make_identifier,
)
from .cached_reader import (
CachedReader,
)
__all__ = [
"PermTypeError",
"ZKWaitTimeout",
"cas_loop",
"KazooClientExt",
"ZKConf",
"kazoo_client_ext",
"close_zk",
"init_hierarchy",
"export_hierarchy",
"is_backward_locking",
"lock_id",
"make_acl_entry",
"make_digest",
"make_kazoo_digest_acl",
"parse_kazoo_acl",
"parse_lock_id",
"perm_to_long",
"perm_to_short",
"wait_absent",
"get_next",
"LockTimeout",
"ZKLock",
"CachedReader",
"make_identifier",
]
|
tracykteal/replicate-filter | scripts/repeats_html_template.py | Python | gpl-3.0 | 2,972 | 0.004711 | HTML_TEMPLATE = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/
DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<!--
Copyright: Darren Hester 2006, http://www.designsbydarren.com
License: Released Under the "Creative Commons License", http://creativecommons.org/licenses/by-nc/2.5/
-->
<head>
<!-- Meta Data -->
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" /><meta name="description" content="Free 2-Column CSS Web Design Template" /><meta name="keywords" content="Free, 2-Column, CSS, Web, Design, Template" />
<!-- Site Title -->
<title>454 Replicate Filter</title>
<!-- Link to Style External Sheet -->
<link href="../../css/style.css" type="text/css" rel="stylesheet" />
</head>
<body>
<div id="page_wrapper">
<div id="page_header">
<h1>Schmidt Lab</h1>
<h2>Power, efficiency and microbial communities</h2>
</div>
<div id="menu_bar">
<ul>
<li><a href="#">Home</a></li>
<li><a href="#">Research</a></li>
<li><a href="#">Data</a></li>
<li><a href="#">Software</a></li>
<li><a href="#">Protocols</a></li>
</ul>
</div>
<div id="content_wrapper">
<div id="left_side">
<h2>454 Replicate Filter</h2>
<h3>Data summary</h3>
File: %s
<br>Evaluated with:<br> cutoff %s, length requirement %s0 and initial base pair match %s
%s
<h3>Files</h3>
<p>Right click or Control-click on the file name and you will be able to download it.
<table>
<tr><td>Set of unique reads:</td> <td><a href=%s/%s/%s_unique.fa>Fasta file</a></td></tr>
<tr><td>Summary of clusters: <td><a href=%s/%s/%s.cluster_summary>cluster summary text</a>
<tr><td>Sequences in each cluster: <td><a href=%s/%s/%s.fasta_clusters>cluster list</a>
</table>
<p>--------------------------------
<br>Version 1.0 - updated March 26, 2009
</div>
<div id="right_side">
<p class="block"><strong>Note: </strong>
Sequences that cluster together by <a href=http://www.bioinformatics.org/cd-hit/>CD\
-HIT</a> and start
with the same beginning base pairs are identified as replicates and clustered.
If many sequences are expected to look similiar and start at the same position, thi\
s is not
the right tool for your data, e.g. 454 tag data.
<p class="block"><strong>Availability:</strong>
<br>These scripts are all open source and distributed under the Gnu GPL. They can also be run at the command line witho\
ut the web interface. They are currently available | if you contact the authors and will be made available here soon.
<p class="block"><s | trong>Comments/Questions:</strong>
<br>If you have any comments or questions about these programs, please contact the \
<a href=mailto:tkteal@msu.edu>authors</a>.
</div>
</div>
<div id="page_footer">
<p><font size=-2> <br />
<!--
<a href="http://validator.w3.org/check?uri=referer" target="_blank">Valid XHTML 1.0 Transitional</a></p></font>
-->
</div>
</div>
</body>
</html>
"""
|
krafczyk/spack | var/spack/repos/builtin/packages/perl-soap-lite/package.py | Python | lgpl-2.1 | 1,976 | 0.001012 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the | Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with thi | s program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlSoapLite(PerlPackage):
"""Perl's Web Services Toolkit"""
homepage = "http://search.cpan.org/~phred/SOAP-Lite-1.20/lib/SOAP/Lite.pm"
url = "http://search.cpan.org/CPAN/authors/id/P/PH/PHRED/SOAP-Lite-1.22.tar.gz"
version('1.22', '86c31341311498a08c6309e62168f655')
depends_on('perl-io-sessiondata', type=('build', 'run'))
depends_on('perl-lwp-protocol-https', type=('build', 'run'))
depends_on('perl-task-weaken', type=('build', 'run'))
depends_on('perl-xml-parser-lite', type=('build', 'run'))
depends_on('perl-xml-parser', type=('build', 'run'))
depends_on('perl-test-warn', type=('build', 'run'))
depends_on('perl-class-inspector', type=('build', 'run'))
|
birdchan/project_euler | problems/014/run.py | Python | mit | 979 | 0.034729 |
import math
import string
lookup_map = {}
def memcache_read(n):
global lookup_map
if lookup_map.has_key(n):
return lookup_map[n]
else:
return None
def memcache_write(n, value):
global lookup_map
lookup_map[n] = value
def get_chain_length(n):
| # check cache
cache = memcache_read(n)
if cache != None:
return cache
# no cache, so caculate
if n <= 1:
memcache_write(1, 1)
return 1
if n % 2 == 0:
n = n / 2
else:
n = 3*n + 1
return get_chain_length(n) + 1
def find_longest_chain_under_N(n):
max_chai | n_num = -1
max_chain_length = 0
for i in xrange(1, n, 1):
chain_length = get_chain_length(i)
memcache_write(i, chain_length)
if chain_length > max_chain_length:
max_chain_length = chain_length
max_chain_num = i
#print max_chain_num
#print max_chain_length
return max_chain_num
if __name__ == '__main__':
#print find_longest_chain_under_N(3)
print find_longest_chain_under_N(1000000)
|
getpatchwork/patchwork | patchwork/api/__init__.py | Python | gpl-2.0 | 1,807 | 0 | # Patchwork - automated patch tracking system
# Copyright (C) 2020, Stephen Finucane <stephen@that.guru>
#
# SPDX-License-Identifier: GPL-2.0-or-later
from rest_framework.fields import empty
from rest_framework.fields import get_attribute
from rest_framework.fields import SkipField
from rest_framework.relations import ManyRelatedField
# mon | key patch django-rest-framework to work around issue #7550 [1] until #7574
# [2] or some other variant lands
#
# [1] https://github | .com/encode/django-rest-framework/issues/7550
# [2] https://github.com/encode/django-rest-framework/pull/7574
def _get_attribute(self, instance):
# Can't have any relationships if not created
if hasattr(instance, 'pk') and instance.pk is None:
return []
try:
relationship = get_attribute(instance, self.source_attrs)
except (KeyError, AttributeError) as exc:
if self.default is not empty:
return self.get_default()
if self.allow_null:
return None
if not self.required:
raise SkipField()
msg = (
'Got {exc_type} when attempting to get a value for field '
'`{field}` on serializer `{serializer}`.\nThe serializer '
'field might be named incorrectly and not match '
'any attribute or key on the `{instance}` instance.\n'
'Original exception text was: {exc}.'.format(
exc_type=type(exc).__name__,
field=self.field_name,
serializer=self.parent.__class__.__name__,
instance=instance.__class__.__name__,
exc=exc
)
)
raise type(exc)(msg)
return relationship.all() if hasattr(relationship, 'all') else relationship
ManyRelatedField.get_attribute = _get_attribute
|
kylewmoser/HistoryEmergent | run.py | Python | mit | 187 | 0.005348 | from histo | ryemergent import app
# Comment out the line below if you are using something other than the built-in
# Flask development server
app.run(host='127.0.0.1', port=5000, debug=Tru | e) |
seims/SEIMS | preprocess/post_process_taudem.py | Python | gpl-2.0 | 8,376 | 0.000955 | #! /usr/bin/env python
# coding=utf-8
# Post process of TauDEM
# 1. convert subbasin raster to polygon shapefile
# 2. add width and default depth to reach.shp
# @Author: Junzhi Liu, 2012-4-12
# @Revised: Liang-Jun Zhu, 2016-7-7
#
import platform
from osgeo import ogr
from chwidth import chwidth
from config import *
from util import *
def GenerateSubbasinVector(subbasinRaster, subbasinVector, layerName, fieldName):
RemoveShpFile(subbasinVector)
# raster to polygon vector
if platform.system() == 'Windows':
exepath = '"%s/Scripts/gdal_polygonize.py"' % sys.exec_prefix
else:
exepath = GetExecutableFullPath("gdal_polygonize.py")
strCmd = '%s -f "ESRI Shapefile" %s %s %s %s' % (exepath, subbasinRaster, subbasinVector, layerName, fieldName)
print strCmd
# os.system(strCmd)
process = subprocess.Popen(strCmd, shell=True, stdout=subprocess.PIPE)
print process.stdout.readlines()
def SerializeStreamNet(streamNetFile, outputReachFile):
CopyShpFile(streamNetFile, outputReachFile)
dsReach = ogr.Open(outputReachFile, update=True)
layerReach = dsReach.GetLayer(0)
layerDef = layerReach.GetLayerDefn()
iLink = layerDef.GetFieldIndex(FLD_LINKNO)
iLinkDownSlope = layerDef.GetFieldIndex(FLD_DSLINKNO)
iLen = layerDef.GetFieldIndex(REACH_LENGTH)
oldIdList = []
# there are some reaches with zero length.
# this program will remove these zero-length reaches
# outputDic is used to store the downstream reaches of these zero-length
# reaches
outputDic = {}
ft = layerReach.GetNextFeature()
while ft is not None:
id = ft.GetFieldAsInteger(iLink)
reachLen = ft.GetFieldAsDouble(iLen)
if not id in oldIdList:
if reachLen < UTIL_ZERO:
downstreamId = ft.GetFieldAsInteger(iLinkDownSlope)
outputDic[id] = downstreamId
else:
oldIdList.append(id)
ft = layerReach.GetNextFeature()
oldIdList.sort()
idMap = {}
n = len(oldIdList)
for i in range(n):
idMap[oldIdList[i]] = i + 1
# print idMap
# change old ID to new ID
layerReach.ResetReading()
ft = layerReach.GetNextFeature()
while ft is not None:
id = ft.GetFieldAsInteger(iLink)
if not id in idMap.keys():
layerReach.DeleteFeature(ft.GetFID())
ft = layerReach.GetNextFeature()
continue
dsId = ft.GetFieldAsInteger(iLinkDownSlope)
dsId = outputDic.get(dsId, dsId)
dsId = outputDic.get(dsId, dsId)
ft.SetField(FLD_LINKNO, idMap[id])
if dsId in idMap.keys():
ft.SetField(FLD_DSLINKNO, idMap[dsId])
else:
# print dsId
ft.SetField(FLD_DSLINKNO, -1)
layerReach.SetFeature(ft)
ft = layerReach.GetNextFeature()
dsReach.ExecuteSQL("REPACK reach")
layerReach.SyncToDisk()
dsReach.Destroy()
del dsReach
return idMap
def SerializeSubbasin(subbasinFile, streamRasterFile, idMap,
outputSubbasinFile, outputStreamLinkFile):
subbasin = ReadRaster(subbasinFile)
nRows = subbasin.nRows
nCols = subbasin.nCols
noDataValue = subbasin.noDataValue
data = subbasin.data
streamRaster = ReadRaster(streamRasterFile)
dataStream = streamRaster.data
noDataValueStream = streamRaster.noDataValue
# print noDataValueStream
outputSubbasin = numpy.zeros((nRows, nCols))
outputStream = numpy.zeros((nRows, nCols))
n = len(idMap)
print "number of reaches: ", n
for i in range(nRows):
for j in range(nCols):
if abs(data[i][j] - noDataValue) < UTIL_ZERO:
outputSubbasin[i][j] = noDataValue
else:
# error if the outlet subbasin contains only one grid, i.e.,
# there is no reach for this subbasin
outputSubbasin[i][j] = idMap[int(data[i][j])]
if dataStream[i][j] < UTIL_ZERO:
outputStream[i][j] = noDa | taValueStream
else:
outputStream[i][j] = outputSubbasin[i][j]
WriteGTiffFile(outputSubbasinFile, nRows, nCols, outputSubbasin,
subbasin.geotrans, subbasin.srs, noDataValue, gdal.GDT_Int32)
WriteGTiffFile(outputStreamLinkFile, nRows, nCols, outputStream,
streamRaster.geotrans, stream | Raster.srs, noDataValue, gdal.GDT_Int32)
def ChangeFlowDir(flowDirFileTau, flowDirFileEsri):
# flowDirFileTau is float
dirMap = {1.: 1.,
2.: 128.,
3.: 64.,
4.: 32.,
5.: 16.,
6.: 8.,
7.: 4.,
8.: 2.}
replaceByDict(flowDirFileTau, dirMap, flowDirFileEsri)
def AddWidthToReach(reachFile, stramLinkFile, width):
streamLink = ReadRaster(stramLinkFile)
nRows = streamLink.nRows
nCols = streamLink.nCols
noDataValue = streamLink.noDataValue
dataStream = streamLink.data
chWidthDic = {}
chNumDic = {}
for i in range(nRows):
for j in range(nCols):
if abs(dataStream[i][j] - noDataValue) > UTIL_ZERO:
id = int(dataStream[i][j])
chNumDic.setdefault(id, 0)
chWidthDic.setdefault(id, 0)
chNumDic[id] = chNumDic[id] + 1
chWidthDic[id] = chWidthDic[id] + width[i][j]
for k in chNumDic.keys():
chWidthDic[k] = chWidthDic[k] / chNumDic[k]
# add channel width field to reach shp file
dsReach = ogr.Open(reachFile, update=True)
layerReach = dsReach.GetLayer(0)
layerDef = layerReach.GetLayerDefn()
iLink = layerDef.GetFieldIndex(FLD_LINKNO)
iWidth = layerDef.GetFieldIndex(REACH_WIDTH)
iDepth = layerDef.GetFieldIndex(REACH_DEPTH)
if (iWidth < 0):
new_field = ogr.FieldDefn(REACH_WIDTH, ogr.OFTReal)
layerReach.CreateField(new_field)
if (iDepth < 0):
new_field = ogr.FieldDefn(REACH_DEPTH, ogr.OFTReal)
layerReach.CreateField(new_field)
# grid_code:feature map
# ftmap = {}
layerReach.ResetReading()
ft = layerReach.GetNextFeature()
while ft is not None:
id = ft.GetFieldAsInteger(iLink)
w = 1
if id in chWidthDic.keys():
w = chWidthDic[id]
ft.SetField(REACH_WIDTH, w)
ft.SetField(REACH_DEPTH, default_reach_depth)
layerReach.SetFeature(ft)
ft = layerReach.GetNextFeature()
layerReach.SyncToDisk()
dsReach.Destroy()
del dsReach
def PostProcessTauDEM(dstdir):
tauDir = dstdir + os.sep + DIR_NAME_TAUDEM
streamNetFile = tauDir + os.sep + streamNet
subbasinFile = tauDir + os.sep + subbasinM
flowDirFileTau = tauDir + os.sep + flowDirM
streamRasterFile = tauDir + os.sep + streamRasterM
reachDir = dstdir + os.sep + DIR_NAME_REACH
if not os.path.exists(reachDir):
os.mkdir(reachDir)
outputReachFile = reachDir + os.sep + reachesOut
outputSubbasinFile = dstdir + os.sep + subbasinOut
outputFlowDirFile = dstdir + os.sep + flowDirOut
outputStreamLinkFile = dstdir + os.sep + streamLinkOut
subbasinDir = dstdir + os.sep + DIR_NAME_SUBBSN
if not os.path.exists(subbasinDir):
os.mkdir(subbasinDir)
subbasinVectorFile = subbasinDir + os.sep + subbasinVec
idMap = SerializeStreamNet(streamNetFile, outputReachFile)
SerializeSubbasin(subbasinFile, streamRasterFile, idMap,
outputSubbasinFile, outputStreamLinkFile)
# Change TauDEM code to ArcGIS. Now, it is deprecated, By LJ.
if(isTauDEM):
shutil.copy(flowDirFileTau, outputFlowDirFile)
else:
ChangeFlowDir(flowDirFileTau, outputFlowDirFile)
accFile = dstdir + os.sep + accM
chwidthFile = dstdir + os.sep + chwidthName
width = chwidth(accFile, chwidthFile)
AddWidthToReach(outputReachFile, outputStreamLinkFile, width)
print "Generating subbasin vector..."
GenerateSubbasinVector(outputSubbasinFile, subbasinVectorFile, "subbasin", FLD_SUBBASINID)
maskFile = dstdir + os.sep + mask_to_ext
basinVector = dstdir + os.sep + basinVec
print "Gene |
half2me/libant | libAnt/node.py | Python | mit | 4,209 | 0.001663 | import threading
from queue import Queue, Empty
from time import sleep
from libAnt.drivers.driver import Driver
from libAnt.message import *
class Network:
def __init__(self, key: bytes = b'\x00' * 8, name: str = None):
self.key = key
self.name = name
self.number = 0
def __str__(self):
return self.name
class Pump(threading.Thread):
def __init__(self, driver: Driver, initMessages, out: Queue, onSucces, onFailure):
super().__init__()
self._stopper = threading.Event()
self._driver = driver
self._out = out
self._initMessages = initMessages
self._waiters = []
self._onSuccess = onSucces
self._onFailure = onFailure
def stop(self):
self._driver.abort()
self._stopper.set()
def stopped(self):
return self._stopper.isSet()
def run(self):
while not self.stopped():
try:
with self._driver as d:
# Startup
rst = SystemResetMessage()
self._waiters.append(rst)
d.write(rst)
for m in self._initMessages:
self._waiters.append(m)
d.write(m)
while not self.stopped():
# Write
try:
outMsg = self._out.get(block=False)
self._waiters.append(outMsg)
d.write(outMsg)
except Empty:
pass
# Read
try:
msg = d.read(timeout=1)
if msg.type == MESSAGE_CHANNEL_EVENT:
# This is a response to our outgoing message
for w in self._waiters:
if w.type == msg.content[1]: # ACK
self._waiters.remove(w)
# TODO: Call waiter callback from tuple (waiter, callback)
break
elif msg.type == MESSAGE_CHANNEL_BROADCAST_DATA:
bmsg = BroadcastMessage(msg.type, msg.content).build(msg.content)
self._onSuccess(bmsg)
except Empty:
pass
except Exception as e:
self._onFailure(e)
except:
pass
s | elf._waiters.clear()
sleep(1)
class Node:
def __init__(self, driver: Driver, name: str = None):
self._driver = driver
self._name = name
self._out = Queue()
self._init = []
self._pump = None
self._configMessages = Queue()
| def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def start(self, onSuccess, onFailure):
if not self.isRunning():
self._pump = Pump(self._driver, self._init, self._out, onSuccess, onFailure)
self._pump.start()
def enableRxScanMode(self, networkKey=ANTPLUS_NETWORK_KEY, channelType=CHANNEL_TYPE_ONEWAY_RECEIVE,
frequency: int = 2457, rxTimestamp: bool = True, rssi: bool = True, channelId: bool = True):
self._init.append(SystemResetMessage())
self._init.append(SetNetworkKeyMessage(0, networkKey))
self._init.append(AssignChannelMessage(0, channelType))
self._init.append(SetChannelIdMessage(0))
self._init.append(SetChannelRfFrequencyMessage(0, frequency))
self._init.append(EnableExtendedMessagesMessage())
self._init.append(LibConfigMessage(rxTimestamp, rssi, channelId))
self._init.append(OpenRxScanModeMessage())
def stop(self):
if self.isRunning():
self._pump.stop()
self._pump.join()
def isRunning(self):
if self._pump is None:
return False
return self._pump.is_alive()
def getCapabilities(self):
pass
|
thushear/MLInAction | kaggle/blending.py | Python | apache-2.0 | 3,727 | 0.00161 | """Kaggle competition: Predicting a Biological Response.
Blending {RandomForests, ExtraTrees, GradientBoosting} + stretching to
[0,1]. The blending scheme is related to the idea Jose H. Solorzano
presented here:
http://www.kaggle.com/c/bioresponse/forums/t/1889/question-about-the-process-of-ensemble-learning/10950#post10950
'''You can try this: In one of the 5 folds, train the models, then use
the results of the models as 'variables' in logistic regression over
the validation data of that fold'''. Or at least this is the
implementation of my understanding of that idea :-)
The predictions are saved in test.csv. The code below created my best
submission to the competition:
- public score (25%): 0.43464
- private score (75%): 0.37751
- final rank on the private leaderboard: 17th over 711 teams :-)
Note: if you increase the number of estimators of the classifiers,
e.g. n_estimators=1000, you get a better score/rank on the private
test set.
Copyright 2012, Emanuele Olivetti.
BSD license, 3 clauses.
"""
from __future__ import division
import numpy as np
import load_data
from sklearn.cross_validation import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
def logloss(attempt, actual, epsilon=1.0e-15):
"""Logloss, i.e. the score of the bioresponse competition.
"""
attempt = np.clip(attempt, epsilon, 1.0-epsilon)
return - np.mean(actual * np.log(attempt) +
(1.0 - actual) * np.log(1.0 - attempt))
if __name__ == '__main__':
np.random.seed(0) # seed to shuffle the train set
n_folds = 10
verbose = True
shuffle = False
X, y, X_submission = load_data.load()
if shuffle:
idx = np.random.permutation(y.size)
X = X[idx]
y = y[idx]
skf = list(StratifiedKFold(y, n_folds))
clfs = [RandomForestClassifier(n_estimators=100, n_jobs=-1, criterion='gini'),
RandomForestClassifier(n_estimators=100, n_jobs=-1, criterion='entropy'),
ExtraTreesClassifier(n_estimators=100, n_jobs=-1, criterion='gini'),
ExtraTreesClassifier(n_estimators=100, n_jobs=-1, criterion='entropy'),
GradientBoostingClassifier(learning_rate=0.05, subsample=0.5, max_depth=6, n_estimators=50)]
print "Creating train and test sets for blending."
dataset_blend_train = np.zeros((X.shape[0], len(clfs)))
dataset_blend_test = np.zeros((X_submission.shape[0], len(clfs)))
for j, clf in enumerate(clfs):
print j, clf
dataset_blend_test_j = np.zeros((X_submission.shape[0], len(skf)))
for i, (train, test) in enumerate(skf):
| print "Fold", i
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
clf.fit(X | _train, y_train)
y_submission = clf.predict_proba(X_test)[:, 1]
dataset_blend_train[test, j] = y_submission
dataset_blend_test_j[:, i] = clf.predict_proba(X_submission)[:, 1]
dataset_blend_test[:, j] = dataset_blend_test_j.mean(1)
print
print "Blending."
clf = LogisticRegression()
clf.fit(dataset_blend_train, y)
y_submission = clf.predict_proba(dataset_blend_test)[:, 1]
print "Linear stretch of predictions to [0,1]"
y_submission = (y_submission - y_submission.min()) / (y_submission.max() - y_submission.min())
print "Saving Results."
tmp = np.vstack([range(1, len(y_submission)+1), y_submission]).T
np.savetxt(fname='submission.csv', X=tmp, fmt='%d,%0.9f',
header='MoleculeId,PredictedProbability', comments='')
|
dymkowsk/mantid | scripts/Reflectometry/isis_reflectometry/saveModule.py | Python | gpl-3.0 | 3,083 | 0.025949 | #pylint: disable=invalid-name
from __future__ import (abso | lute_import, division, print_function)
from PyQt4 import QtCore
from mantid.simpleapi import *
import numpy as n
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
def saveCustom(idx,fname,sep = ' ',logs = [],title = False,error = False):
fname+='.dat'
print("FILENAME: ", fname)
a1=mtd[str(idx.text())]
titl='#'+a1.getTitle()+'\n'
x1=a1.readX(0)
X1=n.zeros((len(x1)-1))
| for i in range(0,len(x1)-1):
X1[i]=(x1[i]+x1[i+1])/2.0
y1=a1.readY(0)
e1=a1.readE(0)
f=open(fname,'w')
if title:
f.write(titl)
samp = a1.getRun()
for log in logs:
prop = samp.getLogData(str(log.text()))
headerLine='#'+log.text() + ': ' + str(prop.value) + '\n'
print(headerLine)
f.write(headerLine)
qres=(X1[1]-X1[0])/X1[1]
print("Constant dq/q from file: ",qres)
for i in range(len(X1)):
if error:
dq=X1[i]*qres
s="%e" % X1[i] +sep+"%e" % y1[i] +sep + "%e" % e1[i] + sep + "%e" % dq +"\n"
else:
s="%e" % X1[i] +sep+"%e" % y1[i] +sep + "%e" % e1[i]+ "\n"
f.write(s)
f.close()
def saveANSTO(idx,fname):
fname+='.txt'
print("FILENAME: ", fname)
a1=mtd[str(idx.text())]
x1=a1.readX(0)
X1=n.zeros((len(x1)-1))
for i in range(0,len(x1)-1):
X1[i]=(x1[i]+x1[i+1])/2.0
y1=a1.readY(0)
e1=a1.readE(0)
sep='\t'
f=open(fname,'w')
qres=(X1[1]-X1[0])/X1[1]
print("Constant dq/q from file: ",qres)
for i in range(len(X1)):
dq=X1[i]*qres
s="%e" % X1[i] +sep+"%e" % y1[i] +sep + "%e" % e1[i] + sep + "%e" % dq +"\n"
f.write(s)
f.close()
def saveMFT(idx,fname,logs):
fname+='.mft'
print("FILENAME: ", fname)
a1=mtd[str(idx.text())]
x1=a1.readX(0)
X1=n.zeros((len(x1)-1))
for i in range(0,len(x1)-1):
X1[i]=(x1[i]+x1[i+1])/2.0
y1=a1.readY(0)
e1=a1.readE(0)
sep='\t'
f=open(fname,'w')
f.write('MFT\n')
f.write('Instrument: '+a1.getInstrument().getName()+'\n')
f.write('User-local contact: \n')
f.write('Title: \n')
samp = a1.getRun()
s = 'Subtitle: '+samp.getLogData('run_title').value+'\n'
f.write(s)
s = 'Start date + time: '+samp.getLogData('run_start').value+'\n'
f.write(s)
s = 'End date + time: '+samp.getLogData('run_end').value+'\n'
f.write(s)
for log in logs:
prop = samp.getLogData(str(log.text()))
headerLine=log.text() + ': ' + str(prop.value) + '\n'
print(headerLine)
f.write(headerLine)
f.write('Number of file format: 2\n')
s = 'Number of data points:\t' + str(len(X1))+'\n'
f.write(s)
f.write('\n')
f.write('\tq\trefl\trefl_err\tq_res\n')
qres=(X1[1]-X1[0])/X1[1]
print("Constant dq/q from file: ",qres)
for i in range(len(X1)):
dq=X1[i]*qres
s="\t%e" % X1[i] +sep+"%e" % y1[i] +sep + "%e" % e1[i] + sep + "%e" % dq +"\n"
f.write(s)
f.close()
|
samabhi/pstHealth | venv/lib/python2.7/site-packages/gunicorn/workers/geventlet.py | Python | mit | 1,452 | 0.003444 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from __future__ import with_statement
import os
try:
import eventlet
except ImportError:
raise RuntimeError("You need eventlet installed to use this worker.")
from eventlet import hubs
from eventlet.greenio import GreenSocket
from gunicorn.workers.async import AsyncWorker
class EventletWorker(AsyncWorker):
@classmethod
def setup(cls):
import eventlet
if eventlet.version_info < (0,9,7):
raise RuntimeError("You need eventlet >= 0.9.7")
eventlet.monkey_patch(os=False)
def init_process(self):
hubs.use_ | hub()
super(EventletWorker, self).init_process()
def timeout_ctx(self):
return eventlet.Timeout(self.cfg.keepalive, False)
def run(self):
self.socket = GreenSocket(family_or_realsock=self.socket.sock)
self.socket.setblocking(1)
self.acceptor = eventlet.spawn(event | let.serve, self.socket,
self.handle, self.worker_connections)
while self.alive:
self.notify()
if self.ppid != os.getppid():
self.log.info("Parent changed, shutting down: %s", self)
break
eventlet.sleep(1.0)
self.notify()
with eventlet.Timeout(self.timeout, False):
eventlet.kill(self.acceptor, eventlet.StopServe)
|
OpenCanada/website | articles/migrations/0030_auto_20150806_2136.py | Python | mit | 928 | 0.002155 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import wagtail.core.blocks
import wagtail.core.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0029_auto_20150811_1924'),
]
operations = [
migrations.AddField(
model_name='chapteredarticlepag | e',
name='end_notes',
field=wagtail.core.fields.StreamField([('end_note', wagtail.core.blocks.StructBlock([(b'identifier', wagtail.core.blocks.CharBlock()), (b'text', wagtail.core.blocks.TextBlock())]))], null=True, blank=True),
),
migrations.AddField(
model_name='chapteredarticlepage',
name='works_cited',
field=wagtail.core.fields.StreamField([('citation', wagtail.core.blocks.StructBlock([(b'text', wagtail.core.blocks.T | extBlock())]))], null=True, blank=True),
),
]
|
111pontes/ydk-py | core/ydk/providers/_importer.py | Python | apache-2.0 | 1,938 | 0.001032 | # ----------------------------------------------------------- | -----
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
| # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
""" _importer.py
Merge _yang_ns for subpackage to a single _yang_ns at runtime.
"""
import importlib
import pkgutil
from ydk import models
class YangNs(object):
def __init__(self, d):
self.__dict__ = d
_yang_ns_dict = {}
exempt_keys = set(['__builtins__', '__doc__', '__file__',
'__name__', '__package__'])
try:
_yang_ns = importlib.import_module('ydk.models._yang_ns')
except ImportError:
for (importer, name, ispkg) in pkgutil.iter_modules(models.__path__):
if ispkg:
try:
mod_yang_ns = importlib.import_module('ydk.models.%s._yang_ns' % name)
except ImportError:
continue
keys = set(mod_yang_ns.__dict__) - exempt_keys
for key in keys:
if key not in _yang_ns_dict:
_yang_ns_dict[key] = mod_yang_ns.__dict__[key]
else:
if isinstance(_yang_ns_dict[key], dict):
_yang_ns_dict[key].update(mod_yang_ns.__dict__[key])
else:
# shadow old value
_yang_ns_dict[key] = mod_yang_ns.__dict__[key]
_yang_ns = YangNs(_yang_ns_dict)
|
emacsway/ascetic | ascetic/contrib/polymorphic.py | Python | mit | 7,432 | 0.00148 | # -*- coding: utf-8 -*-
from collections import OrderedDict
from ascetic import exceptions
from ascetic.mappers import Load, Mapper, OneToOne, Result
from ascetic.utils import to_tuple
from ascetic.utils import cached_property
from ascetic.contrib.gfk import GenericForeignKey
# TODO: Support for native support inheritance:
# http://www.postgresql.org/docs/9.4/static/tutorial-inheritance.html
# http://www.postgresql.org/docs/9.4/static/ddl-inherit.html
class NativePolymorphicMapper(object):
pass
class PolymorphicMapper(Mapper):
result_factory = staticmethod(lambda *a, **kw: PolymorphicResult(*a, **kw))
def get_polymorphic_bases(self, derived_model):
bases = []
for base in derived_model.__bases__:
if getattr(self.get_mapper(base), 'polymorphic', False):
bases.append(base)
else:
bases += self.get_polymorphic_bases(base)
return tuple(bases)
@cached_property
def polymorphic_bases(self):
return tuple(self.get_mapper(base_model) for base_model in self.get_polymorphic_bases(self.model))
# TODO: Fix the diamond inheritance problem???
# I'm not sure is it a problem... After first base save model will has PK...
# @cached_property
# def polymorphic_mro(self):
# pass
@cached_property
def polymorphic_fields(self):
fields = OrderedDict()
for base in self.polymorphic_bases:
fields.update(base.polymorphic_fields)
for name, field in self.fields.items():
fields[name] = field
return fields
@cached_property
def polymorphic_columns(self):
cols = OrderedDict()
for base in self.polymorphic_bases:
cols.update(base.polymorphic_columns)
for name, col in self.fields.items():
cols[name] = col
return cols
@property
def query(self):
bases = self.polymorphic_bases
if bases:
base = bases[-1]
q = base.query
derived_mappers = (self,) + bases[:-1]
for derived_mapper in derived_mappers:
t = derived_mapper.sql_table
q = q.fields(
*self.get_sql_fields()
).tables((
q.tables() & t
).on(
t.pk == base.sql_table.pk
))
else:
q = super(PolymorphicMapper, self).query
q.result = PolymorphicResult(self, self._default_db())
return q
def _do_prepare_model(self, model):
for base in model.mro():
if base is not model and getattr(self.get_mapper(base), 'polymorphic', False):
pk_related_name = "{}_ptr".format(base.__name__.lower())
# self.pk = "{}_id".format(pk_related_name) # Useless, pk read from DB
# TODO: support multiple inheritance
setattr(model, pk_related_name, OneToOne(
base,
field=self.get_mapper(model).pk,
related_field=self.get_mapper(base).pk,
related_name=model.__name__.lower(),
query=(lambda rel: rel.mapper.query.polymorphic(False)),
related_query=(lambda rel: rel.related_mapper.query.polymorphic(False))
))
break
else:
if getattr(self.get_mapper(model), 'polymorphic', False):
setattr(model, "concrete_instance", GenericForeignKey(
type_field="polymorphic_type_id",
related_field=(lambda rel: rel.related_mapper.pk),
field=self.get_mapper(model).pk,
))
super(PolymorphicMapper, self)._do_prepare_model(self.model)
def load(self, data, db, from_db=True, reload=False):
return PolymorphicLoad(self, data, db, from_db, reload).compute()
def validate(self, obj, fields=frozenset(), exclude=frozenset()):
errors = {}
for base in self.polymorphic_bases:
try:
base.validate(obj, fields=fields, exclude=exclude)
except exceptions.ValidationError as e:
errors.update(e.args[0])
try:
super(PolymorphicMapper, self).validate(obj, fields=fields, exclude=exclude)
except exceptions.ValidationError as e:
errors.update(e.args[0])
if errors:
raise exceptions.ValidationError(errors)
def save(self, obj):
if not self.polymorphic_fields['polymorphic_type_id'].get_value(obj):
obj.polymorphic_type_id = self.get_mapper(obj.__class__).name
for base in self.polymorphic_bases:
new_record = self.is_new(obj)
base.save(obj)
for key, base_key in zip(to_tuple(self.pk), to_tuple(base.pk)):
self.fields[key].set_value(obj, self.polymorphic_fields[base_key].get_value(obj))
self.is_new(obj, new_record)
return super(Polymorphi | cMapper, self).save(obj)
class PolymorphicResult(Result):
_polymorphic = True
def polymorphic(self, val=True):
self._polymorphic = val
return self._query
def fill_cache(self):
if self._cache is not None or | not self._polymorphic:
return super(PolymorphicResult, self).fill_cache()
if self._cache is None:
polymorphic, self._polymorphic = self._polymorphic, False
self._cache = list(self.iterator())
self._cache = PopulatePolymorphic(self._cache, self.mapper.get_mapper).compute()
self.populate_prefetch()
self._polymorphic = polymorphic
return self
def iterator(self):
for obj in super(PolymorphicResult, self).iterator():
yield obj.concrete_instance if self._polymorphic and hasattr(obj, 'concrete_instance') else obj
class PopulatePolymorphic(object):
def __init__(self, rows, mapper_accessor):
self._rows = rows
self._get_mapper = mapper_accessor
def compute(self):
if not self._rows:
return []
return self._get_populated_rows()
def _get_populated_rows(self):
rows = self._rows[:]
typed_objects = self._get_typed_objects()
for i, obj in enumerate(rows):
if obj.polymorphic_type_id in typed_objects:
rows[i] = typed_objects[obj.polymorphic_type_id][self._get_current_mapper().get_pk(obj)]
return rows
def _get_typed_objects(self):
typed_objects = {}
pks = {self._get_current_mapper().get_pk(i) for i in self._rows}
for ct in self._get_content_types():
mapper = self._get_mapper(ct)
typed_objects[ct] = {mapper.get_pk(i): i for i in mapper.query.where(mapper.sql_table.pk.in_(pks))}
return typed_objects
def _get_current_mapper(self):
current_model = self._rows[0].__class__
return self._get_mapper(current_model)
def _get_content_types(self):
content_types = {i.polymorphic_type_id for i in self._rows}
content_types -= {self._get_current_mapper().name}
return content_types
class PolymorphicLoad(Load):
def _map_data_from_db(self, data, columns=None):
columns = columns or self._mapper.polymorphic_columns
return super(PolymorphicLoad, self)._map_data_from_db(data, columns)
|
CantemoInternal/pyxb | tests/trac/trac-0196/check.py | Python | apache-2.0 | 2,993 | 0.015703 | # -*- coding: utf-8 -*-
from __future__ import print_function
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import unittest
import qq0196 as qq
import qu0196 as qu
import uq0196 as uq
import uu0196 as uu
import mix
from pyxb.utils.domutils import BindingDOMSupport
from pyxb.utils import six
BindingDOMSupport.DeclareNamespace(qq.Namespace, 'qq')
BindingDOMSupport.DeclareNamespace(qu.Namespace, 'qu')
BindingDOMSupport.DeclareNamespace(uq.Namespace, 'uq')
BindingDOMSupport.DeclareNamespace(uu.Namespace, 'uu')
BindingDOMSupport.DeclareNamespace(mix.Namespace, 'mix')
qq_bds = BindingDOMSupport(default_namespace=qq.Namespace)
elt_kw = {
'te' : 'te',
'teq' : 'teq',
'teu' : 'teu',
'e' : 'e',
'eq' : 'eq',
'eu' : 'eu',
'a' : 'a',
'aq' : 'aq',
'au' : 'au',
'ta' : 'ta',
'taq' : 'taq',
'tau' : 'tau' }
qq_i = qq.elt(**elt_kw)
qu_i = qu.elt(**elt_kw)
uq_i = uq.elt(**elt_kw)
uu_i = uu.elt(**elt_kw)
i = mix.elt(qq_i, qu_i, u | q_i, uu_i)
try:
print(i.toDOM().toprettyxml())
except pyxb.ValidationError as e:
print(e.details())
raise
i = mix.uue(a='a')
print(i.toxml('utf-8'))
class TestTrac0196 (unittest.Tes | tCase):
module_map = { qq : ( qq.Namespace, qq.Namespace ),
qu : ( qu.Namespace, None ),
uq : ( None, uq.Namespace ),
uu : ( None, None ) }
global_a = ( 'a', 'aq', 'au' )
global_e = ('e', 'eq', 'eu' )
local_a = ( 'ta', 'taq', 'tau' )
local_e = ('te', 'teq', 'teu' )
def testQualified (self):
# Top-level declarations are qualified regardless of presence/absence of form attribute.
# Internal declarations follow form attribute or schema default
for (m, ( efd, afd )) in six.iteritems(self.module_map):
for (n, d) in six.iteritems(m.t._AttributeMap):
if n.localName() in ('a', 'au', 'aq'):
self.assertEqual(n.namespace(), m.Namespace)
elif 'taq' == n.localName():
self.assertEqual(n.namespace(), m.Namespace)
elif 'tau' == n.localName():
self.assertEqual(n.namespace(), None)
elif 'ta' == n.localName():
self.assertEqual(n.namespace(), afd)
else:
self.assertFalse()
for (n, d) in six.iteritems(m.t._ElementMap):
if n.localName() in ('e', 'eu', 'eq'):
self.assertEqual(n.namespace(), m.Namespace)
elif 'teq' == n.localName():
self.assertEqual(n.namespace(), m.Namespace)
elif 'teu' == n.localName():
self.assertEqual(n.namespace(), None)
elif 'te' == n.localName():
self.assertEqual(n.namespace(), efd)
else:
self.assertFalse()
if __name__ == '__main__':
unittest.main()
|
Ninja-1/Circle2 | circlelib/proxy.py | Python | gpl-2.0 | 7,644 | 0.008503 | # UDP Proxy
# The Circle - Decentralized resource discovery software
# Copyright (C) 2001 Paul Francis Harrison
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, os, string, socket, select, cPickle, popen2, time
import utility, settings, check
from error import Error
proxy_program = """
import os,socket,sys,string,select,cPickle
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
try:
SOL_SOCKET = socket.SOL_SOCKET
except:
SOL_SOCKET = 1
try:
MSG_DONTWAIT = socket.MSG_DONTWAIT
except:
MSG_DONTWAIT = 64
try:
SO_SNDBUF = socket.SO_SNDBUF
except:
SO_SNDBUF = 7
try:
SO_RCVBUF = socket.SO_RCVBUF
except:
SO_RCVBUF = 8
for port in """+`settings.default_ports`+""":
try:
sock.bind(('',port))
break
except:
pass
else:
sock.bind(('',socket.INADDR_ANY))
for i in range(20,12,-1):
try:
sock.setsockopt(SOL_SOCKET,SO_SNDBUF,1<<i)
sock.setsockopt(SOL_SOCKET,SO_RCVBUF,1<<i)
break
except socket.error:
pass
try:
sock.setsockopt(0,11,1)
except:
pass
if sys.platform == 'linux2':
sock.setsockopt(0,10,0)
print 'PORT',sock.getsockname()[1]
sys.stdout.flush()
def do_error():
try:
message, address = sock.recvfrom(65536,0x2000 | MSG_DONTWAIT)
except:
return
cPickle.dump((1,address,message),sys.stdout,1)
sys.stdout.flush()
while 1:
result = select.select([sock, sys.stdin],[ ],[sock])
if sys.stdin in result[0]:
request = cPickle.load(sys.stdin)
if not request:
sys.exit(0)
for i in range(100):
try:
sock.sendto(request[1],request[0])
except:
do_error()
continue
break
elif sock in result[0]: |
try:
message, address = sock.recvfrom(65536,MSG_DONTWAIT)
cPickle.dump((0,address,message),sys.stdout,1)
sys.stdout.flush()
except:
do_error()
elif sock in result[2]:
do_error()
"""
# queue = [ ]
# while select.select([sys.stdin],[ ],[ ],0)[0]:
# queue.append( cPickle.load(sys.stdin) )
# for request in queue:
# if not | request:
# sys.exit(0)
# try:
# sock.sendto(request[1],request[0])
# except:
# pass
#proxy_program = string.replace(proxy_program,"\n","\\n")
def _make_connection_win32(host, password):
split = string.split(host,'@')
command = 'plink '+split[-1]
if len(split) == 2:
command = command + ' -l "' + split[0] + '"'
command = command + ' -pw "'+password+'"'
# Ensure host key in cache
os.system(command + " echo Connected")
command = command + " -batch python -u -c 'exec input()'"
try:
read_stdout, write_stdin = popen2.popen2(command,mode='b')
time.sleep(0.1)
write_stdin.write(repr(proxy_program)+'\n')
except:
raise Error('Could not start plink.')
write_stdin.flush()
return read_stdout, write_stdin
def _make_connection_text(host, password):
if sys.platform == 'win32':
raise Error("Can't use the proxy with text mode Win32.")
command = "ssh "+host+" python -u -c \"'exec input()'\""
try:
read_stdout, write_stdin = popen2.popen2(command,mode='b')
time.sleep(0.1)
except:
raise Error('Could not start SSH.')
write_stdin.write(repr(proxy_program)+'\n')
write_stdin.flush()
return read_stdout, write_stdin
def _make_connection_daemon(host, password):
if sys.platform == 'win32':
raise Error("Can't use the proxy with Windows.")
command = "ssh "+host+" python -u -c \"'exec input()'\""
try:
read_stdout, write_stdin = popen2.popen2(command,mode='b')
time.sleep(0.1)
except:
raise Error('Could not start SSH.')
write_stdin.write(repr(proxy_program)+'\n')
write_stdin.flush()
return read_stdout, write_stdin
class Proxy(utility.Synchronous):
def __init__(self, host,password=None, transient_for=None):
utility.Synchronous.__init__(self)
if sys.platform == 'win32':
self.read_stdout, self.write_stdin = _make_connection_win32(host,password)
else:
self.read_stdout, self.write_stdin = _make_connection_daemon(host,password)
#On win32 newline is \n\r, confusion ensues
#line = self.read_stdout.readline()
line = ''
while 1:
char = self.read_stdout.read(1)
if char == '\n' or not char:
break
line = line + char
if line[:5] != 'PORT ':
self.write_stdin.close()
self.read_stdout.close()
if sys.platform == 'win32':
message = 'Attempt to start the proxy failed.\n\n' + \
'Check the username, server name and password.\n\n' + \
'(see "Help" for more information)'
else:
message = 'Attempt to start the proxy failed.\n\n'+\
'Check your username, server name and password.'
raise Error(message)
self.address = ('127.0.0.1', string.atoi(line[5:]))
self.broken = 0
self.running = 1
check.check_is_af_inet_address(self.address) #=@E3
def recvfrom(self):
""" Returns (is_error, address, message).
Only one thread should call this at a time!"""
if self.broken:
raise Error("proxy broken")
try:
result = cPickle.load(self.read_stdout)
except:
self.become_broken()
return result
def sendto(self, message, address):
self.lock.acquire()
try:
if self.broken:
raise Error("proxy broken")
try:
cPickle.dump((address,message),self.write_stdin,1)
self.write_stdin.flush()
except:
self.become_broken()
finally:
self.lock.release()
def stop(self):
self.lock.acquire()
try:
self.running = 0
cPickle.dump(None,self.write_stdin,1)
self.write_stdin.close()
self.read_stdout.close()
finally:
self.lock.release()
def become_broken(self):
self.lock.acquire()
try:
if not self.broken and self.running:
error = Error('The connection to the firewall has gone down, or the proxy has crashed.\n\n'+\
'Circle is now disconnected from the network. Sorry.')
utility.mainthread_call(error.show)
self.broken = 1
raise Error('proxy broken')
finally:
self.lock.release()
# vim: set expandtab :
|
roadmapper/ansible | lib/ansible/modules/network/cloudengine/ce_lldp.py | Python | gpl-3.0 | 32,532 | 0.002859 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (abso | lute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_lldp
version_added: "2.10"
short_description: Manages LLDP configuration on HUAWEI CloudEngine switches.
description:
- Manag | es LLDP configuration on HUAWEI CloudEngine switches.
author:
- xuxiaowei0512 (@CloudEngine-Ansible)
notes:
- This module requires the netconf system service be enabled on the remote device being managed.
- Recommended connection is C(netconf).
- This module also works with C(local) connections for legacy playbooks.
options:
lldpenable:
description:
- Set global LLDP enable state.
required: false
choices: ['enabled', 'disabled']
type: str
mdnstatus:
description:
- Set global MDN enable state.
required: false
choices: ['rxOnly', 'disabled']
type: str
interval:
description:
- Frequency at which LLDP advertisements are sent (in seconds).
required: false
type: int
hold_multiplier:
description:
- Time multiplier for device information in neighbor devices.
required: false
type: int
restart_delay:
description:
- Specifies the delay time of the interface LLDP module from disabled state to re enable.
required: false
type: int
transmit_delay:
description:
- Delay time for sending LLDP messages.
required: false
type: int
notification_interval:
description:
- Suppression time for sending LLDP alarm.
required: false
type: int
fast_count:
description:
- The number of LLDP messages sent to the neighbor nodes by the specified device.
required: false
type: int
mdn_notification_interval:
description:
- Delay time for sending MDN neighbor information change alarm.
required: false
type: int
management_address:
description:
- The management IP address of LLDP.
required: false
default: null
type: str
bind_name:
description:
- Binding interface name.
required: false
default: null
type: str
state:
description:
- Manage the state of the resource.
required: false
default: present
type: str
choices: ['present','absent']
'''
EXAMPLES = '''
- name: "Configure global LLDP enable state"
ce_lldp:
lldpenable: enabled
- name: "Configure global MDN enable state"
ce_lldp:
mdnstatus: rxOnly
- name: "Configure LLDP transmit interval and ensure global LLDP state is already enabled"
ce_lldp:
enable: enable
interval: 32
- name: "Configure LLDP transmit multiplier hold and ensure global LLDP state is already enabled"
ce_lldp:
enable: enable
hold_multiplier: 5
- name: "Configure the delay time of the interface LLDP module from disabled state to re enable"
ce_lldp:
enable: enable
restart_delay: 3
- name: "Reset the delay time for sending LLDP messages"
ce_lldp:
enable: enable
transmit_delay: 4
- name: "Configure device to send neighbor device information change alarm delay time"
ce_lldp:
lldpenable: enabled
notification_interval: 6
- name: "Configure the number of LLDP messages sent to the neighbor nodes by the specified device"
ce_lldp:
enable: enable
fast_count: 5
- name: "Configure the delay time for sending MDN neighbor information change alarm"
ce_lldp:
enable: enable
mdn_notification_interval: 6
- name: "Configuring the management IP address of LLDP"
ce_lldp:
enable: enable
management_address: 10.1.0.1
- name: "Configuring LLDP to manage the binding relationship between IP addresses and interfaces"
ce_lldp:
enable: enable
bind_name: LoopBack2
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {
"lldpenable": "enabled",
"mdnstatus": "rxOnly",
"interval": "32",
"hold_multiplier": "5",
"restart_delay": "3",
"transmit_delay": "4",
"notification_interval": "6",
"fast_count": "5",
"mdn_notification_interval": "6",
"management_address": "10.1.0.1",
"bind_name": "LoopBack2",
"state": "present"
}
existing:
description: k/v pairs of existing global LLDP configuration.
returned: always
type: dict
sample: {
"lldpenable": "disabled",
"mdnstatus": "disabled"
}
end_state:
description: k/v pairs of global LLDP configuration after module execution.
returned: always
type: dict
sample: {
"lldpenable": "enabled",
"mdnstatus": "rxOnly",
"interval": "32",
"hold_multiplier": "5",
"restart_delay": "3",
"transmit_delay": "4",
"notification_interval": "6",
"fast_count": "5",
"mdn_notification_interval": "6",
"management_address": "10.1.0.1",
"bind_name": "LoopBack2"
}
updates:
description: command sent to the device
returned: always
type: list
sample: [
"lldp enable",
"lldp mdn enable",
"lldp transmit interval 32",
"lldp transmit multiplier 5",
"lldp restart 3",
"lldp transmit delay 4",
"lldp trap-interval 6",
"lldp fast-count 5",
"lldp mdn trap-interval 6",
"lldp management-address 10.1.0.1",
"lldp management-address bind interface LoopBack 2"
]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import copy
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import set_nc_config, get_nc_config
CE_NC_GET_GLOBAL_LLDPENABLE_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpSys>
<lldpEnable></lldpEnable>
<mdnStatus></mdnStatus>
</lldpSys>
</lldp>
</filter>
"""
CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG = """
<config>
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpSys operation="merge">
<lldpEnable>%s</lldpEnable>
</lldpSys>
</lldp>
</config>
"""
CE_NC_MERGE_GLOBA_MDNENABLE_CONFIG = """
<config>
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpSys operation="merge">
<mdnStatus>%s</mdnStatus>
</lldpSys>
</lldp>
</config>
"""
CE_NC_GET_GLOBAL_LLDP_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpSys>
<lldpSysParameter>
<messageTxInterval></messageTxInterval>
<messageTxHoldMultiplier></messageTxHoldMultiplier>
<reinitDelay></reinitDelay>
<txDelay></txDelay>
<notificationInterval></notificationInterval>
<notificationEnable></notificationEnable>
<fastMessageCount></fastMessageCount>
<mdnNotificationInterval></mdnNotificationInterval>
<mdnNotificationEnable></mdnNotificationEnable>
<configManAddr></co |
mpharrigan/msmbuilder | tools/ci/push-docs-to-s3.py | Python | gpl-2.0 | 802 | 0.001247 | import os
import boto
from boto.s3.key import Key
# The secret key is available as a secure environment variable
# on travis-ci to push the build documentation to Amazon S3.
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ | ['AWS_SECRET_ACCESS_KEY']
BUCKET_NAME = 'msmbuilder.org'
bucket_name = AWS_ACCESS_KEY_ID.lower() + '-' + BUCKET_NAME
conn = boto.connect_s3(AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY)
bucket = conn.create_bucket(BUCKET_NAME)
root = 'docs/sphinx/_build/html'
for dirpath, dirnames, filenames in os.walk(root):
for filename in filenames:
fn = os.path.join(dirpath, filename)
print 'Uploading', fn, '...'
k = Key(bucket)
k.k | ey = os.path.relpath(fn, root)
k.set_contents_from_filename(fn)
|
cloudtools/troposphere | troposphere/inspector.py | Python | bsd-2-clause | 1,446 | 0.002075 | # Copyright (c) 2012-2022, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
from . import AWSObject, PropsDictType, Tags
from .validators import integer
class AssessmentTarget(AWSObject):
    """
    `AssessmentTarget <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttarget.html>`__
    """
    resource_type = "AWS::Inspector::AssessmentTarget"
    # Each entry maps a CloudFormation property to a (type, required) pair
    # consumed by the troposphere AWSObject validator.
    props: PropsDictType = {
        "AssessmentTargetName": (str, False),
        "ResourceGroupArn": (str, False),
    }
class AssessmentTemplate(AWSObject):
    """
    `AssessmentTemplate <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttemplate.html>`__
    """
    resource_type = "AWS::Inspector::AssessmentTemplate"
    # (type, required) pairs consumed by the troposphere AWSObject validator.
    props: PropsDictType = {
        "AssessmentTargetArn": (str, True),
        "AssessmentTemplateName": (str, False),
        "DurationInSeconds": (integer, True),
        "RulesPackageArns": ([str], True),
        "UserAttributesForFindings": (Tags, False),
    }
class ResourceGroup(AWSObject):
    """
    `ResourceGroup <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-resourcegroup.html>`__
    """
    resource_type = "AWS::Inspector::ResourceGroup"
    # (type, required) pairs consumed by the troposphere AWSObject validator.
    props: PropsDictType = {
        "ResourceGroupTags": (Tags, True),
    }
|
rafael2reis/iqextractor | qextractor/preprocessing/baseline.py | Python | gpl-3.0 | 8,010 | 0.006991 | # module baseline.py
#
# Copyright (c) 2015 Rafael Reis
#
"""
baseline module - Functions to produce the Baseline System's features.
"""
__version__="1.0"
__author__ = "Rafael Reis <rafael2reis@gmail.com>"
import re
#import globoquotes
def boundedChunk(s):
    """Identifies the Bounded Chunk.

    Assigns a 1 to the three quotation marks ' " - and also to all
    the tokens between them, whenever there are more than three
    tokens between the quotation marks. Otherwise, assigns a 0 to
    the token.

    Args:
        s: 2D array that represents a sentence in the GloboQuotes format

    Returns:
        A 1D array of 0/1 flags, one per token of s.
    """
    bc = [0] * len(s)
    a = [e[0] for e in s]
    text, dicIndex = detoken(a)

    # One pattern per delimiter character; each requires at least three
    # word tokens after the opening mark.  The three loops previously
    # duplicated the same body verbatim.
    patterns = [
        re.compile(r"\"( \w+?){3}.*? \"", re.U),
        re.compile(r"\'( \w+?){3}.*? \'", re.U),
        re.compile(r"\-( \w+?){3}.*? \-", re.U),
    ]
    for pattern in patterns:
        for m in re.finditer(pattern, text):
            # Map string offsets back to token indexes and flag every
            # token covered by the match (closing mark excluded, as in
            # the original: the loop stops at the last mapped index).
            i = dicIndex[m.start(0)]
            end = dicIndex[m.end(0) - 1]
            while i < end:
                bc[i] = 1
                i += 1
    return bc
def firstLetterUpperCase(s):
    """Flag tokens whose first character is upper case.

    A character that is unchanged by upper() (digits, '_') also counts,
    as long as it matches \\w.

    Args:
        s: 2D array that represents a sentence in the GloboQuotes format

    Returns:
        A 1D array with 1 where the i-th token starts with an upper
        case word character, 0 otherwise.
    """
    word_char = re.compile(r"\w+")
    flags = []
    for row in s:
        first = row[0][0]
        starts_upper = bool(word_char.match(first)) and first == first.upper()
        flags.append(1 if starts_upper else 0)
    return flags
def verbSpeechNeighb(s):
    """Identifies the Verb of Speech Neighbourhood.

    Assigns a 1 to each token tagged VSAY (column 1) and to its four
    closest tokens (two on each side); every other token gets 0.

    Args:
        s: 2D array that represents a sentence in the GloboQuotes format

    Returns:
        A 1D array of 0/1 neighbourhood flags.
    """
    pos_column = 1
    n = len(s)
    vsn = [0] * n
    for i, row in enumerate(s):
        if row[pos_column] == 'VSAY':
            # Flag the verb itself plus up to two tokens on each side,
            # clamped to the sentence boundaries.
            for j in range(max(i - 2, 0), min(i + 3, n)):
                vsn[j] = 1
    return vsn
def quotationStart(s):
    """Identifies the quotations' start by regexp patterns.

    Args:
        s: 2D array that represents a sentence in the GloboQuotes format

    Returns:
        A 1D array with "S" at positions where a quotation starts and
        "-" everywhere else.
    """
    qs = ["-" for i in range(len(s))]
    a = [ e[0] for e in s ]
    # Collapse person/organization entities to the #PO# placeholder
    # (see convertNe) so the second pattern can anchor on them.
    convertNe(a, s)
    text, dicIndex = detoken(a)
    # Case 1: a quote mark (' " -) not preceded by a digit; the match
    # end maps to the token right after the mark, which is labelled "S".
    pattern = re.compile(r"(?=([^\d] [\'\"-] .))", re.U)
    for m in re.finditer(pattern, text):
        qs[ dicIndex[m.end(1)-1] ] = "S"
    # Case 2: sentence end, one or more #PO# tokens, then a colon not
    # followed by another #PO# -- direct speech introduced by attribution.
    pattern = re.compile(r"[\.\?]( #PO#)+ \: (?!#PO#)")
    for m in re.finditer(pattern, text):
        qs[ dicIndex[m.end(0)] ] = "S"
    return qs
def quotationEnd(s, qs):
    """Creates a 1D array with Quotation End indicators.

    Returns an array qe (Quotation End) filled as follows: if the token
    in the i-th line is the end of a quotation, qe[i] = 'E'; otherwise
    qe[i] = '-'.

    Args:
        s: 2D array that represents a sentence in the GloboQuotes format
        qs: 1D array with the quotation start annotation. Must be
            seen as an additional column of s.

    Returns:
        A 1D array that indicates if the i-th position is
        a quotation end.
    """
    qe = ["-" for i in range(len(s))]
    a = [ e[0] for e in s ]
    # Collapse named entities and already-detected starts to the
    # #PO# / #QS# placeholders so the patterns can anchor on them.
    convertNe(a, s)
    convertQuotationStart(a, qs)
    text, dicIndex = detoken(a)
    # Quotes delimited by matching ' or " marks: the token just before
    # the closing mark (offset=-1) is labelled as the end.
    applyLabel(qe, pattern=r"(\' #QS#.*?)[\'\n]", text=text, dic=dicIndex, group=1, offset=-1, offDic=-1, label="E")
    applyLabel(qe, pattern=r"(\" #QS#.*?)[\"\n]", text=text, dic=dicIndex, group=1, offset=-1, offDic=-1, label="E")
    # Dash-delimited quotes: personal pronouns are collapsed to #PPE#
    # first so a dash before one (or after "ex ") is not taken as a closer.
    convertProPess(a, s)
    text, dicIndex = detoken(a)
    applyLabel(qe, pattern=r"(?=(\- #QS#.*?((?<!ex )\-(?!#PPE#)|$)))", text=text, dic=dicIndex, group=1, offset=-1, offDic=-1, label="E")
    # "#PO# : <quote>" constructions ending in . or ?, possibly chained
    # into another "#PO# :" attribution or the end of the sentence.
    convertQuotationStart(a, qs)
    text, dicIndex = detoken(a)
    applyLabel(qe, pattern=r"(?=(#PO# \: #QS#.*?[\.\?])((( #PO#)+ \:)|$))", text=text, dic=dicIndex, group=1, offset=0, offDic=-1, label="E")
    return qe
def applyLabel(q, pattern, text, dic, group, offset, offDic, label):
    """Label tokens of q at every match of pattern in text.

    For each match, the end offset of the requested group is shifted by
    offDic, translated into a token index through dic, shifted again by
    offset, and that entry of q is overwritten with label.  q is
    modified in place; nothing is returned.
    """
    compiled = re.compile(pattern)
    for match in compiled.finditer(text):
        token_pos = dic[match.end(group) + offDic] + offset
        q[token_pos] = label
def convertNe(a, s):
    """
    Call the function convert with the parameters to translate the tokens
    in the array a to "#PO#" whenever the named-entity column (index 3)
    is I-PER or I-ORG.
    """
    convert(a, s, transIndex=3, valueList=["I-PER", "I-ORG"], label="#PO#")
def convertQuotationStart(a, qs):
    """
    Call the function convert with the parameters to translate the tokens
    in the array a to "#QS#" wherever qs marks a quotation start ("S").
    """
    convert(a, qs, transIndex=0, valueList=["S"], label="#QS#")
def convertProPess(a, s):
    """
    Translates the tokens in the array a to "#PPE#" whenever the POS
    column (index 1) is PROPESS (presumably the personal-pronoun tag --
    confirm against the tagset).
    """
    convert(a, s, transIndex=1, valueList=["PROPESS"], label="#PPE#")
def convert(a, s, transIndex, valueList, label):
    """Replace entries of a with label wherever the matching row of s
    carries one of valueList in column transIndex.

    a is modified in place; s is left untouched.
    """
    for position, row in enumerate(s):
        if row[transIndex] in valueList:
            a[position] = label
def quoteBounds(qs, qe):
    """Mark the tokens that lie inside a quotation.

    Walks the sentence backwards: an 'E' in qe opens a quote region, the
    next 'S' in qs closes it, and every token in between gets 'q'.
    Tokens outside any quotation keep 'O'.

    NOTE(review): the scan stops at index 1, so position 0 is never
    marked even when a quotation starts there -- confirm this is
    intended.

    Args:
        qs: 1D array with 'S' at quotation starts, '-' elsewhere.
        qe: 1D array with 'E' at quotation ends, '-' elsewhere.

    Returns:
        A 1D array with 'q' for quoted tokens and 'O' otherwise.
    """
    quote = ["O"] * len(qs)
    in_quote = False
    for i in reversed(range(1, len(qs))):
        if not in_quote and qe[i] == 'E':
            in_quote = True
            quote[i] = 'q'
        elif in_quote and qs[i] == 'S':
            in_quote = False
            quote[i] = 'q'
        elif in_quote:
            quote[i] = 'q'
    return quote
def detoken(a):
"""Detokenizes an array of tokens.
Given an array a of tokens, it creates a text string with the tokens
separated by space and a dictionary.
This dicionary is usefull to translate from the
indexes found by regexp in the text string
Args:
a: array of tokens
Returns:
A dicionary(k,v) where:
v: original index of the token in the sentence
k: index of the token in the string
"""
text = " "
#index = [2]
index = [0]
for i in range(len(a)):
text = text + " " + a[i]
index.append(i)
for j in range(len(a[i])):
index.append(i)
#if i > 0:
#index.append(index[i - 1] + 1 + len(a[i-1]))
text = text + "\n"
#dic = { index[i] : i for i in range(len(index)) }
return text, index |
labsland/labmanager | alembic/versions/3ee46f95bcce_make_laboratory_name_longer.py | Python | bsd-2-clause | 600 | 0.016667 | """Make Laboratory name longer
Revision ID: 3ee46f95bcce
Revises: 4bc4c1ae0f38
Create Date: 2014-04-29 21:29:43.714010
"""
# revision identifiers, used by Alembic.
# Revision identifiers used by Alembic to order migrations.
revision = '3ee46f95bcce'
down_revision = '4bc4c1ae0f38'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Widen both columns so long laboratory identifiers and names fit.
    op.alter_column("laboratories", "laboratory_id", type_ = sa.Unicode(350))
    op.alter_column("laboratories", "name", type_ = sa.Unicode(350))
def downgrade():
    # Restore the original 50-character columns.
    # NOTE(review): rows holding values longer than 50 characters must be
    # dealt with before downgrading or the ALTER may truncate/fail.
    op.alter_column("laboratories", "laboratory_id", type_ = sa.Unicode(50))
    op.alter_column("laboratories", "name", type_ = sa.Unicode(50))
|
tobinmori/neo4j-python-clients-review | models.py | Python | gpl-2.0 | 544 | 0.003676 |
class Series(object):
    """A TV series, identified by its name."""
    def __init__(self, name=None):
        self.name = name
    def __str__(self):
        # NOTE(review): str() raises TypeError while name is None --
        # confirm callers always supply a name.
        return self.name
class Episode(object):
    """A single episode, identified by its name."""
    def __init__(self, name=None):
        self.name = name
    def __str__(self):
        # NOTE(review): str() raises TypeError while name is None --
        # confirm callers always supply a name.
        return self.name
class Organization(object):
    """An organization (e.g. a broadcaster), identified by its name."""
    def __init__(self, name=None):
        self.name = name
    def __str__(self):
        # NOTE(review): str() raises TypeError while name is None --
        # confirm callers always supply a name.
        return self.name
class Actor(object):
    """An actor, identified by their name."""
    def __init__(self, name=None):
        self.name = name
    def __str__(self):
        # NOTE(review): str() raises TypeError while name is None --
        # confirm callers always supply a name.
        return self.name
|
SYSTRAN/geographic-api-python-client | systran_geographic_api/models/full_inspiration.py | Python | apache-2.0 | 2,585 | 0.005803 | #!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SYSTRAN Software, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class FullInspiration(object):
    """
    NOTE: This class is auto generated by the systran code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        Systran model
        :param dict systran_types: The key is attribute name and the value is attribute type.
        :param dict attribute_map: The key is attribute name and the value is json key in definition.
        """
        self.systran_types = {
            'id': 'str',
            'location': 'FullLocation',
            'type': 'str',
            'title': 'str',
            'introduction': 'str',
            'content': 'str',
            'photos': 'list[Photo]',
            'videos': 'list[Video]'
        }

        self.attribute_map = {
            'id': 'id',
            'location': 'location',
            'type': 'type',
            'title': 'title',
            'introduction': 'introduction',
            'content': 'content',
            'photos': 'photos',
            'videos': 'videos'
        }

        # Inspiration Identifier
        self.id = None  # str
        # Location
        self.location = None  # FullLocation
        # Inspiration type
        self.type = None  # str
        # Title
        self.title = None  # str
        # Introduction
        self.introduction = None  # str
        # Content
        self.content = None  # str
        # Array of Photos
        self.photos = None  # list[Photo]
        # Array of Videos
        self.videos = None  # list[Video]

    def __repr__(self):
        # Bug fix: previously formatted with the *module* __name__, so
        # instances printed as '<models.full_inspiration ...>'.  Use the
        # class name instead.  NOTE(review): this file is auto generated;
        # apply the same fix to the generator template.
        properties = []
        for p in self.__dict__:
            if p != 'systran_types' and p != 'attribute_map':
                properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))

        return '<{name} {props}>'.format(name=type(self).__name__, props=' '.join(properties))
|
taw/python_koans | python3/koans/about_class_attributes.py | Python | mit | 4,920 | 0.002236 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutClassMethods in the Ruby Koans
#
from runner.koan import *
class AboutClassAttributes(Koan):
    """Koans exploring objects, classes, singleton/class/static methods
    and how class attributes differ from instance attributes.

    NOTE: assertRegexpMatches (deprecated since Python 3.2, removed in
    3.12) has been replaced with assertRegex throughout.
    """
    class Dog:
        pass

    def test_objects_are_objects(self):
        fido = self.Dog()
        self.assertEqual(True, isinstance(fido, object))

    def test_classes_are_types(self):
        self.assertEqual(True, self.Dog.__class__ == type)

    def test_classes_are_objects_too(self):
        self.assertEqual(True, issubclass(self.Dog, object))

    def test_objects_have_methods(self):
        fido = self.Dog()
        self.assertEqual(25, len(dir(fido)))

    def test_classes_have_methods(self):
        self.assertEqual(25, len(dir(self.Dog)))

    def test_creating_objects_without_defining_a_class(self):
        singularity = object()
        self.assertEqual(22, len(dir(singularity)))

    def test_defining_attributes_on_individual_objects(self):
        fido = self.Dog()
        fido.legs = 4
        self.assertEqual(4, fido.legs)

    def test_defining_functions_on_individual_objects(self):
        fido = self.Dog()
        fido.wag = lambda : 'fidos wag'
        self.assertEqual('fidos wag', fido.wag())

    def test_other_objects_are_not_affected_by_these_singleton_functions(self):
        fido = self.Dog()
        rover = self.Dog()
        def wag():
            return 'fidos wag'
        fido.wag = wag
        with self.assertRaises(AttributeError): rover.wag()

    # ------------------------------------------------------------------

    class Dog2:
        def wag(self):
            return 'instance wag'
        def bark(self):
            return "instance bark"
        def growl(self):
            return "instance growl"
        @staticmethod
        def bark():
            return "staticmethod bark, arg: None"
        @classmethod
        def growl(cls):
            return "classmethod growl, arg: cls=" + cls.__name__

    def test_since_classes_are_objects_you_can_define_singleton_methods_on_them_too(self):
        self.assertRegex(self.Dog2.growl(), "classmethod growl, arg: cls=Dog2")

    def test_classmethods_are_not_independent_of_instance_methods(self):
        fido = self.Dog2()
        self.assertRegex(fido.growl(), "classmethod growl, arg: cls=Dog2")
        self.assertRegex(self.Dog2.growl(), "classmethod growl, arg: cls=Dog2")

    def test_staticmethods_are_unbound_functions_housed_in_a_class(self):
        self.assertRegex(self.Dog2.bark(), "staticmethod bark, arg: None")

    def test_staticmethods_also_overshadow_instance_methods(self):
        fido = self.Dog2()
        self.assertRegex(fido.bark(), "staticmethod bark, arg: None")

    # ------------------------------------------------------------------

    class Dog3:
        def __init__(self):
            self._name = None
        def get_name_from_instance(self):
            return self._name
        def set_name_from_instance(self, name):
            self._name = name
        @classmethod
        def get_name(cls):
            return cls._name
        @classmethod
        def set_name(cls, name):
            cls._name = name
        name = property(get_name, set_name)
        name_from_instance = property(get_name_from_instance, set_name_from_instance)

    def test_classmethods_can_not_be_used_as_properties(self):
        fido = self.Dog3()
        with self.assertRaises(TypeError): fido.name = "Fido"

    def test_classes_and_instances_do_not_share_instance_attributes(self):
        fido = self.Dog3()
        fido.set_name_from_instance("Fido")
        fido.set_name("Rover")
        self.assertEqual("Fido", fido.get_name_from_instance())
        self.assertEqual("Rover", self.Dog3.get_name())

    def test_classes_and_instances_do_share_class_attributes(self):
        fido = self.Dog3()
        fido.set_name("Fido")
        self.assertEqual("Fido", fido.get_name())
        self.assertEqual("Fido", self.Dog3.get_name())

    # ------------------------------------------------------------------

    class Dog4:
        def a_class_method(cls):
            return 'dogs class method'
        def a_static_method():
            return 'dogs static method'
        a_class_method = classmethod(a_class_method)
        a_static_method = staticmethod(a_static_method)

    def test_you_can_define_class_methods_without_using_a_decorator(self):
        self.assertEqual('dogs class method', self.Dog4.a_class_method())

    def test_you_can_define_static_methods_without_using_a_decorator(self):
        self.assertEqual('dogs static method', self.Dog4.a_static_method())

    # ------------------------------------------------------------------

    def test_heres_an_easy_way_to_explicitly_call_class_methods_from_instance_methods(self):
        fido = self.Dog4()
        self.assertEqual("dogs class method", fido.__class__.a_class_method())
|
pygeo/pycmbs | pycmbs/tests/test_icon.py | Python | mit | 1,061 | 0.006598 | # -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
import unittest
from pycmbs.icon import Icon
class TestPycmbsIcon(unittest.TestCase):
    """Tests for pycmbs.icon.Icon.

    Requires a local installation of the ICON sample files referenced
    in setUp.
    """
    def setUp(self):
        # requires local installation of ICON sample files!
        self.gridfile = '../../example_data/icon/r2b4_amip.nc'
        self.datafile = '../../example_data/icon/rms0006_atm_phy_DOM01_ML_0001.nc'

    def test_DummyTest(self):
        pass

    def test_IconInit(self):
        x = Icon(None, None, 'None')

    def test_IconInitMissingFile(self):
        # Both files missing: read() must fail with ValueError.
        x = Icon('no.nc', 'nothing.nc', 'novar')
        with self.assertRaises(ValueError):
            x.read()

    def test_IconInitMissingGridFile(self):
        # Valid data file but missing grid file must also fail.
        x = Icon(self.datafile, 'nothing.nc', 'novar')
        with self.assertRaises(ValueError):
            x.read()

    #~ def test_IconReadOK(self):
        #~ x = Icon(self.datafile, self.datafile, 'rsns')
        #~ x.read()
if __name__ == "__main__":
unittest.main()
|
SrNetoChan/QGIS | cmake/FindSIP.py | Python | gpl-2.0 | 3,000 | 0.002 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Simon Edwards <simon@simonzone.com> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Simon Edwards <simon@simonzone.com> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Simon Edwards <simon@simonzone.com> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# FindSIP.py
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
# Probe the installed SIP and print its configuration as "key:value"
# lines for CMake to parse.
try:
    import sipbuild
except ImportError:  # Code for SIP v4
    # SIP v4 exposes its build configuration through the sipconfig module.
    import sipconfig
    sipcfg = sipconfig.Configuration()
    print("sip_version:%06.0x" % sipcfg.sip_version)
    print("sip_version_num:%d" % sipcfg.sip_version)
    print("sip_version_str:%s" % sipcfg.sip_version_str)
    print("sip_bin:%s" % sipcfg.sip_bin)
    print("default_sip_dir:%s" % sipcfg.default_sip_dir)
    print("sip_inc_dir:%s" % sipcfg.sip_inc_dir)
    # SIP 4.19.10+ has new sipcfg.sip_module_dir
    if hasattr(sipcfg, "sip_module_dir"):
        print("sip_module_dir:%s" % sipcfg.sip_module_dir)
    else:
        print("sip_module_dir:%s" % sipcfg.sip_mod_dir)
else:  # Code for SIP v5
    print("sip_version:%06.0x" % sipbuild.version.SIP_VERSION)
    print("sip_version_num:%d" % sipbuild.version.SIP_VERSION)
    print("sip_version_str:%s" % sipbuild.version.SIP_VERSION_STR)
    import shutil
    print("sip_bin:%s" % shutil.which("sip5"))
    # NOTE(review): distutils is deprecated/removed in Python 3.12+ --
    # consider sysconfig.get_path('platlib') instead.
    from distutils.sysconfig import get_python_lib
    python_modules_dir = get_python_lib(plat_specific=1)
    print("default_sip_dir:%s" % python_modules_dir)
|
echristophe/lic | src/LicBinaryWriter.py | Python | gpl-3.0 | 13,727 | 0.003424 | """
Lic - Instruction Book Creation software
Copyright (C) 2010 Remi Gagne
This file (LicBinaryWriter.py) is part of Lic.
Lic is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lic is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses/
"""
from LicCommonImports import *
from LicCustomPages import Page
from LicTemplate import TemplatePage, TemplatePLI
from LicModel import Arrow, CSI, PLI, SubmodelPreview
def saveLicFile(filename, instructions, FileVersion, MagicNumber):
    """Serialize a complete instruction book (template + all pages) to filename."""
    fh, stream = __createStream(filename, FileVersion, MagicNumber)
    # Need to explicitly de-select parts so they refresh the CSI pixmap
    instructions.scene.clearSelectedParts()
    stream.writeInt32(instructions.getQuantitativeSizeMeasure())
    __writeTemplate(stream, instructions.template)
    __writeInstructions(stream, instructions)
    if fh is not None:
        fh.close()
def saveLicTemplate(template, FileVersion, MagicNumber):
    """Serialize only the template page to template.filename."""
    fh, stream = __createStream(template.filename, FileVersion, MagicNumber)
    __writeTemplate(stream, template)
    if fh is not None:
        fh.close()
def saveLicTemplateSettings(templateSettings, FileVersion, MagicNumber):
    """Serialize template settings to templateSettings.filename; the
    settings object writes its own payload after the file header."""
    fh, stream = __createStream(templateSettings.filename, FileVersion, MagicNumber)
    templateSettings.writeToStream(stream)
    if fh is not None:
        fh.close()
def __createStream(filename, FileVersion, MagicNumber):
fh = QFile(filename)
if not fh.open(QIODevice.WriteOnly):
raise IOError, unicode(fh.errorStriong())
stream = QDataStream(fh)
stream.setVersion(QDataStream.Qt_4_3)
stream.writeInt32(MagicNumber)
stream.writeInt16(FileVersion)
return fh, stream
def __writeTemplate(stream, template):
    """Serialize a template page: its part dictionary, submodel, page
    content, static page settings and the GL light parameters.

    NOTE(review): the write order presumably must mirror the template
    loader exactly -- verify against the reader before reordering.
    """
    # Build part dictionary, since it's not implicitly stored anywhere
    partDictionary = {}
    for part in template.steps[0].csi.getPartList():
        if part.abstractPart.filename not in partDictionary:
            part.abstractPart.buildSubAbstractPartDict(partDictionary)
    stream << QString(os.path.basename(template.filename))
    stream.writeBool(TemplatePage.separatorsVisible)
    stream.writeBool(TemplatePLI.includeSubmodels)
    __writePartDictionary(stream, partDictionary)
    __writeSubmodel(stream, template.submodelPart)
    __writePage(stream, template)
    __writeStaticInfo(stream)  # Need to save PageSize, PLI|CSI size, etc, so we can apply these on template load
    # Persist the OpenGL lighting setup alongside the template.
    values = LicGLHelpers.getLightParameters()
    stream.writeInt32(len(values))
    for v in values:
        stream.writeFloat(v)
def __writeStaticInfo(stream):
    """Serialize the global page settings: size, resolution and page
    number position (class-level state on Page)."""
    stream << Page.PageSize
    stream.writeFloat(Page.Resolution)
    stream << QString(Page.NumberPos)
def __writeInstructions(stream, instructions):
    """Serialize the instruction book body: main model filename, static
    page settings, the part dictionary, the main submodel tree, the
    title page, part-list pages and scene guides (in that order)."""
    stream << QString(instructions.mainModel.filename)
    __writeStaticInfo(stream)
    partDictionary = instructions.partDictionary
    __writePartDictionary(stream, partDictionary)
    __writeSubmodel(stream, instructions.mainModel)
    __writeTitlePage(stream, instructions.mainModel.titlePage)
    # Part-list pages: count followed by the pages themselves.
    stream.writeInt32(len(instructions.mainModel.partListPages))
    for page in instructions.mainModel.partListPages:
        __writePartListPage(stream, page)
    # Scene guides: count then (orientation, position) per guide.
    stream.writeInt32(len(instructions.scene.guides))
    for guide in instructions.scene.guides:
        stream.writeInt32(guide.orientation)
        stream << guide.pos()
def __writeSubmodel(stream, submodel):
    """Serialize a submodel: its abstract-part payload, pages, child
    submodel filenames, tree row, parent filename and sub-assembly flag."""
    __writeAbstractPart(stream, submodel)
    stream.writeInt32(len(submodel.pages))
    for page in submodel.pages:
        __writePage(stream, page)
    # Children are stored by filename only; they are written in full as
    # part of the part dictionary.
    stream.writeInt32(len(submodel.submodels))
    for model in submodel.submodels:
        stream << QString(model.filename)
    stream.writeInt32(submodel._row)
    # Top-level models have a non-file parent, hence the hasattr guard.
    name = submodel._parent.filename if hasattr(submodel._parent, 'filename') else ""
    stream << QString(name)
    stream.writeBool(submodel.isSubAssembly)
def __writeLicColor(stream, licColor):
    """Serialize an optional LicColor: a presence flag, then (if present)
    the four rgba floats, the color name and its LDraw code."""
    if licColor is not None:
        stream.writeBool(True)
        for v in licColor.rgba:
            stream.writeFloat(v)
        stream << QString(licColor.name)
        stream.writeInt32(licColor.ldrawCode)
    else:
        stream.writeBool(False)
def __writePartDictionary(stream, partDictionary):
partList = [p for p in partDictionary.values() if not p.isSubmodel]
stream.writeInt32(len(partList))
for part in partList:
__writeAbstractPart(stream, part)
submodelList = [p for p in partDictionary.values() if p.isSubmodel]
s | tream.writeInt32(len(submodelList))
for model in submodelList:
__writeSubmodel(stream, model)
def __writeAbstractPart(stream, part):
    """Serialize one abstract part: identity, geometry metrics, PLI
    transform, its primitives/edges and its child parts."""
    stream << QString(part.filename) << QString(part.name)
    stream.writeBool(part.isPrimitive)
    for metric in (part.width, part.height, part.leftInset, part.bottomInset):
        stream.writeInt32(metric)
    stream << part.center
    stream.writeFloat(part.pliScale)
    for axis in range(3):
        stream.writeFloat(part.pliRotation[axis])
    # Primitives and edges share one count and are written back to back.
    stream.writeInt32(len(part.primitives) + len(part.edges))
    for primitive in part.primitives:
        __writePrimitive(stream, primitive)
    for primitive in part.edges:
        __writePrimitive(stream, primitive)
    stream.writeInt32(len(part.parts))
    # Renamed loop variable: the original shadowed the 'part' argument.
    for child in part.parts:
        __writePart(stream, child)
def __writePrimitive(stream, primitive):
    """Serialize one drawing primitive: color, type, winding order and
    its flat list of coordinate floats."""
    __writeLicColor(stream, primitive.color)
    stream.writeInt16(primitive.type)
    stream.writeInt32(primitive.winding)
    for point in primitive.points:
        stream.writeFloat(point)
def __writePart(stream, part):
    """Serialize one placed part: abstract-part filename, orientation,
    color, transform matrix, callout/page/step bookkeeping, PLI flag,
    optional displacement (with its arrows) and Arrow-specific extras."""
    stream << QString(part.abstractPart.filename)
    stream.writeBool(part.inverted)
    __writeLicColor(stream, part.color)
    for point in part.matrix:
        stream.writeFloat(point)
    stream.writeBool(part.calloutPart != None)
    # -1/-1 means the part is not attached to any page/step.
    pageNumber = stepNumber = -1
    if part.parentItem() and part.getCSI():
        pageNumber, stepNumber = part.getCSI().getPageStepNumberPair()
    stream.writeInt32(pageNumber)
    stream.writeInt32(stepNumber)
    stream.writeBool(part.isInPLI)
    if part.displacement and part.displaceDirection:
        # Displaced part: flag, 3D offset, direction, then its arrows
        # (recursively written as parts; arrows themselves have none).
        stream.writeBool(True)
        stream.writeFloat(part.displacement[0])
        stream.writeFloat(part.displacement[1])
        stream.writeFloat(part.displacement[2])
        stream.writeInt32(part.displaceDirection)
        if part.filename != "arrow":
            stream.writeInt32(len(part.arrows))
            for arrow in part.arrows:
                __writePart(stream, arrow)
    else:
        stream.writeBool(False)
    if isinstance(part, Arrow):
        stream.writeInt32(part.getLength())
        stream.writeFloat(part.axisRotation)
def __writeAnnotationSet(stream, page):
    """Serialize a page's annotations: count, then per annotation its
    pixmap, source filename, position, annotation flag and z-order."""
    stream.writeInt32(len(page.annotations))
    for annotation in page.annotations:
        stream << annotation.pixmap()
        stream << QString(annotation.filename)
        stream << annotation.pos()
        stream.writeBool(annotation.isAnnotation)
        stream.writeInt32(annotation.zValue())
def __writePage(stream, page):
stream.writeInt32(page.number)
stream.writeInt32(page._row)
stream << page.pos() << page.rect()
stream.writeInt32(page.layout.orientation)
stream << page.numberItem.pos() << page.numberItem.font()
if page.numb |
paulcwatts/1hph | gonzo/utils/__init__.py | Python | bsd-3-clause | 2,240 | 0.009821 | import re
import unicodedata
from htmlentitydefs import name2codepoint
from StringIO import StringIO
try:
from django.utils.encoding import smart_unicode
except ImportError:
def smart_unicode(s):
return s
from django import forms
from django.core.files.uploadedfile import SimpleUploadedFile
# From http://www.djangosnippets.org/snippets/369/
def slugify(klass, s, exclude_pk=None, entities=True, decimal=True, hexadecimal=True,
            slug_field='slug', filter_dict=None):
    """Build a slug for klass from the string s, unique among klass rows.

    Character/entity references are resolved, the text is ASCII-folded
    and lowercased, and a numeric suffix is appended until no other row
    of klass (optionally narrowed by filter_dict, excluding exclude_pk)
    already uses the slug.
    """
    s = smart_unicode(s)
    #character entity reference
    if entities:
        s = re.sub('&(%s);' % '|'.join(name2codepoint),
                   lambda m: unichr(name2codepoint[m.group(1)]), s)
    #decimal character reference -- best effort: malformed or
    #out-of-range references are kept as-is rather than aborting.
    if decimal:
        try:
            s = re.sub(r'&#(\d+);', lambda m: unichr(int(m.group(1))), s)
        except Exception:  # was a bare except: narrow enough to let SystemExit etc. through
            pass
    #hexadecimal character reference
    if hexadecimal:
        try:
            s = re.sub(r'&#x([\da-fA-F]+);', lambda m: unichr(int(m.group(1), 16)), s)
        except Exception:
            pass
    #translate to ASCII, dropping characters that do not decompose
    s = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore')
    #replace unwanted characters
    s = re.sub(r'[^-a-z0-9]+', '-', s.lower())
    #remove redundant -
    s = re.sub('-{2,}', '-', s).strip('-')

    slug = s

    def get_query():
        # Rows already using this slug, minus exclusions.
        query = klass.objects.filter(**{slug_field: slug})
        if filter_dict:
            query = query.filter(**filter_dict)
        if exclude_pk:
            query = query.exclude(pk=exclude_pk)
        return query

    counter = 1
    while get_query():
        slug = "%s-%s" % (s, counter)
        counter += 1
    return slug
def assign_image_to_model(instance, field_name, file, name=None, content_type=None):
    """Assign an uploaded image to instance.<field_name>.

    The content is validated by pumping it through a throwaway form with
    an ImageField.  Returns True on success, False if the data is not a
    valid image.  The caller is responsible for saving the instance.
    """
    # Pumping it through a form seems to be the easiest way
    class MyForm(forms.Form):
        image = forms.ImageField()
    frm = MyForm(files={ 'image':
                        SimpleUploadedFile(name=name,
                                           content=file.read(),
                                           content_type=content_type)
                        })
    frm.full_clean()
    if not frm.is_valid():
        return False
    setattr(instance, field_name, frm.cleaned_data['image'])
    return True
|
themoken/canto-curses | canto_curses/reader.py | Python | gpl-2.0 | 9,002 | 0.005332 | # -*- coding: utf-8 -*-
#Canto-curses - ncurses RSS reader
# Copyright (C) 2016 Jack Miller <jack@codezen.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
from canto_next.plugins import Plugin
from canto_next.hooks import on_hook, remove_hook, unhook_all
from .command import register_commands, register_arg_types, unregister_all, _int_range
from .theme import prep_for_display
from .html import htmlparser
from .text import TextBox
from .tagcore import tag_updater
from .color import cc
import traceback
import logging
import re
log = logging.getLogger("READER")
class ReaderPlugin(Plugin):
    # Plugin base class for the Reader window; Reader registers it as
    # its plugin_class during init.
    pass
class Reader(TextBox):
def init(self, pad, callbacks):
TextBox.init(self, pad, callbacks)
self.quote_rgx = re.compile("[\\\"](.*?)[\\\"]")
on_hook("curses_opt_change", self.on_opt_change, self)
on_hook("curses_var_change", self.on_var_change, self)
args = {
"link-list" : ("", self.type_link_list),
}
cmds = {
"goto" : (self.cmd_goto, ["link-list"], "Goto a specific link"),
"destroy" : (self.cmd_destroy, [], "Destroy this window"),
"show-links" : (self.cmd_show_links, [], "Toggle link list display"),
"show-summary" : (self.cmd_show_desc, [], "Toggle summary display"),
"show-enclosures" : (self.cmd_show_encs, [], "Toggle enclosure list display")
}
register_arg_types(self, args)
register_commands(self, cmds, "Reader")
self.plugin_class = ReaderPlugin
self.update_plugin_lookups()
def die(self):
unhook_all(self)
unregister_all(self)
def on_opt_change(self, change):
if "reader" not in change:
return
for opt in ["show_description", "enumerate_links", "show_enclosures"]:
if opt in change["reader"]:
self.callbacks["set_var"]("needs_refresh", True)
self.callbacks["release_gui"]()
return
def on_attributes(self, attributes):
sel = self.callbacks["get_var"]("reader_item")
if sel and sel.id in attributes:
remove_hook("curses_attributes", self.on_attributes)
self.callbacks["set_var"]("needs_refresh", True)
self.callbacks["release_gui"]()
def on_var_change(self, variables):
# If we've been instantiated and unfocused, and selection changes,
# we need to be redrawn.
if "selected" in variables and variables["selected"]:
self.callbacks["set_var"]("reader_item", variables["selected"])
self.callbacks["set_var"]("needs_refresh", True)
self.callbacks["release_gui"]()
def type_link_list(self):
    """Build the (completions, validator) pair for the link-list arg type."""
    links = self.links
    domains = {'all': links}
    syms = {'all': {'*': range(0, len(links))}}
    # Default to the main link when one exists.
    fallback = [links[0]] if links else []

    def validate(spec):
        return _int_range("link", domains, syms, fallback, spec)

    return (None, validate)
def update_text(self):
reader_conf = self.callbacks["get_opt"]("reader")
s = "No selected story.\n"
extra_content = ""
sel = self.callbacks["get_var"]("reader_item")
if sel:
self.links = [("link",sel.content["link"],"mainlink")]
s = "%B" + prep_for_display(sel.content["title"]) + "%b\n"
# Make sure the story has the most recent info before we check it.
sel.sync()
# We use the description for most reader content, so if it hasn't
# been fetched yet then grab that from the server now and setup
# a hook to get notified when sel's attributes are changed.
l = [ | "description", "content", "links", "media_content",
"enclosures"]
for attr in l:
if attr not in sel.content:
tag_updater.request_attributes(sel.id, l)
s += "%BWaiting for content...%b\n"
on_hook("curses_attributes", self.on_attributes, self)
break
else:
# Grab text content over description, as it's likely got more
# information.
mainbody = sel.content["description"]
if "content" in sel.content:
for c in sel.content["content"]:
if "type" in c and "text" in c["type"]:
mainbody = c["value"]
# Add enclosures before HTML parsing so that we can add a link
# and have the remaining link logic pick it up as normal.
if reader_conf['show_enclosures']:
parsed_enclosures = []
if sel.content["links"]:
for lnk in sel.content["links"]:
if 'rel' in lnk and 'href' in lnk and lnk['rel'] == 'enclosure':
if 'type' not in lnk:
lnk['type'] = 'unknown'
parsed_enclosures.append((lnk['href'], lnk['type']))
if sel.content["media_content"] and 'href' in sel.content["media_content"]:
if 'type' not in sel.content["media_content"]:
sel.content['media_content']['type'] = 'unknown'
parsed_enclosures.append((sel.content["media_content"]['href'],\
sel.content["media_content"]['type']))
if sel.content["enclosures"] and 'href' in sel.content["enclosures"]:
if 'type' not in sel.content["enclosures"]:
sel.content['enclosures']['type'] = 'unknown'
parsed_enclosures.append((sel.content['enclosures']['href'],\
sel.content['enclosures']['type']))
if not parsed_enclosures:
mainbody += "<br />[ No enclosures. ]<br />"
else:
for lnk, typ in parsed_enclosures:
mainbody += "<a href=\""
mainbody += lnk
mainbody += "\">["
mainbody += typ
mainbody += "]</a>\n"
for attr in list(self.plugin_attrs.keys()):
if not attr.startswith("edit_"):
continue
try:
a = getattr(self, attr)
(mainbody, extra_content) = a(mainbody, extra_content)
except:
log.error("Error running Reader edit plugin")
log.error(traceback.format_exc())
# This needn't be prep_for_display'd because the HTML parser
# handles that.
content, links = htmlparser.convert(mainbody)
# 0 always is the mainlink, append other links
# to the list.
self.links += links
if reader_conf['show_description']:
s += self.quote_rgx.sub(cc("reader_quote") + "\"\\1\"" + cc.end("reader_quote"), content)
if reader_conf['enumerate_links']:
s += "\n\n"
for idx, (t, url, text) in enumerate(self.links):
text = prep_for_display(text)
url = prep_for_display(url)
link_text = "[%B" + str(idx) + "%b][" +\
text + "]: " + url + "\n\n"
if t == "link":
link_text = cc("reader_link") + link_text + cc.end("reader_link")
elif t == "image":
link_text = cc("reader_image_link") + link_text + cc.end("reader_image_link")
s += link_text
# After we have generate |
20tab/django-taggit-live | setup.py | Python | bsd-3-clause | 1,287 | 0.003885 | import os
from setuptools import setup, find_packages
from setuptools.dist import Distribution
import pkg_resources
import taggit_live
# Register the package with setuptools.  Fixes extraction garbling that had
# split the `license=` keyword and a classifier string mid-token, which made
# this call a SyntaxError.  Inaccurate classifiers stay commented out.
setup(name='django-taggit-live',
      version=taggit_live.__version__,
      description="It's an autocomplete widget for django-taggit TagField",
      author='20tab srl: Raffaele Colace',
      author_email='info@20tab.com',
      url='https://github.com/20tab/django-taggit-live',
      license='Mit License',
      platforms=['OS Independent'],
      classifiers=[
          #'Development Status :: 5 - Production/Stable',
          'Environment :: Web Environment',
          'Framework :: Django',
          'Intended Audience :: Developers',
          #'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
          'Topic :: Software Development',
          'Topic :: Software Development :: Libraries :: Application Frameworks',
      ],
      install_requires=[
          'Django >=1.6',
          'django-taggit'
      ],
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      package_data={
          '': ['*.css', '*.js', '*.gif', '*.png', ],
      },
      )
|
cyrobin/patrolling | timer.py | Python | bsd-3-clause | 580 | 0.003448 | """ Timer
inspired by
http://stackoverflow.com/q | uestions/5849800/tic-toc-functions-analog-in-python
Usage :
with Timer('foo_stuff'):
# do some foo
# do some stuff
"""
import time
from constant import *
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
if VERBOSITY_LEVEL > 1:
if self.name:
| print '[%s]' % self.name,
print 'Elapsed: %s s' % (time.time() - self.tstart)
|
movitto/snap | snap/config.py | Python | gpl-3.0 | 10,759 | 0.005763 | # Snap! Configuration Manager
#
# (C) Copyright 2011 Mo Morsi (mo@morsi.org)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, Version 3,
# as published by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
import os.path
import optparse, ConfigParser
import snap
from snap.options import *
from snap.snapshottarget import SnapshotTarget
from snap.exceptions import ArgError
class ConfigOptions:
    """Container holding all the configuration options available
    to the Snap system"""

    # modes of operation
    RESTORE = 0
    BACKUP = 1

    def __init__(self):
        '''initialize configuration'''
        # mode of operation (RESTORE or BACKUP)
        self.mode = None

        # per-backend state: whether the backend is enabled, plus the
        # entities explicitly included / excluded when backing up / restoring
        self.target_backends = dict((b, False) for b in SnapshotTarget.BACKENDS)
        self.target_includes = dict((b, []) for b in SnapshotTarget.BACKENDS)
        self.target_excludes = dict((b, []) for b in SnapshotTarget.BACKENDS)

        # output log level: 'quiet', 'normal', 'verbose' or 'debug'
        self.log_level = 'normal'

        # output format to backup / restore
        self.outputformat = 'snapfile'

        # location of the snapfile to backup to / restore from
        self.snapfile = None

        # encryption/decryption password; None disables encryption
        self.encryption_password = None

        # hash of key/value pairs of service-specific options
        self.service_options = {}

    def log_level_at_least(self, comparison):
        """Return True when the current log level is at least *comparison*.

        Unrecognised current levels behave like 'normal'; unrecognised
        comparison values never match (mirrors the original boolean chain).
        """
        levels = {'quiet': 0, 'normal': 1, 'verbose': 2, 'debug': 3}
        return levels.get(self.log_level, 1) >= levels.get(comparison, 4)
class ConfigFile:
    """Represents the snap config file to be read and parsed"""

    # shared ConfigParser instance; stays None when the file is missing
    parser = None

    def __init__(self, config_file):
        '''
        Initialize the config file, specifying its path

        @param config_file - the path to the file to load
        '''
        # if config file doesn't exist, just ignore
        if not os.path.exists(config_file):
            if snap.config.options.log_level_at_least("verbose"):
                snap.callback.snapcallback.warn("Config file " + config_file + " not found")
        else:
            self.parser = ConfigParser.ConfigParser()
            self.parser.read(config_file)
            self.__parse()

    def string_to_bool(string):
        '''Static helper to convert a string to a boolean value

        Returns True/False for recognised spellings, None otherwise.
        '''
        if string == 'True' or string == 'true' or string == '1':
            return True
        elif string == 'False' or string == 'false' or string == '0':
            return False
        return None
    # pre-decorator-syntax staticmethod wrapping (kept for old Pythons)
    string_to_bool = staticmethod(string_to_bool)

    def string_to_array(string):
        '''Static helper to convert a colon deliminated string to an array of strings'''
        return string.split(':')
    string_to_array = staticmethod(string_to_array)

    def __get_bool(self, key, section='main'):
        '''
        Retreive the indicated boolean value from the config file

        @param key - the string key corresponding to the boolean value to retrieve
        @param section - the section to retrieve the value from
        @returns - the value or None if not found
        '''
        try:
            return ConfigFile.string_to_bool(self.parser.get(section, key))
        except:
            # bare except: ConfigParser raises NoSectionError/NoOptionError
            # (and self.parser may be None); all are treated as "not found"
            return None

    def __get_string(self, key, section='main'):
        '''
        Retreive the indicated string value from the config file

        @param key - the string key corresponding to the string value to retrieve
        @param section - the section to retrieve the value from
        @returns - the value or None if not found
        '''
        try:
            return self.parser.get(section, key)
        except:
            return None

    def __get_array(self, section='main'):
        '''return array of key/value pairs from the config file section

        @param section - the section which to retrieve the key / values from
        @returns - the array of key / value pairs or None if not found
        '''
        try:
            return self.parser.items(section)
        except:
            return None

    def __parse(self):
        '''parse configuration out of the config file'''
        # Three accepted forms per backend:
        #   backend   = True/False  -> enable / disable it
        #   backend   = a:b:!c      -> enable, with includes and '!'-excludes
        #   nobackend = True        -> explicitly disable it
        for backend in SnapshotTarget.BACKENDS:
            val = self.__get_bool(backend)
            if val is not None:
                snap.config.options.target_backends[backend] = val
            else:
                val = self.__get_string(backend)
                if val:
                    snap.config.options.target_backends[backend] = True
                    val = ConfigFile.string_to_array(val)
                    for include in val:
                        # a leading '!' marks an exclusion
                        if include[0] == '!':
                            snap.config.options.target_excludes[backend].append(include[1:])
                        else:
                            snap.config.options.target_includes[backend].append(include)
                else:
                    val = self.__get_bool('no' + backend)
                    if val:
                        snap.config.options.target_backends[backend] = False

        # scalar options; only override the defaults when present in the file
        of = self.__get_string('outputformat')
        sf = self.__get_string('snapfile')
        ll = self.__get_string('loglevel')
        enp = self.__get_string('encryption_password')
        if of != None:
            snap.config.options.outputformat = of
        if sf != None:
            snap.config.options.snapfile = sf
        if ll != None:
            snap.config.options.log_level = ll
        if enp != None:
            snap.config.options.encryption_password = enp

        # arbitrary key/value pairs from the [services] section
        services = self.__get_array('services')
        if services:
            for k, v in services:
                snap.config.options.service_options[k] = v
class Config:
"""The configuration manager, used to set and verify snap config values
from the config file and command line. Primary interface to the
Configuration System"""
configoptions = None
parser = None
# read values from the config files and set them in the target ConfigOptions
def read_config(self):
    """Parse every known config file (the global list plus the local
    checkout's resources/snap.conf) into snap.config.options.

    Bug fix: the resources path used to be appended to the module-level
    CONFIG_FILES list on every call, so repeated calls grew the global
    list and re-parsed the same file.  Work on a local copy instead.
    """
    # conf stored in resources, used when running from a local checkout
    local_conf = os.path.join(os.path.dirname(__file__), "..", "resources", "snap.conf")
    for config_file in list(CONFIG_FILES) + [local_conf]:
        ConfigFile(config_file)
def parse_cli(self):
'''
parses the command line an set them in the target ConfigOptions
'''
usage = "usage: %prog [options] arg"
self.parser = optparse.OptionParser(usage, version=SNAP_VERSION)
self.parser.add_option('', '--restore', dest='restore', action='store_true', default=False, help='Restore snapshot')
self.parser.add_option('', '--backup', dest='backup', action='store_true', default=False, help='Take snapshot')
self.parser.add_option('-l', '--log-level', dest='log_level', action='store', default="normal", help='Log level (quiet, normal, verbose, debug)')
self.parser.add_option('-o', '--outputformat', dest='outputformat', action='store', default=None, help='Output file format')
self.parser.add_option('-f', '--snapfile', dest='snapfile', action='store', default=None, help='Snapshot file, use - for stdout')
self.parser.add_option('-p', '--password', dest='encryption_password' |
AASHE/django-aashestrap | aashestrap/management/commands/get_menu.py | Python | mit | 1,101 | 0 | #!/usr/bin/env python
"""
Retrieves menu from Drupal site
"""
from aashestrap.models import Menu
from django.core.management.base import BaseCommand
import urllib2
from django.http import HttpResponse
from BeautifulSoup import BeautifulSoup
from django.core.exceptions import ObjectDoesNotExist
class Command(BaseCommand):
    """Management command entry point: refresh the cached Drupal menu."""

    def handle(self, *args, **options):
        # All of the work lives in the module-level helper.
        get_menu()
def get_menu():
    """Fetch aashe.org's navigation/footer markup and cache it in the
    singleton Menu model (pk=1)."""
    # Try to retrieve the existing menu object
    try:
        menu = Menu.objects.get(pk=1)
    # If there isn't one, instantiate one
    except ObjectDoesNotExist:
        menu = Menu(pk=1)
    # Request aashe home page
    request = urllib2.Request('http://www.aashe.org/')
    response = urllib2.urlopen(request)
    # Soup it
    soup = BeautifulSoup(response)
    # Search and extract the footer
    results = soup.findAll(id="block-menu_block-3")
    footer = results[0].__str__('utf8')
    # Search and extract the navigation bar
    results = soup.findAll(id="navigation")
    header = results[0].__str__('utf8')
    menu.footer = footer
    menu.header = header
    menu.save()
|
resync/resync | tests/test_client_utils.py | Python | apache-2.0 | 6,834 | 0.000878 | from .testlib import TestCase
import argparse
import logging
import os.path
import re
import unittest
from resync.client_utils import init_logging, count_true_args, parse_links, parse_link, parse_capabilities, parse_capability_lists, add_shared_misc_options, process_shared_misc_options
from resync.client import ClientFatalError
from resync.url_or_file_open import CONFIG
class TestClientUtils(TestCase):
def test01_init_logging(self):
# to_file=False, logfile=None, default_logfile='/tmp/resync.log',
# human=True, verbose=False, eval_mode=False,
# default_logger='client', extra_loggers=None):
tmplog = os.path.join(self.tmpdir, 'tmp.log')
init_logging(to_file=True, default_logfile=tmplog,
extra_loggers=['x1', 'x2'])
# check x1 and x2 set, not x3 (can tell by level)
self.assertTrue(logging.getLogger('x1').level, logging.DEBUG)
self.assertTrue(logging.getLogger('x2').level, logging.DEBUG)
self.assertEqual(logging.getLogger('x3').level, 0)
# write something, check goes to file
log = logging.getLogger('resync')
log.warning('PIGS MIGHT FLY')
logtxt = open(tmplog, 'r').read()
self.assertTrue(re.search(r'WARNING \| PIGS MIGHT FLY', logtxt))
def test02_count_true_args(self):
self.assertEqual(count_true_args(), 0)
self.assertEqual(count_true_args(True), 1)
self.assertEqual(count_true_args(False), 0)
self.assertEqual(count_true_args(0, 1, 2, 3), 3)
def test03_parse_links(self):
self.assertEqual(parse_links([]), [])
self.assertEqual(parse_links(['u,h']), [{'href': 'h', 'rel': 'u'}])
self.assertEqual(parse_links(['u,h', 'v,i']), [
{'href': 'h', 'rel': 'u'}, {'href': 'i', 'rel': 'v'}])
self.assertRaises(ClientFatalError, parse_links, 'xx')
self.assertRaises(ClientFatalError, parse_links, ['u'])
self.assertRaises(ClientFatalError, parse_links, ['u,h', 'u'])
def test04_parse_link(self):
# Input string of the form: rel,href,att1=val1,att2=val2
self.assertEqual(parse_link('u,h'), {'href': 'h', 'rel': 'u'})
self.assertEqual(parse_link('u,h,a=b'), {
'a': 'b', 'href': 'h', 'rel': 'u'})
self.assertEqual(parse_link('u,h,a=b,c=d'), {
'a': 'b', 'c': 'd', 'href': 'h', 'rel': 'u'})
self.assertEqual(parse_link('u,h,a=b,a=d'), {
'a': 'd', 'href': 'h', 'rel': 'u'}) # desired??
self.assertRaises(ClientFatalError, parse_link, '')
self.assertRaises(ClientFatalError, parse_link, 'u')
self.assertRaises(ClientFatalError, parse_link, 'u,')
self.assertRaises(ClientFatalError, parse_link, 'u,h,,')
self.assertRaises(ClientFatalError, parse_link, 'u,h,a')
self.assertRaises(ClientFatalError, parse_link, 'u,h,a=')
self.assertRaises(ClientFatalError, parse_link, 'u,h,a=b,=c')
def test05_parse_capabilities(self):
# Input string of the form: cap_name=uri,cap_name=uri
# good
c = parse_capabilities('a=')
self.assertEqual(len(c), 1)
self.assertEqual(c['a'], '')
c = parse_capabilities('a=b,c=')
self.assertEqual(len(c), 2)
self.assertEqual(c['a'], 'b')
# bad
self.assertRaises(ClientFatalError, parse_capabilities, 'a')
self.assertRaises(ClientFatalError, parse_capabilities, 'a=b,')
def test06_parse_capability_lists(self):
# Input string of the form: uri,uri
self.assertEqual(parse_capability_lists('a,b'), ['a', 'b'])
def test07_add_shared_misc_options(self):
    """Test add_shared_misc_options method.

    Fixes extraction garbling: 'parser | = ...' (a SyntaxError) and
    'args.noau | th' reconstructed.
    """
    parser = argparse.ArgumentParser()
    add_shared_misc_options(parser, default_logfile='/tmp/abc.log')
    args = parser.parse_args(['--hash', 'md5', '--hash', 'sha-1',
                              '--checksum',
                              '--from', '2020-01-01T01:01:01Z',
                              '--exclude', 'ex1', '--exclude', 'ex2',
                              '--multifile',
                              '--logger', '--logfile', 'log.out',
                              '--spec-version', '1.0',
                              '-v'])
    self.assertEqual(args.hash, ['md5', 'sha-1'])
    self.assertTrue(args.checksum)
    self.assertEqual(args.from_datetime, '2020-01-01T01:01:01Z')
    self.assertEqual(args.exclude, ['ex1', 'ex2'])
    self.assertTrue(args.multifile)
    self.assertTrue(args.logger)
    self.assertEqual(args.logfile, 'log.out')
    self.assertEqual(args.spec_version, '1.0')
    self.assertTrue(args.verbose)
    # Remote options
    parser = argparse.ArgumentParser()
    add_shared_misc_options(parser, default_logfile='/tmp/abc.log', include_remote=True)
    args = parser.parse_args(['--noauth',
                              '--access-token', 'VerySecretToken',
                              '--delay', '1.23',
                              '--user-agent', 'rc/2.1.1'])
    self.assertTrue(args.noauth)
    self.assertEqual(args.access_token, 'VerySecretToken')
    self.assertEqual(args.delay, 1.23)
    self.assertEqual(args.user_agent, 'rc/2.1.1')
    # Remote options not selected
    parser = argparse.ArgumentParser()
    add_shared_misc_options(parser, default_logfile='/tmp/abc.log', include_remote=False)
    self.assertRaises(SystemExit, parser.parse_args, ['--access-token', 'VerySecretToken'])
def test08_process_shared_misc_options(self):
"""Test process_shared_misc_options method."""
global CONFIG
config_copy = CONFIG.copy()
args = argparse.Namespace(hash=['sha-1'], checksum='md5')
process_shared_misc_options(args)
self.assertEqual(args.hash, ['sha-1', 'md5'])
# Remote options
args = argparse.Namespace(access_token='ExtraSecretToken',
delay=2.5,
user_agent='me',
checksum=None)
process_shared_misc_options(args, include_remote=True)
self.assertEqual(CONFIG['bearer_token'], 'ExtraSecretToken')
self.assertEqual(CONFIG['delay'], 2.5)
self.assertEqual(CONFIG['user_agent'], 'me')
# Negative delay is bad...
args = argparse.Namespace(access_token=None, delay=-1.0, user_agent=None, checksum=None)
self.assertRaises(argparse.ArgumentTypeError, process_shared_misc_options, args, include_remote=True)
# Config is a global so reset back to old version
for (k, v) in config_copy.items():
CONFIG[k] = v
|
elelianghh/gunicorn | tests/support.py | Python | mit | 1,597 | 0.000626 | import functools
import sys
import unittest
import platform
HOST = "127.0.0.1"
def requires_mac_ver(*min_version):
    """Skip the decorated callable on Mac OS X older than *min_version*.

    For example, @requires_mac_ver(10, 5) raises unittest.SkipTest when the
    OS X version is less than 10.5; on other platforms (or when the version
    string cannot be parsed) the callable runs unchanged.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            if sys.platform == 'darwin':
                version_txt = platform.mac_ver()[0]
                try:
                    current = tuple(int(part) for part in version_txt.split('.'))
                except ValueError:
                    # Unparsable version string: run the test anyway.
                    current = None
                if current is not None and current < min_version:
                    min_version_txt = '.'.join(map(str, min_version))
                    raise unittest.SkipTest(
                        "Mac OS X %s or higher required, not %s"
                        % (min_version_txt, version_txt))
            return func(*args, **kw)
        # Expose the requirement for introspection by callers.
        wrapper.min_version = min_version
        return wrapper
    return decorator
# Use the stdlib SimpleNamespace (Python 3.3+); fall back to a minimal
# re-implementation on older interpreters.  Fixes extraction garbling that
# had split the imported name and the __eq__ body across '|' markers.
try:
    from types import SimpleNamespace
except ImportError:
    class SimpleNamespace(object):
        """Attribute-bag object: SimpleNamespace(a=1).a == 1."""

        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

        def __repr__(self):
            keys = sorted(self.__dict__)
            items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
            return "{}({})".format(type(self).__name__, ", ".join(items))

        def __eq__(self, other):
            return self.__dict__ == other.__dict__
|
DrewMeyersCUboulder/UPOD_Bridge | Server/Mongodb.py | Python | mit | 147 | 0.006803 | """
Mongo database connection class
use mongodb to store/retrieve data
"""
from AbstractDb import AbstractDb


class Mongodb(AbstractDb):
    """MongoDB-backed database connection (AbstractDb implementation).

    Placeholder: store/retrieve behaviour is not implemented yet.
    Fix: the import had been garbled ('Abstr | actDb'); reconstructed.
    """
    pass
|
visionegg/visionegg | demo/mouse_gabor_perspective.py | Python | lgpl-2.1 | 11,119 | 0.015559 | #!/usr/bin/env python
"""Perspective-distorted sinusoidal grating in gaussian window"""
from VisionEgg import *
start_default_logging(); watch_exceptions()
from VisionEgg.Core import *
from VisionEgg.Gratings import *
from VisionEgg.SphereMap import *
from VisionEgg.Text import *
from VisionEgg.Textures import *
import VisionEgg.ParameterTypes as ve_types
import math, os
import pygame
import OpenGL.GL as gl
elevation = 0.0
azimuth = 0.0
fov_x = 90.0
def get_mouse_position():
    """Return the mouse position in OpenGL (bottom-left origin) coordinates."""
    # convert to OpenGL coordinates
    (x,y) = pygame.mouse.get_pos()
    # pygame's origin is top-left; flip the y axis
    y = screen.size[1]-y
    return x,y
grating_orient_method = 'reorient stimulus' # start with this as default
def set_az_el(x,y):
    """Convert a screen position to azimuth/elevation and re-aim the display.

    Maps x/y linearly onto [-90, +90] degrees, updates the status text and
    the mask's window center, and (depending on grating_orient_method)
    either re-centers the grating/grid with the mask or leaves them at 0,0.
    """
    global screen, elevation
    global grid_stimulus_moving, grating_stimulus, mask
    global grating_orient_method
    azimuth = (float(x) / screen.size[0]) * 180 - 90
    elevation = (float(y) / screen.size[1]) * 180 - 90
    az_el_text.parameters.text = "Mouse moves window, press Esc to quit. Az, El = (%5.1f, %5.1f)"%(azimuth,elevation)
    # The gaussian window always follows the mouse.
    mask.parameters.window_center_azimuth = azimuth
    mask.parameters.window_center_elevation = elevation
    if grating_orient_method == 'reorient stimulus': # normal
        grid_stimulus_moving.parameters.center_azimuth = azimuth
        grid_stimulus_moving.parameters.center_elevation = elevation
        grating_stimulus.parameters.grating_center_azimuth = azimuth
        grating_stimulus.parameters.grating_center_elevation = elevation
    elif grating_orient_method == 'mask only':
        # Mouse button held: keep the underlying stimulus fixed at 0,0.
        grating_stimulus.parameters.grating_center_azimuth = 0.0
        grating_stimulus.parameters.grating_center_elevation = 0.0
        grid_stimulus_moving.parameters.center_azimuth = 0.0
        grid_stimulus_moving.parameters.center_elevation = 0.0
screen = get_default_screen()
projection_3d = SimplePerspectiveProjection(fov_x=fov_x)
grid_stimulus_moving = AzElGrid(use_text=False, # no text
minor_line_color=(0.9,0.5,0.5,.2),# set low alpha
major_line_color=(1.0,0.0,0.0,.4),# set low alpha
on=False) # start with grid off
grid_stimulus_fixed = AzElGrid(on=False,
minor_line_color=(0.5,0.5,0.7),
) # start with grid off
try:
# We want the maximum number of samples possible, hopefully 2048
grating_stimulus = SphereGrating(num_samples=2048,
radius = 1.0,
spatial_freq_cpd = 1.0/9.0,
temporal_freq_hz = 1.0,
slices = 50,
stacks = 50)
except NumSamplesTooLargeError:
grating_stimulus = SphereGrating(num_samples=1024,
radius = 1.0,
spatial_freq_cpd = 1.0/9.0,
temporal_freq_hz = 1.0,
slices = 50,
stacks = 50)
min_filters = ['GL_LINEAR',
'GL_NEAREST',
'GL_NEAREST_MIPMAP_LINEAR',
'GL_NEAREST_MIPMAP_NEAREST',
'GL_LINEAR_MIPMAP_LINEAR',
'GL_LINEAR_MIPMAP_NEAREST',
]
cur_min_filter_index = 0
def set_filter_and_text():
    """Apply the currently selected OpenGL min_filter and refresh the help text."""
    global grating_stimulus, filter_text, cur_min_filter_index, min_filters
    min_filter = min_filters[cur_min_filter_index]
    filter_text.parameters.text = "'g' toggles grid display, 'f' cycles min_filter (now %s)"%min_filter
    # Map the name string to the matching OpenGL constant (e.g. gl.GL_LINEAR).
    min_filter_int = eval("gl."+min_filter)
    grating_stimulus.parameters.min_filter = min_filter_int
mask = SphereWindow(radius=1.0*0.90, # make sure window is inside sphere with grating
window_shape_radius_parameter=40.0,
slices=50,
stacks=50)
text_color = (0.0,0.0,1.0) # RGB ( blue)
xpos = 10.0
yspace = 5
text_params = {'anchor':'lowerleft','color':text_color,'font_size':20}
text_stimuli = []
ypos = 0
text_stimuli.append( Text( text = "(Hold mouse button to prevent re-orienting stimulus with mask.)",
position=(xpos,ypos),**text_params))
ypos += text_stimuli[-1].parameters.size[1] + yspace
text_stimuli.append( Text( text = "Numeric keypad changes grating orientation.",
position=(xpos,ypos),**text_params))
ypos += text_stimuli[-1].parameters.size[1] + yspace
# Placeholder text; set_filter_and_text() fills in the real message just
# below.  Fix: the variable name had been garbled as 'filte | r_text'.
filter_text = Text( text = "temporary text",
                    position=(xpos,ypos),**text_params)
set_filter_and_text()
text_stimuli.append( filter_text )
ypos += text_stimuli[-1].parameters.size[1] + yspace
# Fix: the attribute name had been garbled as 'lowpass_cutoff_cycl | es_per_texel'.
sf_cutoff_text = Text(text = "'c/C' changes cutoff SF (now %.2f cycles per texel)"%(grating_stimulus.parameters.lowpass_cutoff_cycles_per_texel),
                      position=(xpos,ypos),**text_params)
text_stimuli.append( sf_cutoff_text )
ypos += text_stimuli[-1].parameters.size[1] + yspace
zoom_text = Text(text = "'z/Z' changes zoom (X field of view %.2f degrees)"%(fov_x),
position=(xpos,ypos),**text_params)
text_stimuli.append( zoom_text )
ypos += text_stimuli[-1].parameters.size[1] + yspace
tf_text = Text(text = "'t/T' changes TF (now %.2f hz)"%(grating_stimulus.parameters.temporal_freq_hz),
position=(xpos,ypos),**text_params)
text_stimuli.append( tf_text )
ypos += text_stimuli[-1].parameters.size[1] + yspace
sf_text = Text(text = "'-' shrinks window, '+' grows window, 's/S' changes SF (now %.2f cycles per degree)"%(grating_stimulus.parameters.spatial_freq_cpd),
position=(xpos,ypos),**text_params)
text_stimuli.append( sf_text )
ypos += text_stimuli[-1].parameters.size[1] + yspace
az_el_text = Text( text = "Mouse moves window, press Esc to quit. Az, El = (%05.1f, %05.1f)"%(azimuth,elevation),
position=(xpos,ypos),**text_params)
text_stimuli.append( az_el_text )
ypos += text_stimuli[-1].parameters.size[1] + yspace
text_stimuli.append( Text( text = "Demonstration of perspective distorted, windowed grating.",
position=(xpos,ypos),**text_params))
viewport = Viewport(screen=screen,
projection=projection_3d,
stimuli=[grating_stimulus,
grid_stimulus_moving,
mask, # mask must be drawn after grating
grid_stimulus_fixed,
])
grid_stimulus_fixed.set(my_viewport=viewport) # must know viewport for proper positioning of text labels
grid_stimulus_moving.set(my_viewport=viewport) # must know viewport for proper positioning of text labels
text_viewport = Viewport(screen=screen, # default (orthographic) viewport
stimuli=text_stimuli)
quit_now = False
shift_key = False
frame_timer = FrameTimer()
while not quit_now:
for event in pygame.event.get():
if event.type == pygame.locals.QUIT:
quit_now = True
elif event.type == pygame.locals.MOUSEBUTTONDOWN:
if event.button == 1:
grating_orient_method = 'mask only'
elif event.type == pygame.locals.MOUSEBUTTONUP:
if event.button == 1:
grating_orient_method = 'reorient stimulus'
elif event.type == pygame.locals.KEYUP:
if event.key in [pygame.locals.K_LSHIFT,pygame.locals.K_RSHIFT]:
shift_key = False
elif event.type == pygame.locals.KEYDOWN:
if event.key == pygame.locals.K_ESCAPE:
quit_now = True
elif event.key in [pygame.locals.K_LSHIFT,pygame.locals.K_RSHIFT]:
shift_key = True
elif event.key == pygame.locals.K_KP1:
grating_stimulus.parameters.orientation = 225.0
elif event.key == pygame.locals.K_KP2:
grating_stimulus.parameters.orientation = 270.0
elif event.key == pygame.locals.K_KP3:
grating_stimulus.parameters.orientation = 315.0
elif event.key == pygame.locals.K_KP6:
grating_stimulus.para |
coandco/pywinauto | pywinauto/unittests/test_common_controls.py | Python | lgpl-2.1 | 34,027 | 0.005466 | # GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"Tests for classes in controls\common_controls.py"
__revision__ = "$Revision: 234 $"
import sys
import ctypes
import unittest
import time
import pprint
import pdb
import os
sys.path.append(".")
from pywinauto.controls import common_controls
from pywinauto.controls.common_controls import *
from pywinauto.win32structures import RECT
from pywinauto.controls import WrapHandle
#from pywinauto.controls.HwndWrapper import HwndWrapper
from pywinauto import findbestmatch
controlspy_folder = os.path.join(
os.path.dirname(__file__), "..\..\controlspy0798")
class RemoteMemoryBlockTestCases(unittest.TestCase):
    """Tests for common_controls._RemoteMemoryBlock.

    Fix: the original defined test__init__fail twice with identical bodies;
    the second definition silently shadowed the first, so only one copy is
    kept.
    """

    def test__init__fail(self):
        "Constructing a _RemoteMemoryBlock on handle 0 must raise AccessDenied"
        self.assertRaises(AccessDenied, common_controls._RemoteMemoryBlock, 0)
class ListViewTestCases(unittest.TestCase):
"Unit tests for the ListViewWrapper class"
def setUp(self):
    """Start the application set some data and ensure the application
    is in the state we want it.

    Fixes extraction garbling: a stray trailing '|' after Application()
    and a '|' split inside the '12,103.6' literal; reconstructed.
    """
    # start the application
    from pywinauto.application import Application
    app = Application()
    app.start_(os.path.join(controlspy_folder, "List View.exe"))

    # (planet, distance km, diameter km, mass kg) rows shown by the app
    self.texts = [
        ("Mercury", '57,910,000', '4,880', '3.30e23'),
        ("Venus", '108,200,000', '12,103.6', '4.869e24'),
        ("Earth", '149,600,000', '12,756.3', '5.9736e24'),
        ("Mars", '227,940,000', '6,794', '6.4219e23'),
        ("Jupiter", '778,330,000', '142,984', '1.900e27'),
        ("Saturn", '1,429,400,000', '120,536', '5.68e26'),
        ("Uranus", '2,870,990,000', '51,118', '8.683e25'),
        ("Neptune", '4,504,000,000', '49,532', '1.0247e26'),
        ("Pluto", '5,913,520,000', '2,274', '1.27e22'),
    ]

    self.app = app
    self.dlg = app.MicrosoftControlSpy #top_window_()
    self.ctrl = app.MicrosoftControlSpy.ListView.WrapperObject()

    #self.dlg.MenuSelect("Styles")

    # select show selection always!
    #app.ControlStyles.ListBox1.TypeKeys("{UP}" * 26 + "{SPACE}")
    #self.app.ControlStyles.ListBox1.Select("LVS_SHOWSELALWAYS")
    #self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
    #self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the ListView friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "ListView")
def testColumnCount(self):
"Test the ListView ColumnCount method"
self.assertEquals (self.ctrl.ColumnCount(), 4)
def testItemCount(self):
"Test the ListView ItemCount method"
self.assertEquals (self.ctrl.ItemCount(), 9)
def testItemText(self):
"Test the ListView item.Text property"
item = self.ctrl.GetItem(1)
self.assertEquals(item['text'], "Venus")
def testItems(self):
"Test the ListView Items method"
flat_texts = []
for row in self.texts:
flat_texts.extend(row)
for i, item in enumerate(self.ctrl.Items()):
self.assertEquals(item['text'], flat_texts[i])
def testTexts(self):
"Test the ListView Texts method"
flat_texts = []
for row in self.texts:
flat_texts.extend(row)
self.assertEquals(flat_texts, self.ctrl.Texts()[1:])
def testGetItem(self):
"Test the ListView GetItem method"
for row in range(self.ctrl.ItemCount()):
for col in range(self.ctrl.ColumnCount()):
self.assertEquals(
self.ctrl.GetItem(row, col)['text'], self.texts[row][col])
def testGetItemText(self):
"Test the ListView GetItem method - with text this time"
for text in [row[0] for row in self.texts]:
self.assertEquals(
self.ctrl.GetItem(text)['text'], text)
self.assertRaises(ValueError, self.ctrl.GetItem, "Item not in this list")
def testColumn(self):
"Test the ListView Columns method"
cols = self.ctrl.Columns()
self.assertEqual (len(cols), self.ctrl.ColumnCount())
# TODO: add more checking of column values
#for col in cols:
# print col
def testGetSelectionCount(self):
"Test the ListView GetSelectedCount method"
self.assertEquals(self.ctrl.GetSelectedCount(), 0)
self.ctrl.Select(1)
self.ctrl.Select(7)
self.assertEquals(self.ctrl.GetSelectedCount(), 2)
# def testGetSelectionCount(self):
# "Test the ListView GetSelectedCount method"
#
# self.assertEquals(self.ctrl.GetSelectedCount(), 0)
#
# self.ctrl.Select(1)
# self.ctrl.Select(7)
#
# self.assertEquals(self.ctrl.GetSelectedCount(), 2)
def testIsSelected(self):
    """IsSelected() must be False before Select() and True afterwards."""
    # ensure the item starts unselected
    self.assertEqual(self.ctrl.IsSelected(1), False)
    # select it and re-check
    self.ctrl.Select(1)
    self.assertEqual(self.ctrl.IsSelected(1), True)
def _testFocused(self):
"Test checking the focus of some ListView items"
print "Select something quick!!"
import time
time.sleep(3)
#self.ctrl.Select(1)
print self.ctrl.IsFocused(0)
print self.ctrl.IsFocused(1)
print self.ctrl.IsFocused(2)
print self.ctrl.IsFocused(3)
print self.ctrl.IsFocused(4)
print self.ctrl.IsFocused(5)
#for col in cols:
# print col
def testSelect(self):
    """Select() by index; Deselect() of an out-of-range index raises."""
    for index in (1, 3, 4):
        self.ctrl.Select(index)
    self.assertRaises(IndexError, self.ctrl.Deselect, 23)
    self.assertEqual(self.ctrl.GetSelectedCount(), 3)
def testSelectText(self):
    """Select() by item text; Deselect() of unknown text raises ValueError."""
    for planet in ("Venus", "Jupiter", "Uranus"):
        self.ctrl.Select(planet)
    self.assertRaises(ValueError, self.ctrl.Deselect, "Item not in list")
    self.assertEqual(self.ctrl.GetSelectedCount(), 3)
def testDeselect(self):
    """Deselect() removes a selection; out-of-range index raises IndexError."""
    self.ctrl.Select(1)
    self.ctrl.Select(4)
    self.ctrl.Deselect(3)  # item 3 was never selected - no-op
    self.ctrl.Deselect(4)
    self.assertRaises(IndexError, self.ctrl.Deselect, 23)
    self.assertEqual(self.ctrl.GetSelectedCount(), 1)
def testGetProperties(self):
"Test getting the properties for the listview control"
props = self.ctrl.GetProperties()
self.assertEquals(
"ListView", props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
for prop_name in props:
|
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/tools/deps/containers.py | Python | gpl-3.0 | 10,514 | 0.002378 | from abc import (
ABCMeta,
abstractmethod
)
import os
import string
from galaxy.util import asbool
from ..deps import docker_util
import logging
log = logging.getLogger(__name__)
DEFAULT_CONTAINER_TYPE = "docker"
class ContainerFinder(object):
    """Resolve which container (if any) a job should execute in.

    Resolution precedence in find_container (first match wins):
      1. per-destination override id ("<type>_container_id_override"),
      2. container descriptions declared by the tool itself,
      3. containers registered for the tool's requirements,
      4. per-destination default id ("<type>_default_container_id"),
    falling back to NULL_CONTAINER when nothing matches.
    """

    def __init__(self, app_info):
        # app_info: deployment-level AppInfo consulted when building containers.
        self.app_info = app_info
        self.container_registry = ContainerRegistry()

    def find_container(self, tool_info, destination_info, job_info):
        """Return a Container for this (tool, destination, job) or NULL_CONTAINER."""

        def __destination_container(container_description=None, container_id=None, container_type=None):
            # Build a container either from a description object or from an
            # explicit (id, type) pair; delegates to the private method of
            # the same (mangled) name on self.
            if container_description:
                container_id = container_description.identifier
                container_type = container_description.type
            container = self.__destination_container(
                container_id,
                container_type,
                tool_info,
                destination_info,
                job_info
            )
            return container

        # Is destination forcing Galaxy to use a particular container do it,
        # this is likely kind of a corner case. For instance if deployers
        # do not trust the containers annotated in tools.
        for container_type in CONTAINER_CLASSES.keys():
            container_id = self.__overridden_container_id(container_type, destination_info)
            if container_id:
                container = __destination_container(container_type=container_type, container_id=container_id)
                if container:
                    return container

        # Otherwise lets see if we can find container for the tool.
        # Exact matches first from explicitly listed containers in tools...
        for container_description in tool_info.container_descriptions:
            container = __destination_container(container_description)
            if container:
                return container

        # Implement vague concept of looping through all containers
        # matching requirements. Exact details need to be worked through
        # but hopefully the idea that it sits below find_container somewhere
        # external components to this module don't need to worry about it
        # is good enough.
        container_descriptions = self.container_registry.container_descriptions_for_requirements(tool_info.requirements)
        for container_description in container_descriptions:
            container = __destination_container(container_description)
            if container:
                return container

        # If we still don't have a container, check to see if any container
        # types define a default container id and use that.
        for container_type in CONTAINER_CLASSES.keys():
            container_id = self.__default_container_id(container_type, destination_info)
            if container_id:
                container = __destination_container(container_type=container_type, container_id=container_id)
                if container:
                    return container

        return NULL_CONTAINER

    def __overridden_container_id(self, container_type, destination_info):
        # Destination-forced container id, or None when absent/disabled.
        if not self.__container_type_enabled(container_type, destination_info):
            return None
        return destination_info.get("%s_container_id_override" % container_type)

    def __default_container_id(self, container_type, destination_info):
        # Destination fallback container id, or None when absent/disabled.
        if not self.__container_type_enabled(container_type, destination_info):
            return None
        return destination_info.get("%s_default_container_id" % container_type)

    def __destination_container(self, container_id, container_type, tool_info, destination_info, job_info):
        # Instantiate the concrete Container class for container_type.
        # TODO: ensure destination_info is dict-like
        if not self.__container_type_enabled(container_type, destination_info):
            return NULL_CONTAINER
        # TODO: Right now this assumes all containers available when a
        # container type is - there should be more thought put into this.
        # Checking which are availalbe - settings policies for what can be
        # auto-fetched, etc....
        return CONTAINER_CLASSES[container_type](container_id, self.app_info, tool_info, destination_info, job_info)

    def __container_type_enabled(self, container_type, destination_info):
        # True when the destination sets "<type>_enabled" to a truthy value.
        return asbool(destination_info.get("%s_enabled" % container_type, False))
class NullContainerFinder(object):
    """Finder used when containerization is disabled: never matches anything."""

    def find_container(self, tool_info, destination_info, job_info):
        """Report that no container applies to this job."""
        return []
class ContainerRegistry():
    """Placeholder registry mapping tool requirements to container descriptions."""

    def __init__(self):
        pass

    def container_descriptions_for_requirements(self, requirements):
        """Return container descriptions matching ``requirements``.

        Not implemented yet, so no container ever matches.
        """
        return []
class AppInfo(object):
    """Deployment-level facts needed when building container command lines."""

    def __init__(self, galaxy_root_dir=None, default_file_path=None, outputs_to_working_directory=False):
        # Root of the Galaxy installation and its default dataset path.
        self.galaxy_root_dir = galaxy_root_dir
        self.default_file_path = default_file_path
        # TODO: Vary default value for docker_volumes based on this...
        self.outputs_to_working_directory = outputs_to_working_directory
class ToolInfo(object):
    """Container-relevant facts about a tool.

    Attributes:
        container_descriptions: container annotations declared by the tool.
        requirements: the tool's declared requirements.
        env_pass_through: environment variable names forwarded into the
            container (currently just GALAXY_SLOTS).
    """
    # TODO: Introduce tool XML syntax to annotate the optional environment
    # variables they can consume (e.g. JVM options, license keys, etc..)
    # and add these to env_pass_through

    def __init__(self, container_descriptions=None, requirements=None):
        # Bug fix: the previous defaults were mutable lists ([]) shared by
        # every instance, so appending to one ToolInfo's list silently
        # mutated all others. None sentinels give each instance a fresh list.
        self.container_descriptions = [] if container_descriptions is None else container_descriptions
        self.requirements = [] if requirements is None else requirements
        self.env_pass_through = ["GALAXY_SLOTS"]
class JobInfo(object):
    """Per-job directory layout needed to mount paths into a container."""

    def __init__(self, working_directory, tool_directory, job_directory):
        self.working_directory = working_directory
        self.job_directory = job_directory
        # Tool files may be remote staged - so this is unintuitively a property
        # of the job not of the tool.
        self.tool_directory = tool_directory
class Container( object ):
    """Abstract base class for container runtimes (e.g. Docker).

    Bundles everything needed to wrap a job's command line: the container
    id plus app/tool/destination/job context objects.
    """

    # Python 2 style ABC marker; a Python 3 port would use metaclass=ABCMeta.
    __metaclass__ = ABCMeta

    def __init__(self, container_id, app_info, tool_info, destination_info, job_info):
        self.container_id = container_id
        self.app_info = app_info
        self.tool_info = tool_info
        self.destination_info = destination_info
        self.job_info = job_info

    @abstractmethod
    def containerize_command(self, command):
        """
        Use destination supplied container configuration parameters,
        container_id, and command to build a new command that runs
        input command in container.
        """
class DockerContainer(Container):
def containerize_command(self, command):
def prop(name, default):
destination_name = "docker_%s" % name
return self.destination_info.get | (destination_name, default)
env_directives = []
for pass_through_var in self.tool_info.env_pass_through:
env_directives.append('"%s=$%s"' % (pass_through_var, pass_through_var))
# Allow destinations to explicitly set environment variables just for
# docker contain | er. Better approach is to set for destination and then
# pass through only what tool needs however. (See todo in ToolInfo.)
for key, value in self.destination_info.iteritems():
if key.startswith("docker_env_"):
env = key[len("docker_env_"):]
env_directives.append('"%s=%s"' % (env, value))
working_directory = self.job_info.working_directory
if not working_directory:
raise Exception("Cannot containerize command [%s] without defined working directory." % working_directory)
volumes_raw = self.__expand_str(self.destination_info.get("docker_volumes", "$defaults"))
# TODO: Remove redundant volumes...
volumes = docker_util.DockerVolume.volumes_from_str(volumes_raw)
volumes_from = self.destination_info.get("docker_volumes_from", docker_util.DEFAULT_VOLUMES_FROM)
docker_host_props = dict(
docker_cmd=prop("cmd", docker_util.DEFAULT_DOCKER_COMMAND),
sudo=asbool(prop("sudo", docker_util.DEFAULT_SUDO)),
sudo_cmd=prop("sudo_cmd", docker_util.DEFAULT_SUDO_COMMAND),
host=prop("host", docker_util.DEFAULT_HOST),
)
|
svisser/ipuz | ipuz/puzzlekinds/wordsearch.py | Python | mit | 1,438 | 0 | from ipuz.exceptions import IPUZException
from ipuz.structures import (
validate_crosswordvalues,
validate_dimensions,
validate_groupspec_dict,
)
from ipuz.validators import (
validate_bool,
validate_dict_of_strings,
validate_elements,
validate_list_of_strings,
validate_non_negative_int,
)
def validate_dictionary(field_name, field_data):
    """Ensure the "dictionary" field is a string (explicitly rejecting True)."""
    is_valid = isinstance(field_data, str) and field_data is not True
    if not is_valid:
        raise IPUZException("Invalid {} value found".format(field_name))
def validate_solution(field_name, field_data):
    """Validate a wordsearch "solution": a string, list of strings, or GroupSpec dict."""
    if not isinstance(field_data, (dict, list, str)):
        raise IPUZException("Invalid {} value found".format(field_name))
    if isinstance(field_data, list):
        validate_list_of_strings(field_name, field_data)
    elif isinstance(field_data, dict):
        validate_groupspec_dict(field_name, field_data)
# Maps each optional WordSearch puzzle field to its validator. A value is
# either a callable, or a (callable, extra_args) tuple for parameterized
# validators. (This literal was garbled by text extraction - e.g.
# "validate | _bool" - and has been reconstructed.)
IPUZ_WORDSEARCH_VALIDATORS = {
    "dimensions": validate_dimensions,
    "puzzle": validate_crosswordvalues,
    "solution": validate_solution,
    "dictionary": validate_dictionary,
    "saved": validate_list_of_strings,
    "showanswers": (validate_elements, ["during", "after", None]),
    "time": validate_non_negative_int,
    "points": (validate_elements, ["linear", "log", None]),
    "zigzag": validate_bool,
    "retrace": validate_bool,
    "useall": validate_bool,
    "misses": validate_dict_of_strings,
}
|
pku9104038/edx-platform | common/lib/capa/capa/capa_problem.py | Python | agpl-3.0 | 28,574 | 0.002625 | #
# File: capa/capa_problem.py
#
# Nomenclature:
#
# A capa Problem is a collection of text and capa Response questions.
# Each Response may have one or more Input entry fields.
# The capa problem may include a solution.
#
"""
Main module which shows problems (of "capa" type).
This is used by capa_module.
"""
from datetime import datetime
import logging
import os.path
import re
from lxml import etree
from xml.sax.saxutils import unescape
from copy import deepcopy
from capa.correctmap import CorrectMap
import capa.inputtypes as inputtypes
import capa.customrender as customrender
import capa.responsetypes as responsetypes
from capa.util import contextualize_text, convert_files_to_filenames
import capa.xqueue_interface as xqueue_interface
from capa.safe_exec import safe_exec
from pytz import UTC
# extra things displayed after "show answers" is pressed
solution_tags = ['solution']
# these get captured as student responses
response_properties = ["codeparam", "responseparam", "answer", "openendedparam"]
# special problem tags which should be turned into innocuous HTML
html_transforms = {
'problem': {'tag': 'div'},
'text': {'tag': 'span'},
'math': {'tag': 'span'},
}
# These should be removed from HTML output, including all subelements
html_problem_semantics = [
"codeparam",
"responseparam",
"answer",
"script",
"hintgroup",
"openendedparam",
"openendedrubric"
]
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# main class for this module
class LoncapaSystem(object):
    """Bundle of external resources a LoncapaProblem needs from its host.

    Collecting these interfaces in one object keeps capa independent of the
    caller's environment: anything able to supply these attributes can
    drive capa.

    Attributes:
        i18n: an object implementing the `gettext.Translations` interface so
            that we can use `.ugettext` to localize strings.

    See :class:`ModuleSystem` for documentation of other attributes.
    """

    def __init__(  # pylint: disable=invalid-name
        self,
        ajax_url,
        anonymous_student_id,
        cache,
        can_execute_unsafe_code,
        DEBUG,  # pylint: disable=invalid-name
        filestore,
        i18n,
        node_path,
        render_template,
        seed,  # Why do we do this if we have self.seed?
        STATIC_URL,  # pylint: disable=invalid-name
        xqueue,
    ):
        self.ajax_url = ajax_url
        self.anonymous_student_id = anonymous_student_id
        self.cache = cache
        self.can_execute_unsafe_code = can_execute_unsafe_code
        self.DEBUG = DEBUG  # pylint: disable=invalid-name
        self.filestore = filestore
        self.i18n = i18n
        self.node_path = node_path
        self.render_template = render_template
        self.seed = seed  # Why do we do this if we have self.seed?
        self.STATIC_URL = STATIC_URL  # pylint: disable=invalid-name
        self.xqueue = xqueue
class LoncapaProblem(object):
"""
Main class for capa Problems.
"""
def __init__(self, problem_text, id, capa_system, state=None, seed=None):
    """
    Initializes capa Problem.

    Arguments:

        problem_text (string): xml defining the problem.
        id (string): identifier for this problem, often a filename (no spaces).
        capa_system (LoncapaSystem): LoncapaSystem instance which provides OS,
            rendering, user context, and other resources.
        state (dict): containing the following keys:
            - `seed` (int) random number generator seed
            - `student_answers` (dict) maps input id to the stored answer for that input
            - `correct_map` (CorrectMap) a map of each input to their 'correctness'
            - `done` (bool) indicates whether or not this problem is considered done
            - `input_state` (dict) maps input_id to a dictionary that holds the state for that input
        seed (int): random number generator seed.
    """
    ## Initialize class variables from state
    self.do_reset()
    self.problem_id = id
    self.capa_system = capa_system
    state = state or {}
    # Set seed according to the following priority:
    #       1. Contained in problem's state
    #       2. Passed into capa_problem via constructor
    self.seed = state.get('seed', seed)
    assert self.seed is not None, "Seed must be provided for LoncapaProblem."
    self.student_answers = state.get('student_answers', {})
    if 'correct_map' in state:
        self.correct_map.set_dict(state['correct_map'])
    self.done = state.get('done', False)
    self.input_state = state.get('input_state', {})
    # Convert startouttext and endouttext to proper <text></text>
    problem_text = re.sub(r"startouttext\s*/", "text", problem_text)
    problem_text = re.sub(r"endouttext\s*/", "/text", problem_text)
    self.problem_text = problem_text
    # parse problem XML file into an element tree
    self.tree = etree.XML(problem_text)
    # handle any <include file="foo"> tags
    self._process_includes()
    # construct script processor context (eg for customresponse problems)
    self.context = self._extract_context(self.tree)
    # Pre-parse the XML tree: modifies it to add ID's and perform some in-place
    # transformations. This also creates the dict (self.responders) of Response
    # instances for each question in the problem. The dict has keys = xml subtree of
    # Response, values = Response instance
    self._preprocess_problem(self.tree)
    if not self.student_answers:  # True when student_answers is an empty dict
        self.set_initial_display()
    # dictionary of InputType objects associated with this problem
    #   input_id string -> InputType object
    self.inputs = {}
    self.extracted_tree = self._extract_html(self.tree)
def do_reset(self):
    """Reset internal state to unfinished, discarding answers and correctness."""
    self.student_answers = {}
    self.correct_map = CorrectMap()
    self.done = False
def set_initial_display(self):
    """Seed student_answers from each responder's initial display, if provided."""
    initial_answers = {}
    for responder in self.responders.values():
        # Only some responder types expose an initial display.
        get_initial = getattr(responder, 'get_initial_display', None)
        if get_initial is not None:
            initial_answers.update(get_initial())
    self.student_answers = initial_answers
def __unicode__(self):
    # Python 2 unicode representation, e.g. u"LoncapaProblem (some_id)".
    return u"LoncapaProblem ({0})".format(self.problem_id)
def get_state(self):
    """
    Return stored per-user session data needed to:

    1) Recreate the problem
    2) Populate any student answers.
    """
    return {'seed': self.seed,
            'student_answers': self.student_answers,
            'correct_map': self.correct_map.get_dict(),
            'input_state': self.input_state,
            'done': self.done}
def get_max_score(self):
    """Return the maximum score for this problem (sum over all responders)."""
    return sum(responder.get_max_score() for responder in self.responders.values())
def get_score(self):
"""
Compute score for this problem. The score is the number of points awarded.
Returns a dictionary {'score': integer, from 0 to get_max_score(),
'total': get_max_score()}.
"""
correct = 0
for key in self.correct_map:
try:
correct += self.correct_map.get_npoints(key)
except Exception:
log.error('key=%s, correct_map = %s', key, self.correct_map)
|
yast/yast-python-bindings | examples/Tree-Checkbox4.py | Python | gpl-2.0 | 2,467 | 0.011755 | # encoding: utf-8
# Tree with recursive multi selection
from yast import import_module
import_module('UI')
from yast import *
import copy
class TreeCheckbox4Client:
def main(self):
UI.OpenDialog(
VBox(
Heading("YaST2 Mini Control Center"),
Tree(
Id("mod"),
Opt("multiSelection", "notify", "immediate", "recursiveSelection"),
"Modules",
[
Item(Id("unselected"), "Unseleted"),
Item(
Id("country"),
"Localization",
True,
[
Item(Id("keyboard"), "Keyboard"),
Item(
Id("timezone"),
"Time zone",
True,
[Item(Id("europe"), "Europe"), Item(Id("asia"), "Asia")]
)
]
),
Item(Id("mouse"), "Mouse"),
Item(Id("lan"), "Network"),
Item(Id("xmas"), "Merry X-Mas"),
Item(Id("newyear"), "Happy New Year")
]
),
PushButton(Id("ok"), Opt("default"), "&OK")
)
)
UI.ChangeWidget("mod", "SelectedItems", [Symbol("lan"), Symbol("mouse")])
UI.ChangeWidget("mod", "SelectedItems", [Symbol("xmas"), Symbol("newyear")])
selected_items = UI.QueryWidget(Id("mod"), "SelectedItems")
ycpbuiltins.y2warning("Selected items: %1", selected_items)
id = None
event = {}
while True:
event = UI.WaitForEvent(1000 * 100)
if event["EventReason"] == "SelectionChanged":
ycpbuiltins.y2error("Selection Chan | ged Event")
if event["EventReason"] == "ValueChanged":
ycpbuiltins.y2error("Value Changed Event")
if event["EventType"] == "TimeoutEvent":
ycpbuiltins.y2error("Timeout Event")
if event != None:
ycpbuiltins.y2error(self.formatEvent(event))
id = event["ID"]
ycpbuiltins.y2milestone(
"Selected items: %1",
UI.QueryWidget(Id("mod"), "SelectedItems"))
if id == "ok": |
break
UI.CloseDialog()
def formatEvent(self, event):
event = copy.deepcopy(event)
html = "Event:"
for key, value in ycpbuiltins.foreach(event).items():
html = html + " " + key + ": " + ycpbuiltins.tostring(value) + ""
return html
TreeCheckbox4Client().main()
|
simpeg/simpeg | tutorials/05-dcr/plot_inv_2_dcr2d_irls.py | Python | mit | 15,085 | 0.001061 | """
2.5D DC Resistivity Inversion with Sparse Norms
===============================================
Here we invert a line of DC resistivity data to recover an electrical
conductivity model. We formulate the inverse problem as a least-squares
optimization problem. For this tutorial, we focus on the following:
- Defining the survey
- Generating a mesh based on survey geometry
- Including surface topography
- Defining the inverse problem (data misfit, regularization, directives)
- Applying sensitivity weighting
- Plotting the recovered model and data misfit
"""
#########################################################################
# Import modules
# --------------
#
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import tarfile
from discretize import TreeMesh
from discretize.utils import mkvc, refine_tree_xyz
from SimPEG.utils import surface2ind_topo, model_builder
from SimPEG import (
maps,
data,
data_misfit,
regularization,
optimization,
inverse_problem,
inversion,
directives,
utils,
)
from SimPEG.electromagnetics.static import resistivity as dc
from SimPEG.electromagnetics.static.utils.static_utils import (
plot_pseudosection,
apparent_resistivity_from_voltage,
)
from SimPEG.utils.io_utils.io_utils_electromagnetics import read_dcip2d_ubc
try:
from pymatsolver import Pardiso as Solver
except ImportError:
from SimPEG import SolverLU as Solver
mpl.rcParams.update({"font.size": 16})
# sphinx_gallery_thumbnail_number = 3
#############################################
# Define File Names
# -----------------
#
# Here we provide the file paths to assets we need to run the inversion. The
# path to the true model conductivity and chargeability models are also
# provided for comparison with the inversion results. These files are stored as a
# tar-file on our google cloud bucket:
# "https://storage.googleapis.com/simpeg/doc-assets/dcr2d.tar.gz"
#
# storage bucket where we have the data
data_source = "https://storage.googleapis.com/simpeg/doc-assets/dcr2d.tar.gz"
# download the data
downloaded_data = utils.download(data_source, overwrite=True)
# unzip the tarfile
tar = tarfile.open(downloaded_data, "r")
tar.extractall()
tar.close()
# path to the directory containing our data
dir_path = downloaded_data.split(".")[0] + os.path.sep
# files to work with
topo_filename = dir_path + "topo_xyz.txt"
data_filename = dir_path + "dc_data.obs"
#############################################
# Load Data, Define Survey and Plot
# ---------------------------------
#
# Here we load the observed data, define the DC and IP survey geometry and
# plot the data values using pseudo-sections.
# **Warning**: In the following example, the observations file is assumed to be
# sorted by sources
#
# Load data
topo_xyz = np.loadtxt(str(topo_filename))
dc_data = read_dcip2d_ubc(data_filename, "volt", "general")
#######################################################################
# Plot Observed Data in Pseudo-Section
# ------------------------------------
#
# Here, we demonstrate how to plot 2D data in pseudo-section.
# First, we plot the actual data (voltages) in pseudo-section as a scatter plot.
# This allows us to visualize the pseudo-sensitivity locations for our survey.
# Next, we plot the data as apparent conductivities in pseudo-section with a filled
# contour plot.
#
# Plot voltages pseudo-section
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
plot_pseudosection(
dc_data,
plot_type="scatter",
ax=ax1,
scale="log",
cbar_label="V/A",
scatter_opts={"cmap": mpl.cm.viridis},
)
ax1.set_title("Normalized Voltages")
plt.show()
# Get apparent conductivities from volts and survey geometry
apparent_conductivities = 1 / apparent_resistivity_from_voltage(
dc_data.survey, dc_data.dobs
)
# Plot apparent conductivity pseudo-section
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
plot_pseudosection(
dc_data.survey,
apparent_conductivities,
plot_type="contourf",
ax=ax1,
scale="log",
cbar_label="S/m",
mask_topography=True,
contourf_opts={"levels": 20, "cmap": mpl.cm.viridis},
)
ax1.set_title("Apparent Conductivity")
plt.show()
####################################################
# Assign Uncertainties
# --------------------
#
# Inve | rsion with SimPEG requires that we define the uncertainties on our data.
# This represents our estimate of the standard deviation of the
# noise in our data. For DC dat | a, the uncertainties are 10% of the absolute value.
#
#
dc_data.standard_deviation = 0.05 * np.abs(dc_data.dobs)
########################################################
# Create Tree Mesh
# ------------------
#
# Here, we create the Tree mesh that will be used invert the DC data
#
dh = 4 # base cell width
dom_width_x = 3200.0 # domain width x
dom_width_z = 2400.0 # domain width z
nbcx = 2 ** int(np.round(np.log(dom_width_x / dh) / np.log(2.0))) # num. base cells x
nbcz = 2 ** int(np.round(np.log(dom_width_z / dh) / np.log(2.0))) # num. base cells z
# Define the base mesh
hx = [(dh, nbcx)]
hz = [(dh, nbcz)]
mesh = TreeMesh([hx, hz], x0="CN")
# Mesh refinement based on topography
mesh = refine_tree_xyz(
mesh,
topo_xyz[:, [0, 2]],
octree_levels=[0, 0, 4, 4],
method="surface",
finalize=False,
)
# Mesh refinement near transmitters and receivers. First we need to obtain the
# set of unique electrode locations.
electrode_locations = np.c_[
dc_data.survey.locations_a,
dc_data.survey.locations_b,
dc_data.survey.locations_m,
dc_data.survey.locations_n,
]
unique_locations = np.unique(
np.reshape(electrode_locations, (4 * dc_data.survey.nD, 2)), axis=0
)
mesh = refine_tree_xyz(
mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False
)
# Refine core mesh region
xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0])
xyz = np.c_[mkvc(xp), mkvc(zp)]
mesh = refine_tree_xyz(
mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False
)
mesh.finalize()
###############################################################
# Project Surveys to Discretized Topography
# -----------------------------------------
#
# It is important that electrodes are not model as being in the air. Even if the
# electrodes are properly located along surface topography, they may lie above
# the discretized topography. This step is carried out to ensure all electrodes
# like on the discretized surface.
#
# Create 2D topography. Since our 3D topography only changes in the x direction,
# it is easy to define the 2D topography projected along the survey line. For
# arbitrary topography and for an arbitrary survey orientation, the user must
# define the 2D topography along the survey line.
topo_2d = np.unique(topo_xyz[:, [0, 2]], axis=0)
# Find cells that lie below surface topography
ind_active = surface2ind_topo(mesh, topo_2d)
# Extract survey from data object
survey = dc_data.survey
# Shift electrodes to the surface of discretized topography
survey.drape_electrodes_on_topography(mesh, ind_active, option="top")
# Reset survey in data object
dc_data.survey = survey
########################################################
# Starting/Reference Model and Mapping on Tree Mesh
# ---------------------------------------------------
#
# Here, we would create starting and/or reference models for the DC inversion as
# well as the mapping from the model space to the active cells. Starting and
# reference models can be a constant background value or contain a-priori
# structures. Here, the starting model is the natural log of 0.01 S/m.
#
# Define conductivity model in S/m (or resistivity model in Ohm m)
air_conductivity = np.log(1e-8)
background_conductivity = np.log(1e-2)
active_map = maps.InjectActiveCells(mesh, ind_active, np.exp(air_conductivity))
nC = int(ind_active.sum())
conductivity_map = active_map * maps.ExpMap()
# Define model
starting_conductivity_model = background_conductivity * np.ones(nC)
##############################################
# |
ales-erjavec/scipy | scipy/ndimage/utils/generate_label_testvectors.py | Python | bsd-3-clause | 1,672 | 0.001196 | import numpy as np
from scipy.ndimage import label
def generate_test_vecs(infile, strelfile, resultfile):
    """Generate regression vectors for scipy.ndimage.label.

    Labels three 7x7 test images with every structuring element from a
    deduplicated family of 3x3 neighborhoods (the base set plus vertical
    flips and 90-degree rotations) and writes:

      * ``infile``     - the stacked input images,
      * ``strelfile``  - the stacked structuring elements,
      * ``resultfile`` - the stacked label() outputs.

    Arguments may be filenames or writable file objects (np.savetxt
    accepts both).
    """
    def bitimage(l):
        # Turn a list of '0'/'1' strings into a boolean 2-D array.
        return np.array([[c for c in s] for s in l]) == '1'

    data = [np.ones((7, 7)),
            bitimage(["1110111",
                      "1100011",
                      "1010101",
                      "0001000",
                      "1010101",
                      "1100011",
                      "1110111"]),
            bitimage(["1011101",
                      "0001000",
                      "1001001",
                      "1111111",
                      "1001001",
                      "0001000",
                      "1011101"])]
    strels = [np.ones((3, 3)),
              np.zeros((3, 3)),
              bitimage(["010", "111", "010"]),
              bitimage(["101", "010", "101"]),
              bitimage(["100", "010", "001"]),
              bitimage(["000", "111", "000"]),
              bitimage(["110", "010", "011"]),
              bitimage(["110", "111", "011"])]
    strels = strels + [np.flipud(s) for s in strels]
    strels = strels + [np.rot90(s) for s in strels]
    # Deduplicate by raw bytes. dict.fromkeys (rather than a set) keeps the
    # first-occurrence order so the generated files are deterministic under
    # hash randomization; np.frombuffer/tobytes replace the long-deprecated
    # np.fromstring/ndarray.tostring pair (removed in NumPy 2.0). .copy()
    # yields writable arrays since frombuffer views are read-only.
    unique_bytes = dict.fromkeys(t.astype(int).tobytes() for t in strels)
    strels = [np.frombuffer(b, dtype=int).reshape((3, 3)).copy()
              for b in unique_bytes]
    inputs = np.vstack(data)
    results = np.vstack([label(d, s)[0] for d in data for s in strels])
    strels = np.vstack(strels)
    np.savetxt(infile, inputs, fmt="%d")
    np.savetxt(strelfile, strels, fmt="%d")
    np.savetxt(resultfile, results, fmt="%d")
generate_test_vecs("label_inputs.txt", "label_strels.txt", "label_results.txt")
|
living180/git-cola | cola/widgets/spellcheck.py | Python | gpl-2.0 | 4,103 | 0.000487 | from __future__ import absolute_import, division, print_function, unicode_literals
import re
from qtpy.QtCore import Qt
from qtpy.QtCore import QEvent
from qtpy.QtCore import Signal
from qtpy.QtGui import QMouseEvent
from qtpy.QtGui import QSyntaxHighlighter
from qtpy.QtGui import QTextCharFormat
from qtpy.QtGui import QTextCursor
from qtpy.QtWidgets import QAction
from .. import qtutils
from .. import spellcheck
from ..i18n import N_
from .text import HintedTextEdit
# pylint: disable=too-many-ancestors
class SpellCheckTextEdit(HintedTextEdit):
    """Text edit widget with spell checking and right-click suggestions."""

    def __init__(self, context, hint, parent=None):
        HintedTextEdit.__init__(self, context, hint, parent)
        # Default dictionary based on the current locale.
        self.spellcheck = spellcheck.NorvigSpellCheck()
        self.highlighter = Highlighter(self.document(), self.spellcheck)

    def set_dictionary(self, dictionary):
        """Switch the spell checker to another dictionary."""
        self.spellcheck.set_dictionary(dictionary)

    def mousePressEvent(self, event):
        """Handle mouse presses, normalizing right-clicks.

        A right-click is rewritten as a left-click so the text cursor moves
        to the pointer location before the context menu is shown.
        """
        if event.button() == Qt.RightButton:
            # Rewrite the mouse event to a left button event so the cursor is
            # moved to the location of the pointer.
            event = QMouseEvent(
                QEvent.MouseButtonPress,
                event.pos(),
                Qt.LeftButton,
                Qt.LeftButton,
                Qt.NoModifier,
            )
        HintedTextEdit.mousePressEvent(self, event)

    def context_menu(self):
        """Build the context menu.

        Returns a (popup_menu, spell_menu) tuple; spell_menu is None unless
        the word under the cursor is misspelled and has suggestions.
        """
        popup_menu = HintedTextEdit.createStandardContextMenu(self)

        # Select the word under the cursor.
        cursor = self.textCursor()
        cursor.select(QTextCursor.WordUnderCursor)
        self.setTextCursor(cursor)

        # Check if the selected word is misspelled and offer spelling
        # suggestions if it is.
        spell_menu = None
        if self.textCursor().hasSelection():
            text = self.textCursor().selectedText()
            if not self.spellcheck.check(text):
                title = N_('Spelling Suggestions')
                spell_menu = qtutils.create_menu(title, self)
                for word in self.spellcheck.suggest(text):
                    action = SpellAction(word, spell_menu)
                    action.result.connect(self.correct)
                    spell_menu.addAction(action)
                # Only add the spelling suggests to the menu if there are
                # suggestions.
                if spell_menu.actions():
                    popup_menu.addSeparator()
                    popup_menu.addMenu(spell_menu)

        return popup_menu, spell_menu

    def contextMenuEvent(self, event):
        """Show the spell-aware context menu at the event position."""
        popup_menu, _ = self.context_menu()
        popup_menu.exec_(self.mapToGlobal(event.pos()))

    def correct(self, word):
        """Replaces the selected text with word."""
        cursor = self.textCursor()
        cursor.beginEditBlock()
        cursor.removeSelectedText()
        cursor.insertText(word)
        cursor.endEditBlock()
class Highlighter(QSyntaxHighlighter):
    """Syntax highlighter that red-underlines misspelled words.

    (Two lines of this class were garbled by text extraction -
    "self.s | pellcheck" and a broken closing paren - and have been
    reconstructed.)
    """

    # Word pattern: case-insensitive, unicode, letters/digits/apostrophes.
    WORDS = r"(?iu)[\w']+"

    def __init__(self, doc, spellcheck_widget):
        QSyntaxHighlighter.__init__(self, doc)
        self.spellcheck = spellcheck_widget
        self.enabled = False

    def enable(self, enabled):
        """Toggle highlighting and re-run it over the whole document."""
        self.enabled = enabled
        self.rehighlight()

    def highlightBlock(self, text):
        """Qt callback: underline each misspelled word in ``text``."""
        if not self.enabled:
            return
        fmt = QTextCharFormat()
        fmt.setUnderlineColor(Qt.red)
        fmt.setUnderlineStyle(QTextCharFormat.SpellCheckUnderline)
        for word_object in re.finditer(self.WORDS, text):
            if not self.spellcheck.check(word_object.group()):
                self.setFormat(
                    word_object.start(),
                    word_object.end() - word_object.start(),
                    fmt,
                )
class SpellAction(QAction):
    """QAction that returns the text in a signal."""

    # Emitted with this action's text when the action is triggered.
    result = Signal(object)

    def __init__(self, *args):
        QAction.__init__(self, *args)
        # pylint: disable=no-member
        self.triggered.connect(self.correct)

    def correct(self):
        """Emit this action's text (the chosen spelling suggestion)."""
        self.result.emit(self.text())
|
django-danceschool/django-danceschool | danceschool/discounts/cms_toolbars.py | Python | bsd-3-clause | 1,622 | 0.000617 | from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from cms.toolbar_pool import toolbar_pool
from cms.toolbar_base import CMSToolbar
from cms.toolbar.items import Break
@toolbar_pool.register
class DiscountLinksToolbar(CMSToolbar):
    ''' Add discounts to the financial menu '''

    def populate(self):
        """Attach discount admin links under Finances > Related Items.

        Entries are only added for users holding the matching
        ``discounts.change_*`` permissions; with neither permission the
        toolbar is left untouched.
        """
        if not (
            self.request.user.has_perm('discounts.change_discountcombo') or
            self.request.user.has_perm('discounts.change_discountcategory')
        ):
            return

        financial_menu = self.toolbar.get_or_create_menu(
            'financial', _('Finances'))

        # Find the separator break, creating it if missing, so the
        # "Related Items" submenu lands immediately after it.
        position = financial_menu.find_first(
            Break,
            identifier='financial_related_items_break'
        )

        if not position:
            financial_menu.add_break('financial_related_items_break')
            position = financial_menu.find_first(
                Break,
                identifier='financial_related_items_break'
            ) + 1

        related_menu = financial_menu.get_or_create_menu(
            'financial-related', _('Related Items'), position=position
        )

        if self.request.user.has_perm('discounts.change_discountcombo'):
            related_menu.add_link_item(
                _('Discounts'), url=reverse('admin:discounts_discountcombo_changelist')
            )
        if self.request.user.has_perm('discounts.change_discountcategory'):
            related_menu.add_link_item(
                _('Discount Categories'),
                url=reverse('admin:discounts_discountcategory_changelist')
            )
|
puruckertom/poptox | poptox/generic/generic_parameters.py | Python | unlicense | 406 | 0.022167 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 17 14:50:59 2012
@author: JHarston
"""
import os |
os.environ['DJANGO_SETTINGS_MODULE']='settings'
from django import forms
from django.db import models
class genericInp(forms.Form):
    """Input form for the generic model (fields pending final spec)."""
    chemical_name = forms.CharField(
        widget=forms.Textarea(attrs={'cols': 20, 'rows': 2}))
    # NOTE(review): label is a placeholder from the original author.
    body_weight_of_bird = forms.FloatField(
        required=True, label='NEED TO GET INPUTS.')
dennybaa/st2 | st2common/st2common/persistence/base.py | Python | apache-2.0 | 12,025 | 0.002328 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
from mongoengine import NotUniqueError
from st2common import log as logging
from st2common.exceptions.db import StackStormDBObjectConflictError
from st2common.models.system.common import ResourceReference
from st2common.transport.reactor import TriggerDispatcher
__all__ = [
'Access',
'ContentPackResource',
'StatusBasedResource'
]
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Access(object):
impl = None
publisher = None
dispatcher = None
# ModelAPI class for this resource
api_model_cls = None
# A list of operations for which we should dispatch a trigger
dispatch_trigger_for_operations = []
# Maps model operation name (e.g. create, update, delete) to the trigger reference which is
# used when dispatching a trigger
operation_to_trigger_ref_map = {}
@classmethod
@abc.abstractmethod
def _get_impl(cls):
pass
@classmethod
@abc.abstractmethod
def _get_publisher(cls):
return None
    @classmethod
    def _get_dispatcher(cls):
        """
        Return a dispatcher class which is used for dispatching triggers.
        """
        # Lazily create a single TriggerDispatcher shared by the class.
        if not cls.dispatcher:
            cls.dispatcher = TriggerDispatcher(LOG)
        return cls.dispatcher
@classmethod
@abc.abstractmethod
def _get_by_object(cls, object):
return None
@classmethod
def get_by_name(cls, value):
return cls._get_impl().get_by_name(value)
@classmethod
def get_by_id(cls, value):
return cls._get_impl().get_by_id(value)
@classmethod
def get_by_ref(cls, value):
return cls._get_impl().get_by_ref(value)
@classmethod
def get(cls, *args, **kwargs):
return cls._get_impl().get(*args, **kwargs)
@classmethod
def get_all(cls, *args, **kwargs):
return cls._get_impl().get_all(*args, **kwargs)
@classmethod
def count(cls, *args, **kwargs):
return cls._get_impl().count(*args, **kwargs)
@classmethod
def query(cls, *args, **kwargs):
return cls._get_impl().query(*args, **kwargs)
@classmethod
def distinct(cls, *args, **kwargs):
return cls._get_impl().distinct(*args, **kwargs)
@classmethod
def aggregate(cls, *args, **kwargs):
return cls._get_impl().aggregate(*args, **kwargs)
@classmethod
def insert(cls, model_object, publish=True, dispatch_trigger=True,
log_not_unique_error_as_debug=False):
if model_object.id:
raise ValueError('id for object %s was unexpected.' % model_object)
try:
model_object = cls._get_impl().insert(model_object)
except NotUniqueError as e:
if log_not_unique_error_as_debug:
LOG.debug('Conflict while trying to save in DB.', exc_info=True)
else:
LOG.exception('Conflict while trying to save in DB.')
# On a conflict determine the conflicting object and return its id in
# the raised exception.
conflict_object = cls._get_by_object(model_object)
conflict_id = str(conflict_object.id) if conflict_object else None
message = str(e)
raise StackStormDBObjectConflictError(message=message, conflict_id=conflict_id,
model_object=model_object)
# Publish internal event on the message bus
if publish:
try:
cls.publish_create(model_object)
except:
LOG.exception('Publish failed.')
# Dispatch trigger
if dispatch_trigger:
try:
cls.dispatch_create_trigger(model_object)
except:
LOG.exception('Trigger dispatch failed.')
return model_object
@classmethod
def add_or_update(cls, model_object, publish=True, dispatch_trigger=True,
log_not_unique_error_as_debug=False):
pre_persist_id = model_object.id
try:
model_object = cls._get_impl().add_or_update(model_object)
except NotUniqueError as e:
if log_not_unique_error_as_debug:
LOG.debug('Conflict while trying to save in DB.', exc_info=True)
else:
LOG.exception('Conflict while trying to save in DB.')
# On a conflict determine the conflicting object and return its id in
# the raised exception.
conflict_object = cls._get_by_object(model_object)
conflict_id = str(conflict | _object.id) if conflict_object else None
message = str(e)
raise StackStormDBObjectConflictError(message=message, conflict_id=confli | ct_id,
model_object=model_object)
is_update = str(pre_persist_id) == str(model_object.id)
# Publish internal event on the message bus
if publish:
try:
if is_update:
cls.publish_update(model_object)
else:
cls.publish_create(model_object)
except:
LOG.exception('Publish failed.')
# Dispatch trigger
if dispatch_trigger:
try:
if is_update:
cls.dispatch_update_trigger(model_object)
else:
cls.dispatch_create_trigger(model_object)
except:
LOG.exception('Trigger dispatch failed.')
return model_object
@classmethod
def update(cls, model_object, publish=True, dispatch_trigger=True, **kwargs):
"""
Use this method when -
* upsert=False is desired
* special operators like push, push_all are to be used.
"""
cls._get_impl().update(model_object, **kwargs)
# update does not return the object but a flag; likely success/fail but docs
# are not very good on this one so ignoring. Explicitly get the object from
# DB abd return.
model_object = cls.get_by_id(model_object.id)
# Publish internal event on the message bus
if publish:
try:
cls.publish_update(model_object)
except:
LOG.exception('Publish failed.')
# Dispatch trigger
if dispatch_trigger:
try:
cls.dispatch_update_trigger(model_object)
except:
LOG.exception('Trigger dispatch failed.')
return model_object
    @classmethod
    def delete(cls, model_object, publish=True, dispatch_trigger=True):
        """Delete a model object, then publish/dispatch "delete" events.

        Event emission is best-effort: failures are logged, not raised.
        Returns whatever the underlying impl's delete() returns.
        """
        persisted_object = cls._get_impl().delete(model_object)
        # Publish internal event on the message bus
        if publish:
            try:
                cls.publish_delete(model_object)
            except Exception:
                LOG.exception('Publish failed.')
        # Dispatch trigger
        if dispatch_trigger:
            try:
                cls.dispatch_delete_trigger(model_object)
            except Exception:
                LOG.exception('Trigger dispatch failed.')
        return persisted_object
####################################################
# Internal event bus message publish related methods
####################################################
@classmethod
def p |
robotics-at-maryland/qubo | src/vl_qubo/src/arduino_node.py | Python | mit | 7,826 | 0.009839 | #!/usr/bin/env python
#sgillen - this program serves as a node that offers the arduino up to the rest of the ros system.
# the packets send to the arduino should be in the following format: p<data>!
# p tells the arduino which command to execute, the data that follows will depend on which command this is
# in general the , character is used as a delimiter, and the ! is used to mark the end of the message
# commands so far
# t,x0,x1,x2,x3,x4,x5,x6,x7! - this sets all 8 thruster values
# d! - this requests the most recent depth value from the arduino (TODO)
import serial, time, sys, select
import rospy
from std_msgs.msg import Int64, Float64, String, Float64MultiArray
from std_srvs.srv import Empty, EmptyResponse
THRUSTER_INVALID = '65535'
STATUS_OK = '0'
STATUS_TIMEOUT = '1'
STATUS_OVERHEAT = '2'
STATUS_OVERHEAT_WARNING = '3'
V_START = 2.0
device = '/dev/ttyACM4'
# When this gets flipped, send shutdown signal
shutdown_flag = False
control_domain = (-128.0, 128.0)
arduino_domain = (1029.0, 1541.0)
num_thrusters = 8
##command variables (should we make this module a class??)
roll_cmd = 0
pitch_cmd = 0
yaw_cmd = 0
depth_cmd = 0
surge_cmd = 0
sway_cmd = 0
# Maps values from control_domain to arduino_domain
def thruster_map(control_in):
    """Linearly rescale *control_in* from control_domain to arduino_domain.

    Returns the nearest integer value in the arduino domain
    (e.g. -128..128 maps onto 1029..1541).
    """
    c_lo, c_hi = control_domain
    a_lo, a_hi = arduino_domain
    scale = (a_hi - a_lo) / (c_hi - c_lo)
    return int(round(a_lo + (control_in - c_lo) * scale))
#reads a command from stdin
def read_cmd_stdin():
if sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
line = sys.stdin.readline()
line = line.rstrip()
if line:
num_bytes = ser.write(line)
print "bytes sent =", num_bytes
#sends an array of ints to the thrusters using the agreed upon protocol
#the actual over the wire value is t,x1,x2,x3,x4,x5,x6,x7,x8!
def send_thruster_cmds(thruster_cmds):
cmd_str = "t"
for cmd in thruster_cmds:
cmd_str += (",")
cmd_str += (str(cmd))
cmd_str += ("!")
ser.write(cmd_str)
#print "arduino return", ser.readline()
##TODO parse return value
# requests depth from arduino, and waits until it receives it
def get_depth():
ser.write('d!')
# blocks forever until receives a newline
depth = ser.readline()
return depth
##------------------------------------------------------------------------------
# callbacks
def shutdown_thrusters(srv):
global shutdown_flag
shutdown_flag = True
return EmptyResponse()
def roll_callback(msg):
    """Store the latest roll command (mapped to the arduino domain)."""
    # Bug fix: without the global declaration the assignment below created a
    # function-local variable and the module-level roll_cmd never changed.
    global roll_cmd
    rospy.loginfo(rospy.get_caller_id() + "I heard %s", msg.data)
    roll_cmd = thruster_map(msg.data)
def pitch_callback(msg):
    """Store the latest pitch command (mapped to the arduino domain)."""
    # Bug fix: the module-level pitch_cmd was never updated without "global".
    global pitch_cmd
    rospy.loginfo(rospy.get_caller_id() + "I heard %s", msg.data)
    pitch_cmd = thruster_map(msg.data)
def yaw_callback(msg):
    """Store the latest yaw command (mapped to the arduino domain)."""
    # Bug fix: the module-level yaw_cmd was never updated without "global".
    global yaw_cmd
    rospy.loginfo(rospy.get_caller_id() + "I heard %s", msg.data)
    yaw_cmd = thruster_map(msg.data)
def depth_callback(msg):
    """Store the latest depth command (mapped to the arduino domain)."""
    # Bug fix: the module-level depth_cmd was never updated without "global".
    global depth_cmd
    rospy.loginfo(rospy.get_caller_id() + "I heard %s", msg.data)
    depth_cmd = thruster_map(msg.data)
def surge_callback(msg):
    """Store the latest surge command (mapped to the arduino domain)."""
    # Bug fix: the module-level surge_cmd was never updated without "global".
    global surge_cmd
    rospy.loginfo(rospy.get_caller_id() + "I heard %s", msg.data)
    surge_cmd = thruster_map(msg.data)
def sway_callback(msg):
    """Store the latest sway command (mapped to the arduino domain)."""
    # Bug fix: the module-level sway_cmd was never updated without "global".
    global sway_cmd
    rospy.loginfo(rospy.get_caller_id() + "I heard %s", msg.data)
    sway_cmd = thruster_map(msg.data)
def thruster_callback(msg):
    """Copy all 8 thruster setpoints into the shared list, mapped to the
    arduino domain.  Mutates the module-level thruster_cmds in place, so no
    ``global`` statement is needed here.
    """
    rospy.loginfo(rospy.get_caller_id() + "I heard %s", msg.data)
    for i in range(0,num_thrusters):
        thruster_cmds[i] = thruster_map(msg.data[i])
        #print "after map -" , thruster_cmds[i]
##------------------------------------------------------------------------ | ------
# main
if __name__ == '__main__':
# map for messages
msg = {'t': '', 'd': '', 's': ''}
#!!! this also restarts the arduino! (apparently)
# Keep trying to open serial
while True:
try:
ser = serial.Serial(device,115200, timeout=0,parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS)
break
except:
time.sleep(0.25)
continue
time.sleep(3)
#I can't think of a situation where we want to change the namespace but I guess you never know
qubo_namespace = "/qubo/"
rospy.init_node('arduino_node', anonymous=False)
status_pub = rospy.Publisher(qubo_namespace + 'status', String, queue_size = 10)
depth_pub = rospy.Publisher(qubo_namespace + "depth", Float64, queue_size = 10)
thruster_sub = rospy.Subscriber(qubo_namespace + "thruster_cmds", Float64MultiArray, thruster_callback)
#rospy spins all these up in their own thread, no need to call spin()
rospy.Subscriber(qubo_namespace + "roll_cmd" , Float64, roll_callback)
rospy.Subscriber(qubo_namespace + "pitch_cmd" , Float64, pitch_callback)
rospy.Subscriber(qubo_namespace + "yaw_cmd" , Float64, yaw_callback)
rospy.Subscriber(qubo_namespace + "depth_cmd" , Float64, depth_callback)
rospy.Subscriber(qubo_namespace + "surge_cmd" , Float64, surge_callback)
rospy.Subscriber(qubo_namespace + "sway_cmd" , Float64, sway_callback)
rospy.Service(qubo_namespace + "shutdown_thrusters", Empty, shutdown_thrusters)
thruster_cmds = [thruster_map(0)]*num_thrusters
rate = rospy.Rate(10) #100Hz
# Poll the ina for voltage, start up regular
startup_voltage = 0.0
# zero the thrusters
send_thruster_cmds([0] * num_thrusters)
'''
zero = ser.readline().strip()
while startup_voltage <= V_START:
ser.write('s!')
startup_voltage = float(ser.readline())
time.sleep(0.1)
'''
while not rospy.is_shutdown():
depth = get_depth() #TODO
if depth == '':
pass
else:
msg[depth[0]] = depth[1:]
depth_pub.publish(float(msg[depth[0]]))
#thruster layout found here https://docs.google.com/presentation/d/1mApi5nQUcGGsAsevM-5AlKPS6-FG0kfG9tn8nH2BauY/edit#slide=id.g1d529f9e65_0_3
#surge, yaw, sway thrusters
# thruster_cmds[0] += (surge_cmd - yaw_cmd - sway_cmd)
# thruster_cmds[1] += (surge_cmd + yaw_cmd + sway_cmd)
# thruster_cmds[2] += (surge_cmd + yaw_cmd - sway_cmd)
# thruster_cmds[3] += (surge_cmd - yaw_cmd + sway_cmd)
# #depth, pitch, roll thrusters
# thruster_cmds[4] += (depth_cmd + pitch_cmd + roll_cmd)
# thruster_cmds[5] += (depth_cmd + pitch_cmd - roll_cmd)
# thruster_cmds[6] += (depth_cmd - pitch_cmd - roll_cmd)
# thruster_cmds[7] += (depth_cmd - pitch_cmd + roll_cmd)
# Build the thruster message to send
if shutdown_flag:
send_thruster_cmds([0] * num_thrusters)
else:
send_thruster_cmds(thruster_cmds)
# print "hello"
#ser.write('c!')
#temp = ser.readline()
#print(temp)
# get thruster cmd
x = ser.readline().strip()
if x != '':
msg[x[0]] = x[1:]
# get status
x = ser.readline().strip()
if x != '':
msg[x[0]] = x[1:]
#status = ser.readline().strip()
print(thruster_cmds)
print('Thruster: {0}, status: {1}, depth: {2}'.format(msg['t'], msg['s'], msg['d']))
if msg['t'] == THRUSTER_INVALID:
print('Invalid thruster input')
if msg['s'] == STATUS_OK:
print('STATUS OK')
status_pub.publish(data='OK')
elif msg['s'] == STATUS_TIMEOUT:
print('STATUS TIMEOUT')
status_pub.publish(data='TIMEOUT')
elif msg['s'] == STATUS_OVERHEAT:
print('STATUS OVERHEAT')
status_pub.publish(data='OVERHEAT')
elif msg['s'] == STATUS_OVERHEAT_WARNING:
print('STATUS OVERHEAT WARNING')
status_pub.publish(data='OVERHEAT WARNING')
rate.sleep()
|
xuru/pyvisdk | tests/test_facade.py | Python | mit | 1,752 | 0.005137 | import unittest,types
from pyvisdk import Vim
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.mo.host_system import HostSystem
from pyvisdk.mo.folder import Folder
from pyvisdk.mo.datastore import Datastore
from pyvisdk.mo.cluster_compute_resource import ClusterComputeResource
from pyvisdk.facade. | property_collector import CachedPropertyCollector, HostSystemCachedPropertyCollector
from tests.common import get_options
from time import sleep
from re import match
class Test_Facades(unittest.TestCase):
    """Integration tests for the property-collector facades.

    Requires a live vSphere server; connection details come from
    get_options().  Note: uses Python 2 dict-view indexing
    (``properties.values()[0]``), consistent with the rest of the file.
    """

    @classmethod
    def setUpClass(cls):
        cls.options = get_options()
        cls.vim = Vim(cls.options.server)
        cls.vim.login(cls.options.username, cls.options.password)

    @classmethod
    def tearDownClass(cls):
        cls.vim.logout()

    def test_HostPropertyCollectorFacade(self):
        facade = HostSystemCachedPropertyCollector(self.vim, ["config.storageDevice"])
        properties = facade.getProperties()
        self.assertGreater(len(properties.keys()), 0)
        self.assertEqual(len(properties.values()[0].keys()), 1)
        self.assertEqual(len(properties.keys()[0].split(':')), 2)
        self.assertNotEqual(properties.keys()[0].split(':')[1], '')

    def test_PropertyCollectorFacade(self):
        facade = CachedPropertyCollector(self.vim, "HostSystem", ["summary.quickStats"])
        properties = facade.getProperties()
        self.assertGreater(len(properties.keys()), 0)
        self.assertEqual(len(properties.values()[0].keys()), 1)
        self.assertEqual(len(properties.keys()[0].split(':')), 2)
        self.assertNotEqual(properties.keys()[0].split(':')[1], '')
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testHosts']
unittest.main()
|
stackforge/watcher | watcher/decision_engine/solution/solution_comparator.py | Python | apache-2.0 | 832 | 0 | # -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS <jean-emile.dartois@b-com.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# di | stributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions | and
# limitations under the License.
#
import abc
class BaseSolutionComparator(object, metaclass=abc.ABCMeta):
    """Abstract base for strategies that compare two candidate solutions."""

    @abc.abstractmethod
    def compare(self, sol1, sol2):
        # Concrete subclasses must implement the comparison.
        raise NotImplementedError()
|
Decalogue/chat | chat/word2pinyin.py | Python | mit | 4,070 | 0.004119 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PEP 8 check with Pylint
"""Word to pinyin.
"""
from numpy import mat, zeros, where
from pypinyin import pinyin, lazy_pinyin
# from .mytools import time_me
def sum_cosine(matrix, threshold):
    """Calculate the parameters of the semantic Jaccard model based on the
    Cosine similarity matrix of semantic word segmentation.

    Greedily picks the current maximum score above *threshold*, adds it to
    the running total, and zeroes that row and column so each fragment is
    matched at most once.

    Args:
        matrix: Semantic Cosine similarity matrix (numpy matrix).
        threshold: Minimum score for a pair to count as a semantic match.

    Returns:
        dict with:
        total: Sum of the matched similarity scores.
        num_not_match: Number of unmatched fragments (max over the two sets).
        total_dif: The largest remaining (unmatched) score, i.e. the degree
            of semantic difference left over.
    """
    total = 0
    count = 0
    row = matrix.shape[0]
    col = matrix.shape[1]
    zero_row = zeros([1, col])
    zero_col = zeros([row, 1])
    max_score = matrix.max()
    while max_score > threshold:
        total += max_score
        count += 1
        pos = where(matrix == max_score)
        i = pos[0][0]
        j = pos[1][0]
        # Remove the matched pair from further consideration.
        matrix[i, :] = zero_row
        matrix[:, j] = zero_col
        max_score = matrix.max()
    num = (row - count) if row > col else (col - count)
    return dict(total=total, num_not_match=num, total_dif=max_score)
def match_pinyin(pinyin1, pinyin2):
    """Similarity score between two pinyin strings.
    计算两个拼音的相似度得分。

    Compares the two syllables position by position and returns the number
    of matching leading characters divided by the longer length, so the
    score is in [0, 1].
    """
    assert pinyin1 != "", "pinyin1 can not be empty"
    assert pinyin2 != "", "pinyin2 can not be empty"
    pv_match = 0
    if len(pinyin1) < len(pinyin2):
        len_short = len(pinyin1)
        len_long = len(pinyin2)
        pv_long = pinyin2
        pv_short = pinyin1
    else:
        len_short = len(pinyin2)
        len_long = len(pinyin1)
        pv_long = pinyin1
        pv_short = pinyin2
    for i in range(0, len_short):
        if pv_short[i] == pv_long[i]:
            pv_match += 1
    score = pv_match/len_long
    return score
def jaccard_pinyin(pv1, pv2, threshold=0.7):
    """Similarity score between two pinyin vectors with jaccard.
    计算两个拼音向量的语义 jaccard 相似度得分。

    According to the semantic jaccard model to calculate the similarity.
    The similarity score interval for each two pinyin sentences was [0, 1].
    根据语义jaccard模型来计算相似度。每两个拼音向量的相似度得分区间为为[0, 1]。
    """
    # Build the pairwise similarity matrix between the two syllable lists.
    sv_matrix = []
    sv_rows = []
    for pinyin1 in pv1:
        for pinyin2 in pv2:
            score = match_pinyin(pinyin1, pinyin2)
            sv_rows.append(score)
        sv_matrix.append(sv_rows)
        sv_rows = []
    matrix = mat(sv_matrix)
    # Greedy matching above the threshold (see sum_cosine).
    result = sum_cosine(matrix, threshold)
    total = result["total"]
    total_dif = result["total_dif"]
    num = result["num_not_match"]
    # Jaccard-style ratio: matched weight over matched + penalized unmatched.
    sim = total/(total + num*(1-total_dif))
    return sim
def pinyin_cut(sentence, pattern=None):
    """Cut the sentence into phonetic vectors.
    将句子切分为拼音向量。

    NOTE(review): *pattern* is currently unused; kept for interface
    compatibility.
    """
    return lazy_pinyin(sentence)
# @time_me()
def similarity_pinyin(sentence1, sentence2):
    """Similarity score between two based on pinyin vectors with jaccard.
    基于拼音向量的语义 jaccard 句子相似度得分。

    Converts both sentences to pinyin first, so homophones score as equal.
    """
    pv1 = pinyin_cut(sentence1)
    pv2 = pinyin_cut(sentence2)
    return jaccard_pinyin(pv1, pv2)
if __name__ == '__main__':
print(similarity_pinyin("我想办理粤通卡", "办理悦通卡"))
|
rschnapka/partner-contact | partner_relations_in_tab/model/__init__.py | Python | agpl-3.0 | 1,051 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warr | anty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import res_partner_relation_ty | pe
from . import res_partner
|
chrislit/abydos | tests/compression/test_compression_rle.py | Python | gpl-3.0 | 3,657 | 0 | # Copyright 2014-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.compression.test_compression_rle.
This module contains unit tests for abydos.compression.RLE
"""
import unittest
from abydos.compression import BWT, RLE
class RLETestCases(unittest.TestCase):
    """Test abydos.compression.RLE.encode & .decode."""

    rle = RLE()
    bwt = BWT()

    # Classic RLE demonstration string (long runs of W and B).
    bws = 'WWWWWWWWWWWWBWWWWWWWWWWWWBBBWWWWWWWWWWWWWWWWWWWWWWWWBWWWWWWWWWWWWWW'

    def test_rle_encode(self):
        """Test abydos.compression.RLE.encode."""
        self.assertEqual(self.rle.encode(''), '')
        self.assertEqual(self.rle.encode(self.bwt.encode('')), '\x00')
        self.assertEqual(self.rle.encode('banana'), 'banana')
        self.assertEqual(
            self.rle.encode(self.bwt.encode('banana')), 'annb\x00aa'
        )
        self.assertEqual(self.rle.encode(self.bws), '12WB12W3B24WB14W')
        self.assertEqual(
            self.rle.encode(self.bwt.encode(self.bws)), 'WWBWWB45WB\x003WB10WB'
        )
        self.assertEqual(self.rle.encode('Schifffahrt'), 'Schi3fahrt')

    def test_rle_decode(self):
        """Test abydos.compression.RLE.decode."""
        self.assertEqual(self.rle.decode(''), '')
        self.assertEqual(self.bwt.decode(self.rle.decode('\x00')), '')
        self.assertEqual(self.rle.decode('banana'), 'banana')
        self.assertEqual(
            self.bwt.decode(self.rle.decode('annb\x00aa')), 'banana'
        )
        self.assertEqual(self.rle.decode('12WB12W3B24WB14W'), self.bws)
        self.assertEqual(self.rle.decode('12W1B12W3B24W1B14W'), self.bws)
        self.assertEqual(
            self.bwt.decode(self.rle.decode('WWBWWB45WB\x003WB10WB')), self.bws
        )
        self.assertEqual(self.rle.decode('Schi3fahrt'), 'Schifffahrt')

    def test_rle_roundtripping(self):
        """Test abydos.compression.RLE.encode & .decode roundtripping."""
        self.assertEqual(self.rle.decode(self.rle.encode('')), '')
        self.assertEqual(
            self.bwt.decode(
                self.rle.decode(self.rle.encode(self.bwt.encode('')))
            ),
            '',
        )
        self.assertEqual(self.rle.decode(self.rle.encode('banana')), 'banana')
        self.assertEqual(
            self.bwt.decode(
                self.rle.decode(self.rle.encode(self.bwt.encode('banana')))
            ),
            'banana',
        )
        self.assertEqual(self.rle.decode(self.rle.encode(self.bws)), self.bws)
        self.assertEqual(
            self.bwt.decode(
                self.rle.decode(self.rle.encode(self.bwt.encode(self.bws)))
            ),
            self.bws,
        )
        self.assertEqual(
            self.rle.decode(self.rle.encode('Schifffahrt')), 'Schifffahrt'
        )
        self.assertEqual(
            self.bwt.decode(
                self.rle.decode(
                    self.rle.encode(self.bwt.encode('Schifffahrt'))
                )
            ),
            'Schifffahrt',
        )
if __name__ == '__main__':
unittest.main()
|
tommyogden/moldy-argon | plot/plot_moldy_argon.py | Python | mit | 1,728 | 0.00463 | #!/usr/bin/env python
import sys
import numpy as np
import json
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def main():
    """Load moldy_argon.json and animate the 2-D atom positions over time."""
    json_data = open('moldy_argon.json')
    data = json.load(json_data)
    json_data.close()
    # One JSON record per time step; each carries the atom positions.
    num_t_steps = len(data)
    num_atoms = data[0]['atoms']['num_atoms']
    num_dims = data[0]['atoms']['num_dims']
    pos = np.zeros([num_t_steps, num_atoms, num_dims])
    t_range = np.zeros([num_t_steps])
    for i, t in enumerate(data):
        pos[i] = data[i]['atoms']['pos']
        t_range[i] = data[i]['t']
    # Simulation box side length; axes are centred on the origin.
    box_length = 4.
    # Set up the figure, axis and plot to animate
    fig = plt.figure(figsize=(8.,8.))
    ax = fig.add_subplot(111)
    ax.set_xlim([-box_length/2, box_length/2])
    ax.set_ylim([-box_length/2, box_length/2])
    plot, = ax.plot([], [], 'o')
    t_text = ax.text(0.02, 0.95, '', transform=ax.transAxes)
    # Initialization function: plot the background of each frame
    def init():
        plot.set_data([], [])
        t_text.set_text('')
        return plot, t_text
    # Animation function. This is called sequentially, once per frame.
    def animate(i):
        x = pos[i,:,0]
        y = pos[i,:,1]
        plot.set_data(x, y)
        t_text.set_text('t = %.2f' % t_range[i])
        return plot, t_text
    # Call the animator. blit means only re-draw the parts that have changed.
    # anim = animation.FuncAnimation(fig, animate, init_func=init,
    #                                frames=len(t_range),
    #                                interval=200)#, blit=True)
    anim = animation.FuncAnimation(fig, animate, init_func=init, interval=1)
    plt.show()
if __name__ == '__main__':
    # Run the animation and propagate main()'s result as the exit status.
    status = main()
    sys.exit(status)
iskandr/prototype-pan-allele-class2 | mhc_names.py | Python | apache-2.0 | 3,134 | 0.000638 | from collections import defaultdict
# source: wikipedia article on HLA-DQ
dq_allele_freq_table = """
05:01 02:01 13.16
02:01 02:02 11.08
03:02 02:02 0.08
03:01 04:02 0.03
03:02 04:02 0.11
04:01 04:02 2.26
01:01 05:01 10.85
01:02 05:01 0.03
01:03 05:01 0.03
01:04 05:01 0.71
01:02 05:02 1.20
01:03 05:02 0.05
01:04 05:03 2.03
01:02 05:04 0.08
01:03 06:01 0.66
01:02 06:02 14.27
01:03 06:02 0.03
01:04 06:02 0.03
01:02 06:03 0.27
01:03 06:03 5.66
01:02 06:04 3.40
01:02 0 | 6:09 0.71
02:01 03:01 0.05
03:01 03:01 0.16
03:03 03:01 6.45
03:01 03:04 0.09
03:02 03:04 0.09
04:01 03:01 0.03
05:05 03:01 11.06
06:01 03:01 0.11
03:01 03:02 9.62
03:02 03:02 0.93
02:01 03:03 3.66
03:02 03:03 0.79
"""
# map each beta to a list of (alpha, freq) pairs
dq_beta_to_alphas = defaultdict(list)
dq_alpha_to_betas = defaultdict(list)
for line in dq_allele_freq_table.split("\n"):
line = line.strip( | )
if not line:
continue
a, b, freq = line.split()
a = a.replace(":", "")
b = b.replace(":", "")
freq = float(freq)
dq_beta_to_alphas[b].append((a, freq))
dq_alpha_to_betas[a].append((b, freq))
dq_beta_to_alpha = {
b: max(pairs, key=lambda x: x[1])[0]
for (b, pairs) in dq_beta_to_alphas.items()
}
dq_alpha_to_beta = {
a: max(pairs, key=lambda x: x[1])[0]
for (a, pairs) in dq_alpha_to_betas.items()
}
def normalize_mhc_name(name):
    """Normalize an MHC class II allele name to a canonical HLA string.

    Strips separators (``_ : / *``), prefixes "HLA-", and pairs lone
    alpha/beta chains with their most frequent partner (via the
    dq_beta_to_alpha / dq_alpha_to_beta frequency tables above).
    NOTE: branch order matters; the rewrites below build on each other.
    """
    if len(name) == 7 and name.startswith("DRB"):
        name = "DRB1" + name[3:]
    if not name.startswith("HLA"):
        name = "HLA-" + name
    name = name.replace("_", "")
    name = name.replace(":", "")
    name = name.replace("/", "-")
    name = name.replace("*", "")
    if name.startswith("HLA-DRB"):
        # if DR beta is given without its alpha chain, pair with DRA1*01:01
        gene_number = name[7]
        beta = name[8:]
        name = "HLA-DRA10101-DRB%s%s" % (gene_number, beta,)
    elif name.startswith("HLA-DRA"):
        # expand DRA to DRA1
        suffix = name[7:]
        parts = suffix.split("-")
        if len(parts[0]) == 4:
            name = "HLA-DRA1%s" % ("-".join(parts),)
    elif name.startswith("HLA-DQB1"):
        # if DQ beta is given without its paired alpha, look up the most
        # frequent alpha partner for this beta
        assert len(name) == 12, (len(name), name)
        beta = name[8:12]
        if beta in dq_beta_to_alpha:
            name = "HLA-DQA1%s-DQB1%s" % (
                dq_beta_to_alpha[beta], beta)
    elif name.startswith("HLA-DQA0"):
        # if DQA1 has been written as just DQA
        parts = name.split("-")
        alpha = parts[1]
        name = "-".join(["HLA", "DQA1%s" % alpha[3:]] + parts[2:])
    if name.startswith("HLA-DQA1"):
        # if DQ alpha is given without its paired beta, look up the most
        # frequent beta partner for this alpha
        parts = name.split("-")
        if len(parts) == 2:
            alpha = parts[1][4:]
            beta = dq_alpha_to_beta[alpha]
            name = "HLA-DQA1%s-DQB1%s" % (alpha, beta)
    return name
test_normalized = normalize_mhc_name("HLA-DRA*0101/DRB1*0801")
assert test_normalized == "HLA-DRA10101-DRB10801", test_normalized
|
xiaoda99/keras | keras/initializations.py | Python | mit | 2,556 | 0.002739 | from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
from .utils.theano_utils import sharedX, shared_zeros, shared_ones
def get_fans(shape):
    """Return the (fan_in, fan_out) pair for a weight array *shape*.

    For 2-D shapes these are simply (rows, cols); for convolutional
    shapes fan_in is the product of all but the first dimension and
    fan_out is the first dimension.
    """
    if len(shape) == 2:
        fan_in, fan_out = shape[0], shape[1]
    else:
        fan_in, fan_out = np.prod(shape[1:]), shape[0]
    return fan_in, fan_out
#XD
def uniform_small(shape, scale=0.005):
return sharedX(np.random.uniform(low=-scale, high=scale, size=shape))
def uniform_positive(shape, scale=0.05):
return sharedX(np.random.uniform(low=0., high=scale, size=shape))
def uniform(shape, scale=0.05):
return sharedX(np.random.uniform(low=-scale, high=scale, size=shape))
def normal(shape, scale=0.05):
return sharedX(np.random.randn(*shape) * scale)
def lecun_uniform(shape):
''' Reference: LeCun 98, Efficient Backprop
http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
'''
fan_in, fan_out = get_fans(shape)
scale = np.sqrt(3. / fan_in)
return uniform(shape, scale)
def glorot_normal(shape):
''' Reference: Glorot & Bengio, AISTATS 2010
'''
fan_in, fan_out = get_fans(shape)
s = np.sqrt(2. / (fan_in + fan_out))
return normal(shape, s)
def glorot_uniform(shape):
    ''' Uniform variant of Glorot & Bengio (AISTATS 2010) initialization:
    scale = sqrt(6 / (fan_in + fan_out)).
    '''
    fan_in, fan_out = get_fans(shape)
    s = np.sqrt(6. / (fan_in + fan_out))
    return uniform(shape, s)
def he_normal(shape):
    ''' Reference: He et al., http://arxiv.org/abs/1502.01852
    Normal initialization with scale = sqrt(2 / fan_in).
    '''
    fan_in, fan_out = get_fans(shape)
    s = np.sqrt(2. / fan_in)
    return normal(shape, s)
def he_uniform(shape):
fan_in, fan_out = get_fans(shape)
s = np.sqrt(6. / fan_in)
return uniform(shape, s)
def orthogonal(shape, scale=1.1):
    ''' From Lasagne. Reference: Saxe et al., http://arxiv.org/abs/1312.6120

    Draws a random Gaussian matrix, takes the orthonormal factor from its
    SVD, reshapes it to *shape* and scales it by *scale*.
    '''
    flat_shape = (shape[0], np.prod(shape[1:]))
    a = np.random.normal(0.0, 1.0, flat_shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    # pick the one with the correct shape
    q = u if u.shape == flat_shape else v
    q = q.reshape(shape)
    return sharedX(scale * q[:shape[0], :shape[1]])
def identity(shape, scale=1):
if len(shape) != 2 or shape[0] != shape[1]:
raise Exception("Identity matrix initialization can only be used for 2D square matrices")
else:
return sharedX(scale * np.identity(shape[0]))
def zero(shape):
return shared_zeros(shape)
def one(shape):
return shared_ones(shape)
from .utils.generic_utils import get_from_module
def get(identifier):
return get_from_module(identifier, globals(), 'initialization')
|
w495/python-video-shot-detector | shot_detector/filters/compound/sigma_cascade.py | Python | bsd-3-clause | 1,654 | 0.008475 | # -*- coding: utf8 -*-
"""
The main idea of this module, that you can combine
any number of any filters without any knowledge about their
implementation. You have only one requirement — user functions
should return a filter (or something that can be cast to a filter).
"""
from __future__ import absolute_import, division, print_function
from builtins import range
from shot_detector.filters import (
DelayFilter,
MeanSWFilter,
StdSWFilter,
)
WINDOW_SIZE = 25
delay = DelayFilter()
original = delay(0)
mean = MeanSWFilter(
# window_size=50,
# strict_windows=True,
# mean_name='EWMA',
cs=False,
)
std = StdSWFilter(
window_size=25,
strict_windows=True,
cs=False,
)
def multi_bill(start=5, stop=50, step=None, pivot=None, **kwargs):
"""
:param start:
:param stop:
:param step:
:param pivot:
:param kwargs:
:return:
"""
if step is None:
step = 1
res = | min_size_filter_generator(start, stop, step, pivot, **kwargs)
res = sum(res) / (stop - start) / step
return res
def min_size_filter_generator(start, stop, step=None, sigma=None,
**kwargs):
"""
:param start:
:param stop:
:param step:
:param sigma:
:param kwargs:
:return:
"""
if step is None:
step = 1
if sigma is None:
sigma = 3
for c_size in range(start, stop, step):
c_m | ean = mean(s=c_size, **kwargs)
c_std = std(s=c_size, **kwargs)
# noinspection PyTypeChecker
bill = (original > (c_mean + sigma * c_std)) | int
yield bill
|
Tyler-Ward/GolemClock | display/mq.py | Python | mit | 1,102 | 0.016334 | import pika
import pickle
from display import LCDLinearScroll
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='clock_output', type='fanout')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange='clock_output', queue=queue_name)
print ' [*] Waiting for messages. To exit press CTRL+C'
def select_callback():
print("select message sent")
chann | el.basic_publish(exchange='clock_output', routing_key='', body='ALARM_STOP')
channel.basic_publish(exchange='clock_output', routing_key='', body='ALARM_CANCEL')
def callback(ch, method, properties, body):
print("message received: | {0}".format(body))
if body == "ALARM_START":
items = ("It's sunny today", "Meeting at 2pm")
lcd_scroller = LCDLinearScroll(items, select_callback=select_callback)
lcd_scroller.display_message("Scroll through\nmessages")
#lcd_scroller.setup_scroll_events()
channel.basic_consume(callback, queue=queue_name, no_ack=True)
channel.start_consuming()
|
jmcnamara/XlsxWriter | xlsxwriter/test/comparison/test_simple09.py | Python | bsd-2-clause | 1,048 | 0 | ########################################################################## | #####
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a f | ile created by Excel.
"""
def setUp(self):
self.set_filename('simple09.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Test data out of range. These should be ignored.
worksheet.write('A0', 'foo')
worksheet.write(-1, -1, 'foo')
worksheet.write(0, -1, 'foo')
worksheet.write(-1, 0, 'foo')
worksheet.write(1048576, 0, 'foo')
worksheet.write(0, 16384, 'foo')
workbook.close()
self.assertExcelEqual()
|
ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/lib/agw/rulerctrl.py | Python | mit | 57,819 | 0.005967 | # --------------------------------------------------------------------------------- #
# RULERCTRL wxPython IMPLEMENTATION
#
# Andrea Gavana, @ 03 Nov 2006
# Latest Revision: 17 Aug 2011, 15.00 GMT
#
#
# TODO List
#
# 1. Any idea?
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# andrea.gavana@ma | erskoil.com
# andrea.gavana@gmail.com
#
# Or, Obviously, To The wxPython Mailing List!!!
#
#
# End Of Comments
# --------------------------------------------------------------------------------- #
"""
:class:`Rul | erCtrl` implements a ruler window that can be placed on top, bottom, left or right
to any wxPython widget.
Description
===========
:class:`RulerCtrl` implements a ruler window that can be placed on top, bottom, left or right
to any wxPython widget. It is somewhat similar to the rulers you can find in text
editors software, though not so powerful.
:class:`RulerCtrl` has the following characteristics:
- Can be horizontal or vertical;
- 4 built-in formats: integer, real, time and linearDB formats;
- Units (as ``cm``, ``dB``, ``inches``) can be displayed together with the label values;
- Possibility to add a number of "paragraph indicators", small arrows that point at
the current indicator position;
- Customizable background colour, tick colour, label colour;
- Possibility to flip the ruler (i.e. changing the tick alignment);
- Changing individually the indicator colour (requires PIL at the moment);
- Different window borders are supported (``wx.STATIC_BORDER``, ``wx.SUNKEN_BORDER``,
``wx.DOUBLE_BORDER``, ``wx.NO_BORDER``, ``wx.RAISED_BORDER``, ``wx.SIMPLE_BORDER``);
- Logarithmic scale available;
- Possibility to draw a thin line over a selected window when moving an indicator,
which emulates the text editors software.
And a lot more. See the demo for a review of the functionalities.
Usage
=====
Usage example::
import wx
import wx.lib.agw.rulerctrl as RC
class MyFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "RulerCtrl Demo")
panel = wx.Panel(self)
text = wx.TextCtrl(panel, -1, "Hello World! wxPython rules", style=wx.TE_MULTILINE)
ruler1 = RC.RulerCtrl(panel, -1, orient=wx.HORIZONTAL, style=wx.SUNKEN_BORDER)
ruler2 = RC.RulerCtrl(panel, -1, orient=wx.VERTICAL, style=wx.SUNKEN_BORDER)
mainsizer = wx.BoxSizer(wx.HORIZONTAL)
leftsizer = wx.BoxSizer(wx.VERTICAL)
bottomleftsizer = wx.BoxSizer(wx.HORIZONTAL)
topsizer = wx.BoxSizer(wx.HORIZONTAL)
leftsizer.Add((20, 20), 0, wx.ADJUST_MINSIZE, 0)
topsizer.Add((39, 0), 0, wx.ADJUST_MINSIZE, 0)
topsizer.Add(ruler1, 1, wx.EXPAND, 0)
leftsizer.Add(topsizer, 0, wx.EXPAND, 0)
bottomleftsizer.Add((10, 0))
bottomleftsizer.Add(ruler2, 0, wx.EXPAND, 0)
bottomleftsizer.Add(text, 1, wx.EXPAND, 0)
leftsizer.Add(bottomleftsizer, 1, wx.EXPAND, 0)
mainsizer.Add(leftsizer, 3, wx.EXPAND, 0)
panel.SetSizer(mainsizer)
# our normal wxApp-derived class, as usual
app = wx.App(0)
frame = MyFrame(None)
app.SetTopWindow(frame)
frame.Show()
app.MainLoop()
Events
======
:class:`RulerCtrl` implements the following events related to indicators:
- ``EVT_INDICATOR_CHANGING``: the user is about to change the position of one indicator;
- ``EVT_INDICATOR_CHANGED``: the user has changed the position of one indicator.
Supported Platforms
===================
:class:`RulerCtrl` has been tested on the following platforms:
* Windows (Windows XP);
* Linux Ubuntu (Dapper 6.06)
Window Styles
=============
`No particular window styles are available for this class.`
Events Processing
=================
This class processes the following events:
========================== ==================================================
Event Name Description
========================== ==================================================
``EVT_INDICATOR_CHANGED`` The user has changed the indicator value.
``EVT_INDICATOR_CHANGING`` The user is about to change the indicator value.
========================== ==================================================
License And Version
===================
:class:`RulerCtrl` is distributed under the wxPython license.
Latest Revision: Andrea Gavana @ 17 Aug 2011, 15.00 GMT
Version 0.3
"""
__docformat__ = "epytext"
import wx
import math
import cStringIO, zlib
# Try to import PIL, if possible.
# This is used only to change the colour for an indicator arrow.
_hasPIL = False
try:
import Image
_hasPIL = True
except:
pass
# Built-in formats
IntFormat = 1
""" Integer format. """
RealFormat = 2
""" Real format. """
TimeFormat = 3
""" Time format. """
LinearDBFormat = 4
""" Linear DB format. """
HHMMSS_Format = 5
""" HHMMSS format. """
# Events
wxEVT_INDICATOR_CHANGING = wx.NewEventType()
wxEVT_INDICATOR_CHANGED = wx.NewEventType()
EVT_INDICATOR_CHANGING = wx.PyEventBinder(wxEVT_INDICATOR_CHANGING, 2)
""" The user is about to change the indicator value. """
EVT_INDICATOR_CHANGED = wx.PyEventBinder(wxEVT_INDICATOR_CHANGED, 2)
""" The user has changed the indicator value. """
# Some accessor functions
#----------------------------------------------------------------------
def GetIndicatorData():
""" Returns the image indicator as a decompressed stream of characters. """
return zlib.decompress(
'x\xda\x01x\x01\x87\xfe\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\n\x00\
\x00\x00\n\x08\x06\x00\x00\x00\x8d2\xcf\xbd\x00\x00\x00\x04sBIT\x08\x08\x08\
\x08|\x08d\x88\x00\x00\x01/IDAT\x18\x95m\xceO(\x83q\x1c\xc7\xf1\xf7\xef\xf9\
\xcd\xf6D6\xca\x1c\xc8\x9f\x14\'J-\xc4A9(9(-\xe5 \xed\xe4\xe2\xe2\xb2\x928\
\xb9\xec\xc2\x01\x17.\x0e\xe4\xe6B\xed\xb2\x1c\xdc$5\x97\xf9S\xb3\x14+\x0eO\
\xdb\xccZ\x9e\xfd\xf9\xba\x98E{\x1d\xbf\xbd\xfb\xf4U\x00\x18\x9d\xc3\xad\x1d\
\xa1+\xa7S\x15\xf8\xa1\xb5i\xbc\xc4\xd7\x0f\xca\xc5\xd82U3[\x97\xb1\x82\xc4S\
"\x89\xb4\xc8SZ\xc4\xb2E\xfa\x06CR)\x1c\x00\xb8\x8cb"-|\x94@\x01\x0e\r\xee&\
\xf8\x12\xc5\xdf\xd0\xd4\xf2\xf6i\x90/\x82\xe9\x82\xdb\xe72\xa7\xe7%\x92\x99\
\xdfA\xb4j\x9b]\xa5\xaek\xbag|\xaa\xdd\xca)\xceb\x10\xbe\x87\xacm VT\xd0N\
\x0f\xf9\xd7\x94\xd6\xde\xb1\xdd\xf9\xcdm_\x83\xdb\x81\x95W\x88\x02\xad\x159\
\x01\xcc!U2}\xa3$\x0f\x1dZR\xd1\xfd\xbb\x9b\xc7\x89\xc99\x7f\xb7\xb7\xd1\x00\
\xc0.B\xbe\xac\xc8\xbe?P\x8e\x8c\x1ccg\x02\xd5\x1f\x9a\x07\xf6\x82a[6.D\xfc\
\'"\x9e\xc0\xb5\xa0\xeb\xd7\xa8\xc9\xdd\xbf\xb3pdI\xefRD\xc0\x08\xd6\x8e*\\-\
+\xa0\x17\xff\x9f\xbf\x01{\xb5t\x9e\x99]a\x97\x00\x00\x00\x00IEND\xaeB`\x82G\
\xbf\xa8>' )
def GetIndicatorBitmap():
""" Returns the image indicator as a :class:`Bitmap`. """
return wx.BitmapFromImage(GetIndicatorImage())
def GetIndicatorImage():
""" Returns the image indicator as a :class:`Image`. """
stream = cStringIO.StringIO(GetIndicatorData())
return wx.ImageFromStream(stream)
def MakePalette(tr, tg, tb):
"""
Creates a palette to be applied on an image based on input colour.
:param `tr`: the red intensity of the input colour;
:param `tg`: the green intensity of the input colour;
:param `tb`: the blue intensity of the input colour.
"""
l = []
for i in range(255):
l.extend([tr*i / 255, tg*i / 255, tb*i / 255])
return l
def ConvertWXToPIL(bmp):
"""
Converts a :class:`Image` into a PIL image.
:param `bmp`: an instance of :class:`Image`.
:note: Requires PIL (Python Imaging Library), which can be downloaded from
http://www.pythonware.com/products/pil/
"""
width = bmp.GetWidth()
height = bmp.GetHeight()
img = Image.fromstring("RGBA", (width, height), bmp.GetData())
return img
def ConvertPILToWX(pil, a |
Rahi374/PittAPI | PittAPI/textbook.py | Python | gpl-2.0 | 5,449 | 0.03964 | '''
The Pitt API, to access workable data of the University of Pittsburgh
Copyright (C) 2015 Ritwik Gupta
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
import grequests
import requests
import json
import time
session = requests.session()
CODES = [
'ADMJ','ADMPS','AFRCNA','AFROTC','ANTH','ARABIC','ARTSC','ASL','ASTRON','ATHLTR','BACC','BCHS','BECN','BFIN','BHRM','BIND',
'BIOENG','BIOETH','BIOINF','BIOSC','BIOST','BMIS','BMKT','BOAH','BORG','BQOM','BSEO','BSPP','BUS','BUSACC','BUSADM','BUSBIS',
'BUSECN','BUSENV','BUSERV','BUSFIN','BUSHRM','BUSMKT','BUSORG','BUSQOM','BUSSCM','BUSSPP','CDACCT','CDENT','CEE','CGS','CHE',
'CHEM','CHIN','CLASS','CLRES','CLST','CMMUSIC','CMPBIO','COE','COEA','COEE','COMMRC','CS','CSD','DENHYG','DENT','DIASCI','DSANE',
'EAS','ECE','ECON','EDUC','ELI','EM','ENDOD','ENGCMP','ENGFLM','ENGLIT','ENGR','ENGSCI','ENGWRT','ENRES','EOH','EPIDEM','FACDEV',
'FILMG','FILMST','FP','FR','FTADMA','FTDA','FTDB','FTDC','FTDR','GEOL','GER','GERON','GREEK','GREEKM','GSWS','HAA','HIM','HINDI',
'HIST','HONORS','HPA','HPM','HPS','HRS','HUGEN','IDM','IE','IL','IMB','INFSCI','INTBP','IRISH','ISB','ISSP','ITAL','JPNSE','JS',
'KOREAN','LATIN','LAW','LCTL','LDRSHP','LEGLST','LING','LIS','LSAP','MATH','ME','MED','MEDEDU','MEMS','MILS','MOLBPH','MSCBIO',
'MSCBMP','MSCMP','MSE','MSIMM','MSMBPH','MSMGDB','MSMPHL','MSMVM','MSNBIO','MUSIC','NEURO','NPHS','NROSCI','NUR','NURCNS','NURNM',
'NURNP','NURSAN','NURSP','NUTR','ODO','OLLI','ORBIOL','ORSUR','OT','PAS','PEDC','PEDENT','PERIO','PERS','PETE','PHARM','PHIL','PHYS',
'PIA','POLISH','PORT','PROSTH','PS','PSY','PSYC','PSYED','PT','PUBHLT','PUBSRV','REHSCI','REL','RELGST','RESTD','RUSS','SA','SERCRO',
'SLAV','SLOVAK','SOC','SOCWRK','SPAN','STAT','SWAHIL','SWBEH','SWCOSA','SWE','SWGEN','SWINT','SWRES','SWWEL','TELCOM','THEA','TURKSH',
'UKRAIN','URBNST','VIET']
def get_books_data(courses_info):
"""Returns list of dictionaries of book information. | """
request_objs = []
course_names = [] # need to save these
instructors = [] # need to save these
for i in range(len(courses_info)):
book_info = courses_info[i]
print(book_info)
| course_names.append(book_info['course_name'])
instructors.append(book_info['instructor'])
request_objs.append(grequests.get(get_department_url(book_info['department_code'], book_info['term']), timeout=10))
responses = grequests.map(request_objs) # parallel requests
course_ids = []
j = 0 # counter to get course_names and instructors
for r in responses:
json_data = r.json()
sections = []
course_id = ''
for course_dict in (json_data):
if course_dict['id'] == course_names[j]:
sections = course_dict['sections']
break
for section in sections:
if section['instructor'] == instructors[j]:
course_id = section['id']
break
course_ids.append(course_id)
j += 1
book_url = 'http://pitt.verbacompare.com/comparison?id='
if (len(course_ids) > 1):
for course_id in course_ids:
book_url += course_id + '%2C' # format url for multiple classes
else:
book_url += course_ids[0] # just one course
book_data = session.get(book_url).text
books_list = []
try:
start = book_data.find('Verba.Compare.Collections.Sections') + len('Verba.Compare.Collections.Sections') + 1
end = book_data.find('}]}]);') + 4
info = [json.loads(book_data[start:end])]
for i in range(len(info[0])):
for j in range(len(info[0][i]['books'])):
book_dict = {}
big_dict = info[0][i]['books'][j]
book_dict['isbn'] = big_dict['isbn']
book_dict['citation'] = big_dict['citation']
book_dict['title'] = big_dict['title']
book_dict['edition'] = big_dict['edition']
book_dict['author'] = big_dict['author']
books_list.append(book_dict)
except ValueError as e:
print('Error while decoding response, try again!')
raise e
return books_list # return list of dicts of books
def get_department_url(department_code,term='2600'): # 2600 --> spring 2017
"""Returns url for given department code."""
department_number = CODES.index(department_code) + 22399
if department_number > 22462:
department_number += 2 # between codes DSANE and EAS 2 id numbers are skipped.
if department_number > 22580:
department_number += 1 # between codes PUBSRV and REHSCI 1 id number is skipped.
url = 'http://pitt.verbacompare.com/compare/courses/' + '?id=' + str(department_number) + '&term_id=' + term
return url
|
ForTheYin/ical2fullcalendar | ics-convert.py | Python | mit | 1,883 | 0.020181 | #!/usr/bin/env python
"""Converts the .ics/.ical file into a FullCalendar compatiable JSON file
FullCalendar uses a specific JSON format similar to iCalendar format. This
script creates a JSON file containing renamed event components. Only the
title, description, start/end time, and url data are used. Does not support
repeating events.
"""
import sys
import json
__import__('pytz')
__import__('icalendar')
from icalendar import Calendar
__author__ = "Andy Yin"
__copyright__ = "Copyright (C) 2015, Andy Yin"
__credits__ = ["Eddie Blundell"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Andy Yin"
__email__ = "me@fortheyin.com"
__status__ = "Production"
# quit if the arguments are incorrect, and prints a usage
if (len(sys.argv) != 2 and len(sys.argv) != 3):
print sys.argv[0] + ': illegal operation'
print 'usag | e: python ' + sys.argv[0] + ' file [output]'
exit(1)
# default output filename (just adds .json extension on the given file)
out_fi | le = sys.argv[1] + '.json'
if (len(sys.argv) == 3):
# changes output filename to the 2nd arugment
out_file = sys.argv[2]
# opens the input .ics file and parses it as iCalendar Calendar object
ics_file = open(sys.argv[1],'rb')
ics_cal = Calendar.from_ical(ics_file.read())
# array of event information
result = []
for component in ics_cal.walk():
if component.name == "VEVENT":
# set containing all the events
event = {
'title':component.get('summary'),
'backgroundColor':component.get('location'),
'description':component.get('description'),
'start':component.decoded('dtstart').isoformat(),
'end':component.decoded('dtend').isoformat(),
'url':component.get('url')
}
# append to the result array
result.append(event)
ics_file.close()
# saves the result using jsonify
json_out = open(out_file, 'w')
json_out.write(json.dumps(result, sort_keys = False, indent = 4))
json_out.close() |
alviproject/alvi | alvi/client/scenes/traverse_tree_depth_first.py | Python | mit | 588 | 0 | from alvi.client.scenes.create_tree import CreateTree
class TraverseTreeDepthFirst(CreateTree):
def traverse(self, marker, tree, node):
marker.append(node)
tree.stats.traversed_nodes += 1
tree.sync()
for child in node.children:
self.traverse(marker, tree, child)
def run(self, **kwargs):
tree = kwargs['container']
with tree.postpone_ | sync():
super().run(**kwargs)
marker = tree.create_multi_marker("Traversed")
tree.stats.traversed_nodes = 0
self.traverse(marker, tree, tree.ro | ot)
|
ekosareva/vmware-dvs | networking_vsphere/tests/unit/drivers/test_ovs_firewall.py | Python | apache-2.0 | 28,260 | 0 | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_config import cfg
from neutron.agent.common import config
from neutron.agent.common import ovs_lib
from neutron.common import constants
from networking_vsphere.drivers import ovs_firewall as ovs_fw
from networking_vsphere.tests import base
fake_port = {'security_group_source_groups': 'abc',
'mac_address': '00:11:22:33:44:55',
'network_id': "netid",
'id': "123",
'security_groups': "abc",
'lvid': "100",
'sg_provider_rules': [],
'security_group_rules': [
{"direction": "ingress",
"protocol": "tcp",
"port_range_min": 2001,
"port_range_max": 2009,
"source_port_range_min": 67,
"source_port_range_max": 68,
"ethertype": "IPv4",
"source_ip_prefix": "150.1.1.0/22",
"dest_ip_prefix": "170.1.1.0/22"}]}
fake | _res_port = {'security_group_source_groups': 'abc',
'mac_address': '00:11:22:33:44:55',
'network_id': "netid",
'id': "123",
'security_groups' | : "abc",
'lvid': "100",
'device': "123"}
cookie = ("0x%x" % (hash("123") & 0xffffffffffffffff))
class TestOVSFirewallDriver(base.TestCase):
@mock.patch('networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver.'
'check_ovs_firewall_restart')
@mock.patch('networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver.'
'setup_base_flows')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.create')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.set_secure_mode')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.get_port_ofport')
@mock.patch('neutron.agent.ovsdb.api.'
'API.get')
def setUp(self, mock_ovsdb_api, mock_get_port_ofport, mock_set_secure_mode,
mock_create_ovs_bridge, mock_setup_base_flows,
mock_check_ovs_firewall_restart,):
super(TestOVSFirewallDriver, self).setUp()
config.register_root_helper(cfg.CONF)
cfg.CONF.set_override('security_bridge_mapping',
"fake_sec_br:fake_if", 'SECURITYGROUP')
mock_get_port_ofport.return_value = 5
self.ovs_firewall = ovs_fw.OVSFirewallDriver()
self.ovs_firewall.sg_br = mock.Mock()
self.mock_br = ovs_lib.DeferredOVSBridge(self.ovs_firewall.sg_br)
self.LOG = ovs_fw.LOG
def test_get_compact_port(self):
compact_port = {'security_group_source_groups': 'abc',
'mac_address': '00:11:22:33:44:55',
'network_id': "netid",
'id': "123",
'device': "123",
'security_groups': "abc",
'lvid': "100"}
res = self.ovs_firewall._get_compact_port(fake_port)
self.assertEqual(compact_port, res)
def test_remove_ports_from_provider_cache(self):
self.ovs_firewall.provider_port_cache = set(['123', '124', '125'])
self.ovs_firewall.remove_ports_from_provider_cache(['123', '125'])
self.assertEqual(set(['124']), self.ovs_firewall.provider_port_cache)
self.ovs_firewall.provider_port_cache = set(['123', '124', '125'])
self.ovs_firewall.remove_ports_from_provider_cache(['121', '125'])
self.assertEqual(set(['123', '124']),
self.ovs_firewall.provider_port_cache)
def test_add_ovs_flow(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal")
mock_add_flow.assert_called_with(priority=0, actions='normal',
table=1)
def test_add_ovs_flow_with_protocol(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
# rule with protocol
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
protocol="arp")
mock_add_flow.assert_called_with(table=1, priority=0,
proto="arp", actions="normal")
def test_add_ovs_flow_with_dest_mac(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
# rule with dl_dest
dest_mac = "01:00:00:00:00:00"
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
dl_dest=dest_mac)
mock_add_flow.assert_called_with(table=1, priority=0,
dl_dst=dest_mac,
actions="normal")
def test_add_ovs_flow_with_tcpflag(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
# rule with tcp_flags
t_flag = "+rst"
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
tcp_flag=t_flag)
mock_add_flow.assert_called_with(table=1, priority=0,
proto=constants.PROTO_NAME_TCP,
tcp_flags=t_flag,
actions="normal")
def test_add_ovs_flow_with_icmptype(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
# rule with icmp_req_type
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
icmp_req_type=11)
mock_add_flow.assert_called_with(table=1, priority=0,
proto=constants.PROTO_NAME_ICMP,
icmp_type=11, actions="normal")
def test_add_ports_to_filter(self):
self.ovs_firewall.filtered_ports = {}
self.ovs_firewall.add_ports_to_filter([fake_port])
self.assertIsNotNone(self.ovs_firewall.filtered_ports)
ret_port = self.ovs_firewall.filtered_ports["123"]
self.assertEqual(fake_res_port, ret_port)
def test_setup_aap_flows(self):
port_with_app = copy.deepcopy(fake_port)
key = "allowed_address_pairs"
port_with_app[key] = [{'ip_address': '10.0.0.2',
'mac_address': 'aa:bb:cc:dd:ee:aa'},
{'ip_address': '10.0.0.3',
'mac_address': 'aa:bb:cc:dd:ee:ab'}]
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100), \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.