| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
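The schema above is the typical layout of a fill-in-the-middle (FIM) code-completion corpus: each row stores a Python file split into consecutive `prefix`, `middle`, and `suffix` spans, alongside repository metadata and a quality `score`. A minimal sketch of loading and reassembling one row, assuming the dump is published as a Hugging Face dataset (the dataset name below is a hypothetical placeholder, not the real identifier):

```python
# Minimal sketch, assuming a Hugging Face dataset with the columns listed
# above; "user/python-fim-corpus" is a hypothetical placeholder name.
from datasets import load_dataset

ds = load_dataset("user/python-fim-corpus", split="train")

row = ds[0]
# prefix + middle + suffix are consecutive spans of the original file.
source = row["prefix"] + row["middle"] + row["suffix"]
print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])
print(source[:200])
```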
| moreati/django-userena | userena/tests/test_backends.py | Python | bsd-3-clause | 2,608 | 0.001917 |
from django.test import TestCase
from django.contrib.auth import authenticate
from userena.backends import UserenaAuthenticationBackend
from userena.utils import get_user_model
User = get_user_model()
class UserenaAuthenticationBackendTests(TestCase):
"""
Test the ``UserenaAuthenticationBackend`` which should return a ``User``
when supplied with a username/email and a correct password.
"""
fixtures = ['users',]
backend = UserenaAuthenticationBackend()
def test_with_username(self):
""" Test the backend when usernames are supplied. """
# Invalid usernames or passwords
invalid_data_dicts = [
# Invalid password
{'identification': 'john',
'password': 'inhalefish'},
# Invalid username
{'identification': 'alice',
'password': 'blowfish'},
]
for invalid_dict in invalid_data_dicts:
result = self.backend.authenticate(identification=invalid_dict['identification'],
password=invalid_dict['password'])
self.failIf(isinstance(result, User))
# Valid username and password
result = self.backend.authenticate(identification='john',
password='blowfish')
self.failUnless(isinstance(result, User))
def test_with_email(self):
""" Test the backend when email address is supplied """
# Invalid e-mail addresses or passwords
invalid_data_dicts = [
# Invalid password
{'identification': 'john@example.com',
'password': 'inhalefish'},
# Invalid e-mail address
{'identification': 'alice@example.com',
'password': 'blowfish'},
]
for invalid_dict in invalid_data_dicts:
result = self.backend.authenticate(identification=invalid_dict['identification'],
password=invalid_dict['password'])
self.failIf(isinstance(result, User))
# Valid e-mail address and password
result = self.backend.authenticate(identification='john@example.com',
password='blowfish')
self.failUnless(isinstance(result, User))
def test_get_user(self):
""" Test that the user is returned """
user = self.backend.get_user(1)
self.failUnlessEqual(user.username, 'john')
# None should be returned when the id does not exist.
user = self.backend.get_user(99)
self.failIf(user)
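`UserenaAuthenticationBackend` is a standard Django authentication backend (its module path is confirmed by the import above), so enabling it is a settings change; a minimal sketch of the standard Django configuration:

```python
# Minimal settings sketch for enabling the backend exercised by these tests;
# goes in the Django project's settings module.
AUTHENTICATION_BACKENDS = (
    'userena.backends.UserenaAuthenticationBackend',
    'django.contrib.auth.backends.ModelBackend',
)
```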
| Gussy/mavlink | pymavlink/generator/lib/genxmlif/__init__.py | Python | lgpl-3.0 | 3,040 | 0.003618 |
#
# genxmlif, Release 0.9.0
# file: __init__.py
#
# genxmlif package file
#
# history:
# 2005-04-25 rl created
#
# Copyright (c) 2005-2008 by Roland Leuthe. All rights reserved.
#
# --------------------------------------------------------------------
# The generic XML interface is
#
# Copyright (c) 2005-2008 by Roland Leuthe
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
######################################################################
# PUBLIC DEFINITIONS
######################################################################
# supported XML interfaces
XMLIF_MINIDOM = "XMLIF_MINIDOM"
XMLIF_4DOM = "XMLIF_4DOM"
XMLIF_ELEMENTTREE = "XMLIF_ELEMENTTREE"
# namespace definitions
XINC_NAMESPACE = "http://www.w3.org/2001/XInclude"
# definition of genxmlif path
import os
GENXMLIF_DIR = os.path.dirname(__file__)
########################################
# central function to choose the XML interface to be used
#
def chooseXmlIf (xmlIf, verbose=0, useCaching=1, processXInclude=1):
if xmlIf == XMLIF_MINIDOM:
import xmlifMinidom
return xmlifMinidom.XmlInterfaceMinidom(verbose, useCaching, processXInclude)
elif xmlIf == XMLIF_4DOM:
import xmlif4Dom
return xmlif4Dom.XmlInterface4Dom(verbose, useCaching, processXInclude)
elif xmlIf == XMLIF_ELEMENTTREE:
import xmlifElementTree
return xmlifElementTree.XmlInterfaceElementTree(verbose, useCaching, processXInclude)
else:
raise AttributeError, "Unknown XML interface: %s" %(xmlIf)
########################################
# define own exception for GenXmlIf errors
# The following errors/exceptions are mapped to a GenxmlIf exception:
# - Expat errors
# - XInclude errors
#
class GenXmlIfError (StandardError):
pass
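`chooseXmlIf` is the module's single factory entry point; a minimal sketch of selecting a backend, assuming the package is importable as `genxmlif` and matching the module's Python 2 vintage:

```python
# Minimal sketch, assuming the package is importable as ``genxmlif``.
from genxmlif import chooseXmlIf, XMLIF_ELEMENTTREE

# Returns an XmlInterfaceElementTree configured with the given flags;
# an unknown interface name raises AttributeError.
xmlIf = chooseXmlIf(XMLIF_ELEMENTTREE, verbose=0, useCaching=1, processXInclude=1)
```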
| albfan/terminator | setup.py | Python | gpl-2.0 | 9,520 | 0.013655 |
#!/usr/bin/env python2
from distutils.core import setup
from distutils.dist import Distribution
from distutils.cmd import Command
from distutils.command.install_data import install_data
from distutils.command.build import build
from distutils.dep_util import newer
from distutils.log import warn, info, error
from distutils.errors import DistutilsFileError
import glob
import os
import sys
import subprocess
import platform
from terminatorlib.version import APP_NAME, APP_VERSION
PO_DIR = 'po'
MO_DIR = os.path.join('build', 'mo')
CSS_DIR = os.path.join('terminatorlib', 'themes')
class TerminatorDist(Distribution):
global_options = Distribution.global_options + [
("build-documentation", None, "Build the documentation"),
("install-documentation", None, "Install the documentation"),
("without-gettext", None, "Don't build/install gettext .mo files"),
("without-icon-cache", None, "Don't attempt to run gtk-update-icon-cache")]
def __init__ (self, *args):
self.without_gettext = False
self.without_icon_cache = False
Distribution.__init__(self, *args)
class BuildData(build):
def run (self):
build.run (self)
if not self.distribution.without_gettext:
# Build the translations
for po in glob.glob (os.path.join (PO_DIR, '*.po')):
lang = os.path.basename(po[:-3])
mo = os.path.join(MO_DIR, lang, 'LC_MESSAGES', 'terminator.mo')
directory = os.path.dirname(mo)
if not os.path.exists(directory):
info('creating %s' % directory)
os.makedirs(directory)
if newer(po, mo):
info('compiling %s -> %s' % (po, mo))
try:
rc = subprocess.call(['msgfmt', '-o', mo, po])
if rc != 0:
raise Warning, "msgfmt returned %d" % rc
except Exception, e:
error("Building gettext files failed. Ensure you have gettext installed. Alternatively, try setup.py --without-gettext [build|install]")
error("Error: %s" % str(e))
sys.exit(1)
TOP_BUILDDIR='.'
INTLTOOL_MERGE='intltool-merge'
desktop_in='data/terminator.desktop.in'
desktop_data='data/terminator.desktop'
rc = os.system ("C_ALL=C " + INTLTOOL_MERGE + " -d -u -c " + TOP_BUILDDIR +
"/po/.intltool-merge-cache " + TOP_BUILDDIR + "/po " +
desktop_in + " " + desktop_data)
if rc != 0:
# run the desktop_in through a command to strip the "_" characters
with open(desktop_in) as file_in, open(desktop_data, 'w') as file_data:
[file_data.write(line.lstrip('_')) for line in file_in]
appdata_in='data/terminator.appdata.xml.in'
appdata_data='data/terminator.appdata.xml'
rc = os.system ("C_ALL=C " + INTLTOOL_MERGE + " -x -u -c " + TOP_BUILDDIR +
"/po/.intltool-merge-cache " + TOP_BUILDDIR + "/po " +
appdata_in + " " + appdata_data)
if rc != 0:
# run the appdata_in through a command to strip the "_" characters
with open(appdata_in) as file_in, open(appdata_data, 'w') as file_data:
[file_data.write(line.replace('<_','<').replace('</_','</')) for line in file_in]
class Uninstall(Command):
description = "Attempt an uninstall from an install --record file"
user_options = [('manifest=', None, 'Installation record filename')]
def initialize_options(self):
self.manifest = None
def finalize_options(self):
pass
def get_command_name(self):
return 'uninstall'
def run(self):
f = None
self.ensure_filename('manifest')
try:
try:
if not self.manifest:
raise DistutilsFileError("Pass manifest with --manifest=file")
f = open(self.manifest)
files = [file.strip() for file in f]
except IOError, e:
raise DistutilsFileError("unable to open install manifest: %s", str(e))
finally:
if f:
f.close()
for file in files:
if os.path.isfile(file) or os.path.islink(file):
info("removing %s" % repr(file))
if not self.dry_run:
try:
os.unlink(file)
except OSError, e:
warn("could not delete: %s" % repr(file))
elif not os.path.isdir(file):
info("skipping %s" % repr(file))
dirs = set()
for file in reversed(sorted(files)):
dir = os.path.dirname(file)
if dir not in dirs and os.path.isdir(dir) and len(os.listdir(dir)) == 0:
dirs.add(dir)
# Only nuke empty Python library directories, else we could destroy
# e.g. locale directories we're the only app with a .mo installed for.
if dir.find("site-packages/") > 0:
info("removing %s" % repr(dir))
if not self.dry_run:
try:
os.rmdir(dir)
except OSError, e:
warn("could not remove directory: %s" % str(e))
else:
info("skipping empty directory %s" % repr(dir))
class InstallData(install_data):
def run (self):
self.data_files.extend (self._find_css_files ())
self.data_files.extend (self._find_mo_files ())
install_data.run (self)
if not self.distribution.without_icon_cache:
self._update_icon_cache ()
# We should do this on uninstall too
def _update_icon_cache(self):
info("running gtk-update-icon-cache")
try:
subprocess.call(["gtk-update-icon-cache", "-q", "-f", "-t", os.path.join(self.install_dir, "share/icons/hicolor")])
except Exception, e:
warn("updating the GTK icon cache failed: %s" % str(e))
def _find_mo_files (self):
data_files = []
if not self.distribution.without_gettext:
for mo in glob.glob (os.path.join (MO_DIR, '*', 'LC_MESSAGES', 'terminator.mo')):
lang = os.path.basename(os.path.dirname(os.path.dirname(mo)))
dest = os.path.join('share', 'locale', lang, 'LC_MESSAGES')
data_files.append((dest, [mo]))
return data_files
def _find_css_files (self):
data_files = []
for css_dir in glob.glob (os.path.join (CSS_DIR, '*')):
srce = glob.glob (os.path.join(css_dir, 'gtk-3.0', 'apps', '*.css'))
dest = os.path.join('share', 'terminator', css_dir, 'gtk-3.0', 'apps')
data_files.append((dest, srce))
return data_files
class Test(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import subprocess
import sys
errno = subprocess.call(['bash', 'run_tests'])
raise SystemExit(errno)
if platform.system() in ['FreeBSD', 'OpenBSD']:
man_dir = 'man'
else:
man_dir = 'share/man'
setup(name=APP_NAME,
version=APP_VERSION,
description='Terminator, the robot future of terminals',
author='Chris Jones',
author_email='cmsj@tenshu.net',
url='https://gnometerminator.blogspot.com/p/introduction.html',
license='GNU GPL v2',
scripts=['terminator', 'remotinator'],
data_files=[
('bin', ['terminator.wrapper']),
('share/appdata', ['data/terminator.appdata.xml']),
('share/applications', ['data/terminator.desktop']),
(os.path.join(man_dir, 'man1'), ['doc/terminator.1']),
(os.path.join(man_dir, 'man5'), ['doc/terminator_config.5']),
('share/pixmaps', ['data/icons/hicolor/48x48/apps/terminator.png']),
('share/icons/hicolor/scalable/apps', glob.glob('data/icons/hicolor/scalable/apps/*.svg')),
('share/icons/hicolor/16x16/apps', glob.glob('data/icons/hicolor/16x16/apps/*.png')),
('share/icons/hicolor/22x22/apps', glob.glob('data/icons/hicolor/22x22/apps/*.png')),
('share/icons/hicolor/24x24/apps', glob.glob('data/icons/hicolor/24x24/apps/*.png')),
('share/icons/hicolor/32x32/apps', glob.glob('data/icons/hicolor/32x32/apps/*.png')),
('share/icons/hicolor/48x48/apps', glob.glob('data/icons/hicolor/48x48/apps/*.png')),
('share/icons/hicolor/16x16/actions', glob.glob('data/icons/hicolor/16x16/actions/*.png')),
('share/icons/hic
| abhikeshav/ydk-py | core/ydk/mdt/proto_to_dict.py | Python | apache-2.0 | 2,788 | 0.001793 |
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
#
# Copied from the bigmuddy collector
#
from google.protobuf.message import Message
from google.protobuf.descriptor import FieldDescriptor
DECODE_FN_MAP = {
FieldDescriptor.TYPE_DOUBLE: float,
FieldDescriptor.TYPE_FLOAT: float,
FieldDescriptor.TYPE_INT32: int,
FieldDescriptor.TYPE_INT64: long,
FieldDescriptor.TYPE_UINT32: int,
FieldDescriptor.TYPE_UINT64: long,
FieldDescriptor.TYPE_SINT32: int,
FieldDescriptor.TYPE_SINT64: long,
FieldDescriptor.TYPE_FIXED32: int,
FieldDescriptor.TYPE_FIXED64: long,
FieldDescriptor.TYPE_SFIXED32: int,
FieldDescriptor.TYPE_SFIXED64: long,
FieldDescriptor.TYPE_BOOL: bool,
FieldDescriptor.TYPE_STRING: unicode,
FieldDescriptor.TYPE_BYTES: lambda b: bytes_to_string(b),
FieldDescriptor.TYPE_ENUM: int,
}
def bytes_to_string (bytes):
"""
Convert a byte array into a string aa:bb:cc
"""
return ":".join(["{:02x}".format(int(ord(c))) for c in bytes])
def field_type_to_fn(msg, field):
if field.type == FieldDescriptor.TYPE_MESSAGE:
# For embedded messages recursively call this function. If it is
# a repeated field return a list
result = lambda msg: proto_to_dict(msg)
elif field.type in DECODE_FN_MAP:
result = DECODE_FN_MAP[field.type]
else:
raise TypeError("Field %s.%s has unrecognised type id %d" % (
msg.__class__.__name__, field.name, field.type))
return result
def proto_to_dict(msg):
result_dict = {}
extensions = {}
for field, value in msg.ListFields():
conversion_fn = field_type_to_fn(msg, field)
# Skip extensions
if not field.is_extension:
# Repeated fields result in an array, otherwise just call the
# conversion function to store the value
if field.label == FieldDescriptor.LABEL_REPEATED:
result_dict[field.name] = [conversion_fn(v) for v in value]
else:
result_dict[field.name] = conversion_fn(value)
return result_dict
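A minimal usage sketch for `proto_to_dict` (Python 2, matching the module's use of `long` and `unicode`); the generated message class and capture file are hypothetical placeholders:

```python
# Minimal sketch; ``telemetry_pb2.Telemetry`` is a hypothetical generated
# protobuf class and 'telemetry.bin' a placeholder capture file.
import telemetry_pb2

msg = telemetry_pb2.Telemetry()
with open('telemetry.bin', 'rb') as f:
    msg.ParseFromString(f.read())

as_dict = proto_to_dict(msg)  # nested dict keyed by field name
print(as_dict)
```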
| kaimast/inanutshell | linear/common/launch.py | Python | bsd-2-clause | 842 | 0.008314 |
import subprocess
import math
def launch(script_name, num_partitions, num_machines, pos, coordinator, machine_name, debug=False):
if num_machines > num_partitions:
raise RuntimeError("Need more partitions than machine")
machine_size = int(math.ceil(float(num_partitions) / float(num_machines)))
start = pos * machine_size
end = min((pos+1)*machine_size, num_partitions)
processes = []
#launch content server processes
for i in range(start, end):
print("Starting processs #" + str
|
(i))
if debug:
p = subprocess.Popen(["python3-dbg", script_name, coordinator, machine_name, str(i)])
else:
p = subprocess.Popen([script_name, coordinator, machine_name, str(i)])
processes.append(p)
while processes:
p = processes.pop()
p.wait()
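A minimal invocation sketch for `launch` (script name, coordinator address, and machine name are hypothetical placeholders). Note that with `debug=False` the script is executed directly, so it needs an executable bit and shebang; with `debug=True` each partition runs under `python3-dbg`.

```python
# Minimal sketch; the arguments are hypothetical placeholders. This machine
# (pos=0 of 2) runs partitions [0, 4) of the 8 total.
launch("content_server.py", num_partitions=8, num_machines=2, pos=0,
       coordinator="10.0.0.1:5000", machine_name="worker-0", debug=True)
```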
| jaustinpage/frc_rekt | docs/conf.py | Python | mit | 5,302 | 0.000943 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# frc_rekt documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 12 00:19:47 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'frc_rekt'
copyright = '2017, Author'
author = 'Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'frc_rektdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'frc_rekt.tex', 'frc\\_rekt Documentation',
'Author', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'frc_rekt', 'frc_rekt Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'frc_rekt', 'frc_rekt Documentation',
author, 'frc_rekt', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| frreiss/tensorflow-fred | tensorflow/lite/python/metrics_nonportable_test.py | Python | apache-2.0 | 24,385 | 0.004757 |
# Lint as: python2, python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite Python metrics helper TFLiteMetrics check."""
import gc
import os
import tempfile
import time
from unittest import mock
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from tensorflow.lite.python import lite
from tensorflow.lite.python import metrics_nonportable as metrics
from tensorflow.lite.python.convert import ConverterError
from tensorflow.lite.python.convert import register_custom_opdefs
from tensorflow.lite.python.metrics_wrapper import converter_error_data_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import convert_to_constants
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.importer import import_graph_def
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.tracking import tracking
class MetricsNonportableTest(test_util.TensorFlowTestCase):
def test_TFLiteMetrics_creation_no_arg_success(self):
metrics.TFLiteMetrics()
def test_TFLiteMetrics_creation_arg_success(self):
metrics.TFLiteMetrics('hash', '/path/to/model')
def test_TFLiteMetrics_creation_fails_with_only_hash(self):
with self.assertRaises(ValueError):
metrics.TFLiteMetrics(model_hash='hash')
def test_TFLiteMetrics_creation_fail2_with_only_model_path(self):
with self.assertRaises(ValueError):
metrics.TFLiteMetrics(model_path='/path/to/model')
def test_debugger_creation_counter_increase_multiple_same_topic_success(self):
try:
stub = metrics.TFLiteMetrics()
stub.increase_counter_debugger_creation()
self.assertEqual(metrics._counter_debugger_creation.get_cell().value(), 1)
stub2 = metrics.TFLiteMetrics()
stub2.increase_counter_debugger_creation()
self.assertEqual(metrics._counter_debugger_creation.get_cell().value(), 2)
del stub
gc.collect()
stub2.increase_counter_debugger_creation()
self.assertEqual(metrics._counter_debugger_creation.get_cell().value(), 3)
except:
raise Exception('No exception should be raised.')
def test_interpreter_creation_counter_increase_success(self):
stub = metrics.TFLiteMetrics()
stub.increase_counter_interpreter_creation()
self.assertEqual(
metrics._counter_interpreter_creation.get_cell('python').value(), 1)
def test_converter_attempt_counter_increase_success(self):
stub = metrics.TFLiteMetrics()
stub.increase_counter_converter_attempt()
self.assertEqual(metrics._counter_conversion_attempt.get_cell().value(), 1)
def test_converter_success_counter_increase_success(self):
stub = metrics.TFLiteMetrics()
stub.increase_counter_converter_success()
self.assertEqual(metrics._counter_conversion_success.get_cell().value(), 1)
def test_converter_params_set_success(self):
stub = metrics.TFLiteMetrics()
stub.set_converter_param('name', 'value')
self.assertEqual(
metrics._gauge_conversion_params.get_cell('name').value(), 'value')
def test_converter_params_multiple_set_success(self):
stub = metrics.TFLiteMetrics()
stub.set_converter_param('name', 'value')
stub.set_converter_param('name', 'value1')
self.assertEqual(
metrics._gauge_conversion_params.get_cell('name').value(), 'value1')
def test_converter_params_multiple_label_success(self):
stub = metrics.TFLiteMetrics()
stub.set_converter_param('name1', 'value1')
stub.set_converter_param('name2', 'value2')
self.assertEqual(
metrics._gauge_conversion_params.get_cell('name1').value(), 'value1')
self.assertEqual(
metrics._gauge_conversion_params.get_cell('name2').value(), 'value2')
def test_converter_params_set_latency(self):
stub = metrics.TFLiteMetrics()
stub.set_converter_latency(34566)
self.assertEqual(metrics._gauge_conversion_latency.get_cell().value(),
34566)
class ConverterMetricsTest(test_util.TensorFlowTestCase):
"""Testing conversion metrics."""
def _constructGraphDef(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32, name='in_tensor')
math_ops.add(in_tensor, in_tensor, name='add')
sess = session.Session()
return (
convert_to_constants.convert_variables_to_constants_from_session_graph(
sess, sess.graph_def, ['add']))
def test_conversion_from_constructor_success(self):
frozen_graph_def = self._constructGraphDef()
# Check metrics when conversion succeeded.
converter = lite.TFLiteConverter(frozen_graph_def, None, None,
[('in_tensor', [2, 16, 16, 3])], ['add'])
mock_metrics = mock.create_autospec(
metrics.TFLiteConverterMetrics, instance=True)
converter._tflite_metrics = mock_metrics
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
mock_metrics.assert_has_calls([
mock.call.increase_counter_converter_attempt(),
mock.call.increase_counter_converter_success(),
mock.call.export_metrics(),
mock.call.set_converter_param('input_format', '1'),
mock.call.set_converter_param('enable_mlir_converter', 'True'),
mock.call.set_converter_param('allow_custom_ops', 'False'),
mock.call.set_converter_param('api_version', '1'),
], any_order=True) # pyformat: disable
def test_conversion_from_constructor_fail(self):
frozen_graph_def = self._constructGraphDef()
# Check metrics when conversion failed.
converter = lite.TFLiteConverter(frozen_graph_def, None, None,
[('wrong_tensor', [2, 16, 16, 3])],
['add'])
mock_metrics = mock.create_autospec(
metrics.TFLiteConverterMetrics, instance=True)
converter._tflite_metrics = mock_metrics
with self.assertRaises(ConverterError):
converter.convert()
mock_metrics.assert_has_calls([
mock.call.increase_counter_converter_attempt(),
mock.call.set_converter_param('output_format', '2'),
mock.call.set_converter_param('select_user_tf_ops', 'None'),
mock.call.set_converter_param('post_training_quantize', 'False'),
], any_order=True) # pyformat: disable
mock_metrics.increase_counter_converter_success.assert_not_called()
def _getIntegerQuantizeModel(self):
np.random.seed(0)
root = tracking.AutoTrackable()
@tf.function(
input_signature=[tf.TensorSpec(shape=[1, 5, 5, 3], dtype=tf.float32)])
def func(inp):
conv = tf.nn.conv2d(
inp, tf.ones([3, 3, 3, 16]), strides=[1, 1, 1, 1], padding='SAME')
output = tf.nn.relu(conv, name='output')
return output
def calibration_gen():
for _ in range(5):
yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]
root.f = func
to_save = root.f.get_concrete_function()
return (root, to_save, calibration_ge
| meidli/yabgp | yabgp/message/attribute/linkstate/__init__.py | Python | apache-2.0 | 3,042 | 0 |
# Copyright 2015-2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from .linkstate import LinkState # noqa
from .node.local_router_id import LocalRouterID # noqa
from .node.name import NodeName # noqa
from .node.isisarea import ISISArea # noqa
from .node.sr_capabilities import SRCapabilities # noqa
from .node.sr_algorithm import SRAlgorithm # noqa
from .node.node_msd import NodeMSD # noqa
from .node.nodeflags import NodeFlags # noqa
from .node.opa_node_attr import OpaNodeAttr # noqa
from .node.sid_or_label import SIDorLabel # noqa
from .node.srlb import SRLB # noqa
from .link.admingroup import AdminGroup # noqa
from .link.remote_router_id import RemoteRouterID # noqa
from .link.max_bw import MaxBandwidth # noqa
from .link.max_rsv_bw import MaxResvBandwidth # noqa
from .link.unsrv_bw import UnrsvBandwidth # noqa
from .link.te_metric import TeMetric # noqa
from .link.link_name import LinkName # noqa
from .link.igp_metric import IGPMetric # noqa
from .link.adj_seg_id import AdjSegID # noqa
from .link.link_identifiers import LinkIdentifiers # noqa
from .link.link_msd import LinkMSD # noqa
from .link.lan_adj_sid import LanAdjSegID # noqa
from .link.srlg import SRLGList # noqa
from .link.mplsmask import MplsMask # noqa
from .link.protection_type import ProtectionType # noqa
from .link.opa_link_attr import OpaLinkAttr # noqa
from .link.peer_node_sid import PeerNodeSID # noqa
from .link.peer_adj_sid import PeerAdjSID # noqa
from .link.peer_set_sid import PeerSetSID # noqa
from .link.unidirect_link_delay import UnidirectLinkDelay # noqa
from .link.min_max_link_delay import MinMaxUnidirectLinkDelay # noqa
from .link.unidirect_delay_var import UnidirectDelayVar # noqa
from .link.unidirect_packet_loss import UnidirectPacketLoss # noqa
from .link.unidirect_residual_bw import UnidirectResidualBw # noqa
from .link.unidirect_avail_bw import UnidirectAvailBw # noqa
from .link.unidirect_bw_util import UnidirectBwUtil # noqa
from .prefix.prefix_metric import PrefixMetric # noqa
from .prefix.prefix_sid import PrefixSID # noqa
from .prefix.prefix_igp_attr import PrefixIGPAttr # noqa
from .prefix.src_router_id import SrcRouterID # noqa
from .prefix.igpflags import IGPFlags # noqa
from .prefix.igp_route_tag_list import IGPRouteTagList # noqa
from .prefix.ext_igp_route_tag_list import ExtIGPRouteTagList # noqa
from .prefix.ospf_forward_addr import OspfForwardingAddr # noqa
| MIPS/external-chromium_org-tools-gyp | pylib/gyp/xcode_emulation.py | Python | bsd-3-clause | 44,910 | 0.007459 |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import gyp.common
import os.path
import re
import shlex
import subprocess
import sys
from gyp.common import GypError
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
def __init__(self, spec):
self.spec = spec
self.isIOS = False
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
None):
self.isIOS = True
# If you need this, speak up at http://crbug.com/122592
conditional_keys = [key for key in self.xcode_settings[configname]
if key.endswith(']')]
if conditional_keys:
print 'Warning: Conditional keys not implemented, ignoring:', \
' '.join(conditional_keys)
for key in conditional_keys:
del self.xcode_settings[configname][key]
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library') or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
| aerialhedgehog/VyPy | trunk/VyPy/tools/wait.py | Python | bsd-3-clause | 1,311 | 0.039664 |
import time
from random import random
import traceback
WARN_TIME = 3600.
def wait(check,timeout=None,delay=0.5, *args,**kwarg):
start_time = time.time()
warned = False
while True:
try:
result = check(*args,**kwarg)
break
except Exception as exc:
if timeout and (time.time()-start_time) > timeout:
raise exc
#if (time.time()-start_time) > WARN_TIME and not warned:
#print "wait(): warning, waiting for a long time - \n%s" % traceback.format_exc()
#warned = True
time.sleep( delay*(1.+0.2*random()) )
return result
#class wait(object):
#def __init__(self,block=True, timeout=None, interval=1.0):
#self.block = block
#self.timeout = timeout
#self.interval = interval
#self._start_time = time.time()
#def __iter__(self):
#return self
#def next(self):
#if self.timeout and (time.time()-self._start_time) > self.timeout:
#raise StopIteration
#time.sleep(self.interval)
#try:
#return True
#except:
#print 'caught'
#for now in wait(timeout=None):
#print 'hello!'
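A minimal usage sketch for `wait` (the `read_status` callable and its `path` keyword are hypothetical). Extra positional and keyword arguments are forwarded to the check callable, which is retried until it stops raising or the timeout expires:

```python
# Minimal sketch; ``read_status`` and 'job.status' are hypothetical.
def read_status(path):
    with open(path) as f:  # raises IOError until the file appears
        return f.read()

# Retries roughly every 0.5 s (with up to 20% jitter), raising after 60 s.
status = wait(read_status, timeout=60., delay=0.5, path='job.status')
```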
| maurizi/otm-core | opentreemap/treemap/views/user.py | Python | agpl-3.0 | 9,600 | 0 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import collections
from registration.models import RegistrationProfile
from django.conf import settings
from django.contrib.sites.requests import RequestSite
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db.models.expressions import RawSQL
from django.db.models.functions import Length
from django.http import HttpResponseRedirect
from django.http.request import QueryDict
from django.shortcuts import render, get_object_or_404
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from opentreemap.util import json_from_request, dotted_split
from treemap.decorators import get_instance_or_404
from treemap.images import save_image_from_request
from treemap.util import package_field_errors
from treemap.models import User, Favorite, MapFeaturePhoto, InstanceUser
from treemap.lib.user import get_audits, get_user_instances, get_audits_params
USER_PROFILE_FIELDS = collections.OrderedDict([
('first_name',
{'label': _('First Name'),
'identifier': 'user.first_name',
'visibility': 'public'}),
('last_name',
{'label': _('Last Name'),
'identifier': 'user.last_name',
'visibility': 'public'}),
('organization',
{'label': _('Organization'),
'identifier': 'user.organization',
'visibility': 'public'}),
('make_info_public',
{'label': _('Make Info Visible'),
'identifier': 'user.make_info_public',
'visibility': 'private',
'template': "treemap/field/make_info_public_div.html"}),
('email',
{'label': _('Email'),
'identifier': 'user.email',
'visibility': 'private'}),
('allow_email_contact',
{'label': _('Email Updates'),
'identifier': 'user.allow_email_contact',
'visibility': 'private',
'template': "treemap/field/email_subscription_div.html"})
])
def user_audits(request, username):
user = get_object_or_404(User, username=username)
instance_id = request.GET.get('instance_id', None)
instance = (get_instance_or_404(pk=instance_id)
if instance_id else None)
params = get_audits_params(request)
return get_audits(request.user, instance, request.GET.copy(), user=user,
**params)
def instance_user_audits(request, instance_url_name, username):
instance = get_instance_or_404(url_name=instance_url_name)
return HttpResponseRedirect(
reverse('user_audits', kwargs={'username': username})
+ '?instance_id=%s' % instance.pk)
def update_user(request, user):
new_values = json_from_request(request) or {}
for key in new_values:
try:
model, field = dotted_split(key, 2, cls=ValueError)
if model != 'user':
raise ValidationError(
'All fields should be prefixed with "user."')
if field not in USER_PROFILE_FIELDS:
raise ValidationError(field + ' is not an updatable field')
except ValueError:
raise ValidationError('All fields should be prefixed with "user."')
setattr(user, field, new_values[key])
try:
user.save()
return {"ok": True}
except ValidationError as ve:
raise ValidationError(package_field_errors('user', ve))
def upload_user_photo(request, user):
"""
Saves a user profile photo whose data is in the request.
The caller or decorator is responsible for ensuring request.user == user
"""
user.photo, user.thumbnail = save_image_from_request(
request, name_prefix="user-%s" % user.pk, thumb_size=(85, 85))
user.save_with_user(request.user)
return {'url': user.thumbnail.url}
def instance_user(request, instance_url_name, username):
instance = get_instance_or_404(url_name=instance_url_name)
url = reverse('user', kwargs={'username': username}) +\
'?instance_id=%s' % instance.pk
return HttpResponseRedirect(url)
def profile_to_user(request):
if request.user and request.user.username:
return HttpResponseRedirect('/users/%s/' % request.user.username)
else:
return HttpResponseRedirect(settings.LOGIN_URL)
def forgot_username(request):
user_email = request.POST['email']
if not user_email:
raise ValidationError({
'user.email': [_('Email field is required')]
})
users = User.objects.filter(email=user_email)
# Don't reveal if we don't have that email, to prevent email harvesting
if len(users) == 1:
user = users[0]
password_reset_url = request.build_absolute_uri(
reverse('auth_password_reset'))
subject = _('Account Recovery')
body = render_to_string('treemap/partials/forgot_username_email.txt',
{'user': user,
'password_url': password_reset_url})
user.email_user(subject, body, settings.DEFAULT_FROM_EMAIL)
return {'email': user_email}
def resend_activation_email_page(request):
return {'username': request.GET.get('username')}
def resend_activation_email(request):
username = request.POST['username']
def error(error):
return render(request, 'treemap/resend_activation_email.html',
{'username': username, 'error': error})
if not username:
return error(_('Username field is required'))
users = User.objects \
.filter(username=username)
if len(users) != 1:
return error(_('There is no user with that username'))
user = users[0]
if user.is_active:
return error(_('This user has already been verified'))
success = RegistrationProfile.objects.resend_activation_mail(
users[0].email, RequestSite(request), request)
if not success:
return error(_('Unable to resend activation email'))
return {'user': user}
def _small_feature_photo_url(feature):
feature = feature.cast_to_subtype()
if feature.is_plot:
tree = feature.current_tree()
if tree:
photos = tree.photos()
else:
photos = MapFeaturePhoto.objects.none()
else:
photos = feature.photos()
if len(photos) > 0:
return photos[0].thumbnail.url
else:
return None
def user(request, username):
user = get_object_or_404(User, username=username)
instance_id = request.GET.get('instance_id', None)
instance = (get_instance_or_404(pk=instance_id)
if instance_id else None)
query_vars = QueryDict(mutable=True)
if instance_id:
query_vars['instance_id'] = instance_id
audit_dict = get_audits(request.user, instance, query_vars,
user=user, should_count=True)
reputation = user.get_reputation(instance) if instance else None
favorites_qs = Favorite.objects.filter(user=user).order_by('-created')
favorites = [{
'map_feature': f.map_feature,
'title': f.map_feature.title(),
'instance': f.map_feature.instance,
'address': f.map_feature.address_full,
'photo': _small_feature_photo_url(f.map_feature)
} for f in favorites_qs]
public_fields = []
private_fields = []
for field in USER_PROFILE_FIELDS.values():
field_tuple = (field['label'], field['identifier'],
field.get('template', "treemap/field/div.html"))
if field['visibility'] == 'public' and user.make_info_public is True:
public_fields.append(field_tuple)
else:
private_fields.append(field_tuple)
return {'user': user,
'its_me': user.id == request.user.id,
'reputation': reputation,
'instance_id': instance_id,
'instances': get_user_instances(request.user, user, instance),
'total_edits': audit_dict['total_count'],
'audits': audit_dict['audits'],
'next_page': audit_dict['next_page'],
'public_fields': public_fields,
'private_
| maroy/TSTA | cse-581-project-2/src/extract_keywords.py | Python | mit | 1,499 | 0.002668 |
import re
import json
import sqlite3
import nltk
stop = nltk.corpus.stopwords.words("english")
stop.append('rt')
contractions = []
with open('contractions.txt', 'rb') as f:
contractions = [c.strip() for c in f.readlines()]
lemmatizer = nltk.stem.wordnet.WordNetLemmatizer()
tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
url_regex = re.compile(r"http[s]?[^\s]*")
contractions_regex = re.compile("|".join(contractions))
first_tweet = None
con = sqlite3.connect("D:/TWEETS/2014-11-05-22-45-54.db")
c = con.cursor()
with open("out.csv", "wb") as out_file:
for row in c.execute("SELECT tweet FROM tweets"):
if first_tweet is None:
first_tweet = row[0]
j = json.loads(row[0])
tweet_id = j['id']
timestamp = j['timestamp_ms']
text = j['text']
text = text.lower()
text = url_regex.sub('', text)
text = contractions_regex.sub('', text)
all_tokens = tokenizer.tokenize(text)
tokens = []
for token in all_tokens:
token = lemmatizer.lemmatize(token)
if token not in stop:
tokens.append(token)
#items = [str(id), json.dumps(text)] + [token.encode('utf8') for token in tokens]
items = [str(tweet_id), str(timestamp)] + [token.encode('utf8') for token in tokens]
out_file.write(" ".join(items) + "\n")
with open("tweet.json", "wb") as f:
f.write(first_tweet)
con.close()
| bmya/odoo-support | adhoc_modules_server/octohub/exceptions.py | Python | lgpl-3.0 | 701 | 0.002853 |
# Copyright (c) 2013 Alon Swartz <alon@turnkeylinux.org>
#
# This file is part of OctoHub.
#
# OctoHub is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
import simplejson as json
class ResponseError(Exception):
"""Accessible attributes: error
error (AttrDict): Parsed error response
"""
def __init__(self, error):
Exception.__init__(self, error)
self.error = error
def __str__(self):
return json.dumps(self.error, indent=1)
class OctoHubError(Exception):
pass
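A minimal sketch of `ResponseError` in use; the payload dict is a hypothetical example of a parsed error response (the docstring above says the real code passes an AttrDict, but any JSON-serializable mapping works for illustration):

```python
# Minimal sketch; the payload dict is a hypothetical example.
try:
    raise ResponseError({'message': 'Not Found', 'status': 404})
except ResponseError as e:
    print(e)  # __str__ pretty-prints the payload as indented JSON
```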
| forestdussault/olc_webportalv2 | olc_webportalv2/new_multisample/models.py | Python | mit | 8,667 | 0.000923 |
from django.db import models
from olc_webportalv2.users.models import User
from django.contrib.postgres.fields.jsonb import JSONField
import os
from django.core.exceptions import ValidationError
# Assumed missing import: validate_fastq below calls _() but the file as
# captured here never defines it.
from django.utils.translation import ugettext_lazy as _
# Create your models here.
def validate_fastq(fieldfile):
filename = os.path.basename(fieldfile.name)
if filename.endswith('.fastq.gz') or filename.endswith('.fastq'):
print('File extension for {} confirmed valid'.format(filename))
else:
raise ValidationError(
_('%(filename)s does not end with .fastq or .fastq.gz'),
params={'filename': filename},
)
class ProjectMulti(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
project_title = models.CharField(max_length=256)
description = models.CharField(max_length=200, blank=True)
date = models.DateTimeField(auto_now_add=True)
forward_id = models.CharField(max_length=256, default='_R1')
reverse_id = models.CharField(max_length=256, default='_R2')
def __str__(self):
return self.project_title
class Sample(models.Model):
project = models.ForeignKey(ProjectMulti, on_delete=models.CASCADE, related_name='samples')
file_R1 = models.FileField(upload_to='%Y%m%d%s', blank=True)
file_R2 = models.FileField(upload_to='%Y%m%d%s', blank=True)
file_fasta = models.FileField(upload_to='%Y%m%d%s', blank=True)
title = models.CharField(max_length=200, blank=True)
genesippr_status = models.CharField(max_length=128,
default="Unprocessed")
sendsketch_status = models.CharField(max_length=128,
default="Unprocessed")
confindr_status = models.CharField(max_length=128,
default="Unprocessed")
genomeqaml_status = models.CharField(max_length=128,
default="Unprocessed")
amr_status = models.CharField(max_length=128,
default="Unprocessed")
def __str__(self):
return self.title
class GenomeQamlResult(models.Model):
class Meta:
verbose_name_plural = "GenomeQAML Results"
sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='genomeqaml_result')
predicted_class = models.CharField(max_length=128, default='N/A')
percent_fail = models.CharField(max_length=128, default='N/A')
percent_pass = models.CharField(max_length=128, default='N/A')
percent_reference = models.CharField(max_length=118, default='N/A')
def __str__(self):
return '{}'.format(self.sample)
class SendsketchResult(models.Model):
class Meta:
verbose_name_plural = "Sendsketch Results"
def __str__(self):
return 'pk {}: Rank {}: Sample {}'.format(self.pk, self.rank, self.sample.pk)
sample = models.ForeignKey(Sample, on_delete=models.CASCADE)
rank = models.CharField(max_length=8, default='N/A')
wkid = models.CharField(max_length=256, default='N/A')
kid = models.CharField(max_length=256, default='N/A')
ani = models.CharField(max_length=256, default='N/A')
complt = models.CharField(max_length=256, default='N/A')
contam = models.CharField(max_length=256, default='N/A')
matches = models.CharField(max_length=256, default='N/A')
unique = models.CharField(max_length=256, default='N/A')
nohit = models.CharField(max_length=256, default='N/A')
taxid = models.CharField(max_length=256, default='N/A')
gsize = models.CharField(max_length=256, default='N/A')
gseqs = models.CharField(max_length=256, default='N/A')
taxname = models.CharField(max_length=256, default='N/A')
class GenesipprResults(models.Model):
# For admin panel
def __str__(self):
return '{}'.format(self.sample)
# TODO: Accommodate seqID
sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='genesippr_results')
# genesippr.csv
strain = models.CharField(max_length=256, default="N/A")
genus = models.CharField(max_length=256, default="N/A")
# STEC
serotype = models.CharField(max_length=256, default="N/A")
o26 = models.CharField(max_length=256, default="N/A")
o45 = models.CharField(max_length=256, default="N/A")
o103 = models.CharField(max_length=256, default="N/A")
o111 = models.CharField(max_length=256, default="N/A")
o121 = models.CharField(max_length=256, default="N/A")
o145 = models.CharField(max_length=256, default="N/A")
o157 = models.CharField(max_length=256, default="N/A")
uida = models.CharField(max_length=256, default="N/A")
eae = models.CharField(max_length=256, default="N/A")
eae_1 = models.CharField(max_length=256, default="N/A")
vt1 = models.CharField(max_length=256, default="N/A")
vt2 = models.CharField(max_length=256, default="N/A")
vt2f = models.CharField(max_length=256, default="N/A")
# listeria
igs = models.CharField(max_length=256, default="N/A")
hlya = models.CharField(max_length=256, default="N/A")
inlj = models.CharField(max_length=256, default="N/A")
# salmonella
inva = models.CharField(max_length=256, default="N/A")
stn = models.CharField(max_length=256, default="N/A")
def inva_number(self):
return float(self.inva.split('%')[0])
def uida_number(self):
return float(self.uida.split('%')[0])
def vt1_number(self):
return float(self.vt1.split('%')[0])
def vt2_number(self):
return float(self.vt2.split('%')[0])
def vt2f_number(self):
return float(self.vt2f.split('%')[0])
def eae_number(self):
return float(self.eae.split('%')[0])
def eae_1_number(self):
return float(self.eae_1.split('%')[0])
def hlya_number(self):
return float(self.hlya.split('%')[0])
def igs_number(self):
return float(self.igs.split('%')[0])
def inlj_number(self):
return float(self.inlj.split('%')[0])
class Meta:
verbose_name_plural = "Genesippr Results"
class GenesipprResultsSixteens(models.Model):
class Meta:
verbose_name_plural = "SixteenS Results"
def __str__(self):
return '{}'.format(self.sample)
sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='sixteens_results')
# sixteens_full.csv
strain = models.CharField(max_length=256, default="N/A")
gene = models.CharField(max_length=256, default="N/A")
percentidentity = models.CharField(max_length=256, default="N/A")
genus = models.CharField(max_length=256, default="N/A")
foldcoverage = models.CharField(max_length=256, default="N/A")
@property
def gi_accession(self):
# Split by | delimiter, pull second element which should be the GI#
gi_accession = self.gene.split('|')[1]
return gi_accession
class GenesipprResultsGDCS(models.Model):
class Meta:
verbose_name_plural = "GDCS Results"
def __str__(self):
return '{}'.format(self.sample)
sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='gdcs_results')
# GDCS.csv
strain = models.CharField(max_length=256, default="N/A")
genus = models.CharField(max_length=256, default="N/A")
matches = models.CharField(max_length=256, default="N/A")
meancoverage = models.CharField(max_length=128, default="N/A")
passfail = models.CharField(max_length=16, default="N/A")
allele_dict = JSONField(blank=True, null=True, default=dict)
class ConFindrResults(models.Model):
class Meta:
verbose_name_plural = 'Confindr Results'
def __str__(self):
return '{}'.format(self.sample)
sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='confindr_results')
strain = models.CharField(max_length=256, default="N/A")
genera_present = models.CharField(max_length=256, default="N/A")
contam_snvs = models.CharField(max_length=256, default="N/A")
contaminated = models.CharField(max_length=256, default="N/A")
class GenesipprResultsSerosippr(models.Model):
class Meta:
verbose_name_plural = "Serosippr Results"
def __str__(self
| ros2/launch | launch_testing/test/launch_testing/test_flake8.py | Python | apache-2.0 | 830 | 0 |
# Copyright 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_flake8.main import main_with_errors
def test_flake8():
rc, errors = main_with_errors(argv=[])
assert rc == 0, \
        'Found %d code style errors / warnings:\n' % len(errors) + \
'\n'.join(errors)
|
marmorkuchen/jw2html
|
setup.py
|
Python
|
gpl-3.0
| 1,182
| 0.002538
|
import os
from setuptools import setup, find_packages
from jw2html import VERSION
setup(
name='JW2HTML',
version=VERSION,
description='JW2HTML converts an issue of the Jungle World from the website to a single HTML file to be used for epub conversion by e.g. calibre.',
long_description='Alas, there is no epub version of the Jungle World, http://jungle-world.com . Hence this little module to download the current issue and pack it into one HTML file which can then be converted to epub (using e.g. http://calibre-ebook.com). It also downloads the cover image for easy inclusion when creating the book in calibre.',
license='GPL',
keywords='jungle world newspaper html epub convert',
url='https://github.com/marmorkuchen/jw2html',
author='marmorkuchen',
author_email='marmorkuchen@kodeaffe.de',
packages=find_packages(),
include_package_data=True,
data_files=[
('doc', ['README.rst', 'LICENSE']),
        (os.path.join(os.sep, 'etc'), ['jw2html.ini',]),
],
entry_points={
'console_scripts': [
'jw2html = jw2html:main',
]
},
install_requires=[
'beautifulsoup4',
],
)
|
GTACSolutions/python-braspag
|
python_braspag/decorators.py
|
Python
|
apache-2.0
| 849
| 0.001178
|
# coding: utf8
from functools import wraps
from logging import getLogger
logger = getLogger(__name__)
__author__ = 'marcos.costa'
class request_logger(object):
def __init__(self, method=None):
self.method = method
def __call__(self, func):
method = self.method
if method is None:
            method = func.__name__
@wraps(func)
def wrapper(instance, request, *args, **kwargs):
response = func(instance, request, *args, **kwargs)
msg = ("\nCalled method: {method}\nrequest: {request}"
"\nresponse: {response}").format(method=method,
request=request,
response=response)
logger.info(msg)
return response
return wrapper
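# A minimal usage sketch (Gateway and backend_call are hypothetical):
#     class Gateway(object):
#         @request_logger('Authorize')
#         def authorize(self, request):
#             return backend_call(request)
# Each Gateway().authorize(request) call then logs the method name, request
# and response at INFO level before returning the response.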
|
JunctionAt/JunctionWWW
|
blueprints/base.py
|
Python
|
agpl-3.0
| 504
| 0.001984
|
from werkzeug.local import LocalProxy
from . import extension_access
def extension_access_proxy(name):
return LocalProxy(lambda: getattr(extension_access, name, None))
# Mostly for backwards compatibility
cache = extension_access_proxy("cache")
mongo = extension_access_proxy("mongo")
mail = extension_access_proxy("mail")
admin = extension_access_proxy("admin")
rest_api = extension_access_proxy("rest_api")
markdown = extension_access_proxy("markdown")
assets = extension_access_proxy("assets")
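# Each proxy above resolves lazily: e.g. cache.get('key') looks up
# extension_access.cache at call time, so this module can be imported before
# the extensions have been initialised.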
|
alex-zoltowski/SSBM-AI
|
AI/Characters/character.py
|
Python
|
gpl-3.0
| 5,818
| 0.004641
|
import AI.pad
import AI.state
import AI.state_manager
class Character:
def __init__(self, pad_path):
self.action_list = []
self.last_action = 0
self.pad = AI.pad.Pad(pad_path)
self.state = AI.state.State()
#Set False to enable character selection
self.test_mode = True
self.sm = AI.state_manager.StateManager(self.state, self.test_mode)
#test_mode = False, Selects character each run
def make_action(self, mm):
if self.state.menu == AI.state.Menu.Game:
self.advance()
elif self.state.menu == AI.state.Menu.Characters:
mm.pick_fox(self.state, self.pad)
elif self.state.menu == AI.state.Menu.Stages:
self.pad.tilt_stick(AI.pad.Stick.C, 0.5, 0.5)
elif self.state.menu == AI.state.Menu.PostGame:
mm.press_start_lots(self.state, self.pad)
#test_mode = True, AI starts fighting each run, saves time during testing
def make_action_test(self, mm):
if self.state.menu == AI.state.Menu.Game:
self.advance()
elif self.state.menu == AI.state.Menu.PostGame:
mm.press_start_lots(self.state, self.pad)
#implemented by each character to decide what to do
#includes some states where each character will respond the same
def logic(self):
if AI.state.is_spawning(self.state.players[2].action_state):
self.tilt_stick(60, 'DOWN')
self.tilt_stick(3, None)
#compare AI's current state
def compare_AI_state(self, test_state):
return self.state.players[2].action_state is test_state
#compare P1 current state
def compare_P1_state(self, test_state):
        return self.state.players[0].action_state is test_state
#executes button presses defined in action_list, runs logic() once list is empty
def advance(self):
while self.action_list:
wait, func, args = self.action_list[0]
if self.state.frame - self.last_action < wait:
return
else:
self.action_list.pop(0)
if func is not None:
func(*args)
self.last_action = self.state.frame
else:
self.logic()
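    # Worked example: logic() above queues tilt_stick(60, 'DOWN'), i.e. the
    # tuple (60, pad.tilt_stick, [Stick.MAIN, 0.5, 0.0]); advance() waits 60
    # frames after the previous action before executing it.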
'''Methods simulate controller input; appends necessary tuple to action_list'''
def press_button(self, wait, button):
self.action_list.append((wait, self.pad.press_button, [button]))
def release_button(self, wait, button):
self.action_list.append((wait, self.pad.release_button, [button]))
def tilt_stick(self, wait, direction):
        # Compare strings with ==; identity ('is') only worked by interning accident.
        if direction == 'UP':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.5, 1.0]))
        elif direction == 'DOWN':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.5, 0.0]))
        elif direction == 'DOWN_LEFT':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.25, 0.25]))
        elif direction == 'DOWN_RIGHT':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.75, 0.25]))
        elif direction == 'RIGHT':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 1.0, 0.5]))
        elif direction == 'LEFT':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.0, 0.5]))
        elif direction is None:
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.5, 0.5]))
def tilt_c_stick(self, wait, direction):
        if direction == 'UP':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.5, 1.0]))
        elif direction == 'DOWN':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.5, 0.0]))
        elif direction == 'RIGHT':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 1.0, 0.5]))
        elif direction == 'LEFT':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.0, 0.5]))
        elif direction is None:
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.5, 0.5]))
def press_trigger(self, wait, amount):
self.action_list.append((wait, self.pad.press_trigger, [AI.pad.Trigger.L, amount]))
def wait(self, wait):
self.action_list.append((wait, None, []))
'''Execute actions shared among all characters'''
def style(self, wait):
pass
def side_b(self, wait):
self.tilt_stick(wait, 'RIGHT')
self.press_button(1, AI.pad.Button.B)
self.release_button(2, AI.pad.Button.B)
self.tilt_stick(2, None)
def shield(self, wait, length):
self.press_trigger(wait, 0.3)
self.press_trigger(length, 0.0)
def dashdance(self, wait, length):
self.wait(wait)
for _ in range(length):
self.tilt_stick(4, 'LEFT')
self.tilt_stick(4, 'RIGHT')
self.tilt_stick(1, None)
def shorthop(self, wait):
self.press_button(wait, AI.pad.Button.X)
self.release_button(1, AI.pad.Button.X)
'''Execute similar actions that is dependent on character frame data'''
def wavedash(self, wait, direction, wait_airdodge):
self.tilt_stick(wait, direction)
self.shorthop(1)
self.press_button(wait_airdodge, AI.pad.Button.L)
self.release_button(2, AI.pad.Button.L)
self.tilt_stick(1, None)
def shorthop_nair(self, wait, wait_attack, wait_ff):
self.shorthop(wait)
self.press_button(wait_attack, AI.pad.Button.A)
self.release_button(1, AI.pad.Button.A)
self.tilt_stick(wait_ff, 'DOWN')
self.tilt_stick(3, None)
self.press_trigger(2, 0.5)
self.press_trigger(1, 0.0)
|
JoshuaEbenezer/SNLP-reviews
|
SNLP/src/read_file.py
|
Python
|
gpl-3.0
| 795
| 0.023899
|
import sys
folder_loc = sys.argv[1]
filename = sys.argv[2]
fileobj = open(folder_loc + filename)
flag=0
real_rev = open(folder_loc + "real_"+filename,"w+")
fake_rev = open(folder_loc + "fake_"+filename,"w+")
for line in fileobj:
for i in range(len(line)):
if (line[i] == '[' and flag == 0): #for beginning of real reviews
flag = 1
        elif (line[i-1] == ']' and flag == 1): #for end of real reviews
flag = 2
elif (line[i]=='[' and flag == 2): #for beginning of fake reviews
flag = 3
        elif (line[i-1] == ']' and flag == 3): #for end of fake reviews
flag = 4
if(flag ==1):
real_rev.write(line[i])
elif(flag==3):
fake_rev.write(line[i])
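# Flag walk-through: 0 = before the real-review block, 1 = inside the first
# [...] (characters copied to real_rev), 2 = between the two blocks, 3 =
# inside the second [...] (copied to fake_rev), 4 = done.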
|
bluemini/kuma
|
vendor/packages/translate/filters/test_decoration.py
|
Python
|
mpl-2.0
| 3,921
| 0.004108
|
# -*- coding: utf-8 -*-
"""tests decoration handling functions that are used by checks"""
from translate.filters import decoration
def test_spacestart():
"""test operation of spacestart()"""
assert decoration.spacestart(" Start") == " "
assert decoration.spacestart(u"\u0020\u00a0Start") == u"\u0020\u00a0"
# non-breaking space
assert decoration.spacestart(u"\u00a0\u202fStart") == u"\u00a0\u202f"
# Some exotic spaces
assert decoration.spacestart(u"\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200aStart") == u"\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a"
def test_isvalidaccelerator():
"""test the isvalidaccelerator() function"""
# Mostly this tests the old code path where acceptlist is None
assert not decoration.isvalidaccelerator(u"")
assert decoration.isvalidaccelerator(u"a")
assert decoration.isvalidaccelerator(u"1")
assert not decoration.isvalidaccelerator(u"ḽ")
# Test new code path where we actually have an acceptlist
assert decoration.isvalidaccelerator(u"a", u"aeiou")
assert decoration.isvalidaccelerator(u"ḽ", u"ḓṱḽṋṅ")
assert not decoration.isvalidaccelerator(u"a", u"ḓṱḽṋṅ")
def test_find_marked_variables():
"""check that we can identify variables correctly, the first returned
value is the start location, the second returned value is the actual
    variable sans decorations"""
variables = decoration.findmarkedvariables("The <variable> string", "<", ">")
assert variables == [(4, "variable")]
variables = decoration.findmarkedvariables("The $vari
|
able string", "$", 1)
assert variables == [(4, "v")]
variables = decoration.findmarkedvariables("The $variable string", "$", None)
assert variables == [(4, "variable")]
variables = decoration.findmarkedvariables("The $variable string", "$", 0)
assert variables == [(4, "")]
variables = decoration.findmarkedvariables("The &variable; string", "&", ";")
assert variables == [(4, "variable")]
variables = decoration.findmarkedvariables("The &variable.variable; string", "&", ";")
assert variables == [(4, "variable.variable")]
def test_getnumbers():
"""test operation of getnumbers()"""
assert decoration.getnumbers(u"") == []
assert decoration.getnumbers(u"No numbers") == []
assert decoration.getnumbers(u"Nine 9 nine") == ["9"]
assert decoration.getnumbers(u"Two numbers: 2 and 3") == ["2", "3"]
assert decoration.getnumbers(u"R5.99") == ["5.99"]
# TODO fix these so that we are able to consider locale specific numbers
#assert decoration.getnumbers(u"R5,99") == ["5.99"]
#assert decoration.getnumbers(u"1\u00a0000,99") == ["1000.99"]
assert decoration.getnumbers(u"36°") == [u"36°"]
assert decoration.getnumbers(u"English 123, Bengali \u09e7\u09e8\u09e9") == [u"123", u"\u09e7\u09e8\u09e9"]
def test_getfunctions():
"""test operation of getfunctions()"""
assert decoration.getfunctions(u"") == []
assert decoration.getfunctions(u"There is no function") == []
assert decoration.getfunctions(u"Use the getfunction() function.") == ["getfunction()"]
assert decoration.getfunctions(u"Use the getfunction1() function or the getfunction2() function.") == ["getfunction1()", "getfunction2()"]
assert decoration.getfunctions(u"The module.getfunction() method") == ["module.getfunction()"]
assert decoration.getfunctions(u"The module->getfunction() method") == ["module->getfunction()"]
assert decoration.getfunctions(u"The module::getfunction() method") == ["module::getfunction()"]
assert decoration.getfunctions(u"The function().function() function") == ["function().function()"]
assert decoration.getfunctions(u"Deprecated, use function().") == ["function()"]
assert decoration.getfunctions(u"Deprecated, use function() or other().") == ["function()", "other()"]
|
marbindrakon/eve-poscensus
|
census.py
|
Python
|
gpl-3.0
| 5,093
| 0.001767
|
# POS Census v0.1
# Copyright (c) 2012 Andrew Austin
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Names and data specific to Eve Online are Copyright CCP Games H.F.
import eveapi
import csv
import sqlite3
# Put your API information here
keyID = XXXXXXX
vCode = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
# Put the system ID you want to take a census of here
systemID = XXXXXXXXX
def build_database():
"""
Build a sqlite3 database from the mapDenormalize.csv data file.
"""
print "Building database...\n"
conn = sqlite3.Connection("mapData.db")
c = conn.cursor()
c.execute('''CREATE TABLE mapDenormalize (id int, name text)''')
reader = csv.reader(open('mapDenormalize.csv'))
# Skip the header row
next(reader)
# Build a list from which we'll populate the sqlite DB
records = []
for row in reader:
records.append((int(row[0]), row[11]))
print "Inserting %s rows to mapData.db..." % len(records)
c.executemany("INSERT INTO mapDenormalize VALUES (?,?)", records)
conn.commit()
conn.close()
class POS:
"""
A POS object, contains a location string, and lists of chas and smas.
The lists of chas and smas are lists of (itemID, name) tuples.
"""
    def __init__(self, name, location, x, y, z, smas=None, chas=None):
        self.name = name
        self.location = location
        # Use None defaults so instances never share one mutable list.
        self.smas = smas if smas is not None else []
        self.chas = chas if chas is not None else []
self.x = x
self.y = y
self.z = z
def report(self):
"""
Output the report for this POS.
"""
print "*****************************"
print "POS: %s at %s" % (self.name, self.location)
print "\t %s CHAs:" % len(self.chas)
for cha in self.chas:
print "\t \t itemID: %s \t Name: %s" % (cha[0], cha[1])
print "\t %s SMAs:" % len(self.smas)
for sma in self.smas:
print "\t \t itemID: %s \t Name: %s" % (sma[0], sma[1])
print "*****************************"
def is_owner(self, x, y, z):
"""
        Returns True if the given x,y,z point lies inside an axis-aligned cube
        extending 350 km from the POS centre along each axis.
"""
minx = self.x - 350000
maxx = self.x + 350000
miny = self.y - 350000
maxy = self.y + 350000
minz = self.z - 350000
maxz = self.z + 350000
return minx <= x <= maxx and miny <= y <= maxy and minz <= z <= maxz
def generate_report():
"""
Main entry point for the program.
Generates POS objects StarbaseList API and populates them
using AssetList and Locations API calls.
"""
api = eveapi.EVEAPIConnection()
auth = api.auth(keyID=keyID, vCode=vCode)
conn = sqlite3.Connection('mapData.db')
c = conn.cursor()
print "Downloading Corporation Asset List..."
assets = auth.corp.AssetList()
print "Downloading Starbase List..."
starbases = auth.corp.StarbaseList()
rawCHAList = []
rawSMAList = []
poslist = []
for asset in assets.assets:
if asset.locationID == systemID:
if asset.typeID == 17621:
rawCHAList.append(asset.itemID)
if asset.typeID == 12237:
rawSMAList.append(asset.itemID)
print "Building POS List..."
for pos in starbases.starbases:
locationapi = auth.corp.Locations(IDs=pos.itemID).locations[0]
        moon = c.execute("SELECT name from mapDenormalize WHERE id = ?",
                         (pos.moonID,)).fetchone()[0]
poslist.append(POS(name=locationapi.itemName,
location=moon, smas=[], chas=[], x=locationapi.x,
y=locationapi.y, z=locationapi.z))
print "Processing SMAs..."
for sma in rawSMAList:
locationapi = auth.corp.Locations(IDs=sma).locations[0]
x = locationapi.x
y = locationapi.y
z = locationapi.z
name = locationapi.itemName
for pos in poslist:
if pos.is_owner(x=x, y=y, z=z):
pos.smas.append((sma, name))
print "Processing CHAs..."
for cha in rawCHAList:
locationapi = auth.corp.Locations(IDs=cha).locations[0]
x = locationapi.x
y = locationapi.y
z = locationapi.z
name = locationapi.itemName
for pos in poslist:
if pos.is_owner(x=x, y=y, z=z):
pos.chas.append((cha, name))
print "Displaying Report..."
for pos in poslist:
pos.report()
# Make sure we enter at generate_report()
if __name__ == "__main__":
generate_report()
|
alexlo03/ansible
|
test/units/modules/network/f5/test_bigip_vcmp_guest.py
|
Python
|
gpl-3.0
| 5,742
| 0.000522
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_vcmp_guest import Parameters
from library.modules.bigip_vcmp_guest import ModuleManager
from library.modules.bigip_vcmp_guest import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_vcmp_guest import Parameters
from ansible.modules.network.f5.bigip_vcmp_guest import ModuleManager
from ansible.modules.network.f5.bigip_vcmp_guest import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
initial_image='BIGIP-12.1.0.1.0.1447-HF1.iso',
mgmt_network='bridged',
mgmt_address='1.2.3.4/24',
vlans=[
'vlan1',
'vlan2'
]
)
p = Parameters(params=args)
assert p.initial_image == 'BIGIP-12.1.0.1.0.1447-HF1.iso'
assert p.mgmt_network == 'bridged'
def test_module_parameters_mgmt_bridged_without_subnet(self):
args = dict(
mgmt_network='bridged',
mgmt_address='1.2.3.4'
)
p = Parameters(params=args)
assert p.mgmt_network == 'bridged'
assert p.mgmt_address == '1.2.3.4/32'
def test_module_parameters_mgmt_address_cidr(self):
args = dict(
mgmt_network='bridged',
mgmt_address='1.2.3.4/24'
)
p = Parameters(params=args)
assert p.mgmt_network == 'bridged'
assert p.mgmt_address == '1.2.3.4/24'
def test_module_parameters_mgmt_address_subnet(self):
args = dict(
mgmt_network='bridged',
mgmt_address='1.2.3.4/255.255.255.0'
)
p = Parameters(params=args)
assert p.mgmt_network == 'bridged'
assert p.mgmt_address == '1.2.3.4/24'
def test_module_parameters_mgmt_route(self):
args = dict(
mgmt_route='1.2.3.4'
)
p = Parameters(params=args)
assert p.mgmt_route == '1.2.3.4'
def test_module_parameters_vcmp_software_image_facts(self):
# vCMP images may include a forward slash in their names. This is probably
# related to the slots on the system, but it is not a valid value to specify
# that slot when providing an initial image
args = dict(
initial_image='BIGIP-12.1.0.1.0.1447-HF1.iso/1',
)
p = Parameters(params=args)
assert p.initial_image == 'BIGIP-12.1.0.1.0.1447-HF1.iso/1'
def test_api_parameters(self):
args = dict(
initialImage="BIGIP-tmos-tier2-13.1.0.0.0.931.iso",
managementGw="2.2.2.2",
managementIp="1.1.1.1/24",
managementNetwork="bridged",
state="deployed",
vlans=[
"/Common/vlan1",
"/Common/vlan2"
]
)
p = Parameters(params=args)
assert p.initial_image == 'BIGIP-tmos-tier2-13.1.0.0.0.931.iso'
assert p.mgmt_route == '2.2.2.2'
assert p.mgmt_address == '1.1.1.1/24'
assert '/Common/vlan1' in p.vlans
assert '/Common/vlan2' in p.vlans
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
def tearDown(self):
self.patcher1.stop()
def test_create_vlan(self, *args):
set_module_args(dict(
            name="guest1",
mgmt_network="bridged",
mgmt_address="10.10.10.10/24",
initial_image="BIGIP-13.1.0.0.0.931.iso",
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
mm.is_deployed = Mock(side_effect=[False, True, True, True, True])
mm.deploy_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'guest1'
|
NextThought/pypy-numpy
|
numpy/random/setup.py
|
Python
|
bsd-3-clause
| 3,228
| 0.006196
|
from __future__ import division, print_function
from os.path import join, split, dirname
import os
import sys
from distutils.dep_util import newer
from distutils.msvccompiler import get_build_version as get_msvc_build_version
def needs_mingw_ftime_workaround():
# We need the mingw workaround for _ftime if the msvc runtime version is
# 7.1 or above and we build with mingw ...
# ... but we can't easily detect compiler version outside distutils command
# context, so we will need to detect in randomkit whether we build with gcc
msver = get_msvc_build_version()
if msver and msver >= 8:
return True
return False
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random', parent_package, top_path)
def generate_libraries(ext, build_dir):
config_cmd = config.get_config_cmd()
libs = get_mathlibs()
tc = testcode_wincrypt()
if config_cmd.try_run(tc):
libs.append('Advapi32')
ext.libraries.extend(libs)
return None
# enable unix large file support on 32 bit systems
# (64 bit off_t, lseek -> lseek64 etc.)
defs = [('_FILE_OFFSET_BITS', '64'),
('_LARGEFILE_SOURCE', '1'),
('_LARGEFILE64_SOURCE', '1'),
]
if needs_mingw_ftime_workaround():
defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
libs = []
# Configure mtrand
try:
import cffi
have_cffi = True
except ImportError:
have_cffi = False
if have_cffi:
#create the dll/so for the cffi version
if sys.platform == 'win32':
libs.append('Advapi32')
defs.append(('_MTRAND_DLL',None))
config.add_shared_library('_mtrand',
sources=[join('mtrand', x) for x in
['randomkit.c', 'distributions.c', 'initarray.c']],
build_info = {
'libraries': libs,
'depends': [join('mtrand', '*.h'),
],
'macros': defs,
}
)
else:
config.add_extension('mtrand',
                             sources=[join('mtrand', x) for x in
                                      ['mtrand.c', 'randomkit.c', 'initarray.c',
'distributions.c']]+[generate_libraries],
libraries=libs,
depends=[join('mtrand', '*.h'),
join('mtrand', '*.pyx'),
join('mtrand', '*.pxi'),],
define_macros=defs,
)
config.add_data_files(('.', join('mtrand', 'randomkit.h')))
config.add_data_dir('tests')
return config
def testcode_wincrypt():
return """\
/* check to see if _WIN32 is defined */
int main(int argc, char *argv[])
{
#ifdef _WIN32
return 0;
#else
return 1;
#endif
}
"""
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
zzzoidberg/landscape
|
finance/quotes/options.py
|
Python
|
mit
| 7,159
| 0.000978
|
# -*- coding: UTF-8 -*-
"""
Options quotes.
"""
import os
import json
import logging
from datetime import datetime
import collections
import requests
from landscape.finance import consts, database, dates, utils
from landscape.finance.volatility import math
OptionQuote = collections.namedtuple(
'OptionQuote',
'symbol type expiration strike date time bid ask stock iv_bid iv_ask')
CBOE_URL = 'http://www.cboe.com/DelayedQuote/QuoteTableDownload.aspx'
current_dir = os.path.dirname(__file__)
with open(os.path.join(current_dir, 'data/cboe_headers.json')) as f:
CBOE_HEADERS = json.load(f)
with open(os.path.join(current_dir, 'data/cboe_post_data.json')) as f:
CBOE_POST_DATA = json.load(f)
CBOE_POST_DATA_TICKER_KEY = 'ctl00$ctl00$AllContent$ContentMain$' \
'QuoteTableDownloadCtl1$txtTicker'
MONTHS = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
SKIP_SYMBOLS = ['SPXW', 'SPXQ', 'SPY7', 'SPYJ', 'VXX2']
def save_quote(db, quote):
"""Saves quote to database"""
expiration = database.encode_date(quote.expiration)
date = database.encode_date(quote.date)
time = database.encode_time(quote.time)
db.execute('UPDATE options SET time=?, bid=?, ask=?, stock=?, ' \
'iv_bid=?, iv_ask=? ' \
'WHERE symbol=? AND type=? AND expiration=? ' \
'AND strike=? AND date=?;', [time, quote.bid, quote.ask,
quote.stock, quote.iv_bid, quote.iv_ask, quote.symbol,
quote.type, expiration, quote.strike, date])
if db.rowcount == 0:
db.execute('INSERT INTO options VALUES ' \
'(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);',
[quote.symbol, quote.type, expiration, quote.strike,
date, time, quote.bid, quote.ask, quote.stock,
quote.iv_bid, quote.iv_ask])
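# Note: save_quote above is a portable upsert -- it tries UPDATE first and
# falls back to INSERT only when rowcount shows no existing row matched the
# option's key columns.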
def quote_factory(_, row):
"""Converts row to quote"""
    symbol, type_, expiration, strike, date, time, \
bid, ask, stock, iv_bid, iv_ask = row
expiration = database.decode_date(expiration)
date = database.decode_date(date)
time = database.decode_time(time)
return OptionQuote(symbol, type_, expiration, strike, date, time,
bid, ask, stock, iv_bid, iv_ask)
def _fetch_data(symbol):
"""Fetches realtime (delayed) options quotes from CBOE as a raw text.
Args:
symbol (str): Symbol to fetch.
Returns:
str: Raw quotes, or None if failed.
"""
logger = logging.getLogger(__name__)
logger.info('Fetching options quotes from CBOE for %s ...', symbol)
data = dict(CBOE_POST_DATA)
data[CBOE_POST_DATA_TICKER_KEY] = symbol
response = requests.post(CBOE_URL, data=data, headers=CBOE_HEADERS)
if response.status_code == 200:
return response.text
else:
logger.error('Cannot fetch options quotes from CBOE for %s', symbol)
def _parse_data(symbol, data, is_eod, db_name=None, timestamp=None):
"""Parses realtime (delayed) options quotes from CBOE and saves to
database.
Args:
symbol (str): Symbol.
data (str): Raw quotes for the symbol.
is_eod (bool): If True: mark received quotes as EOD (time=None),
if False: store actual time.
db_name (str): Optional database name.
timestamp (datetime): Optional datetime for the data.
Returns:
list: List of OptionQuote objects.
"""
logger = logging.getLogger(__name__)
if timestamp is None:
timestamp = dates.get_database_timestamp()
date = timestamp.date()
time = None if is_eod else timestamp.time()
quotes = []
stock_price = None
expirations = dates.get_expirations(symbol)
with database.connect_db(db_name) as db:
for line in data.splitlines():
values = line.strip().split(',')
if (len(values) == 4) and (stock_price is None):
stock_price = utils.to_float(values[1])
continue
if len(values) != 15:
continue
if values[0] == 'Calls' or values[0].find('-') >= 0:
continue
code_values = values[0].split(' ')
if len(code_values) != 4:
continue
position = code_values[3].find(code_values[0])
if code_values[3][1:position] in SKIP_SYMBOLS:
continue
expiration_year = 2000 + int(code_values[0])
expiration_month = MONTHS[code_values[1]]
expiration_day = int(code_values[3][position + 2:position + 4])
expiration = datetime(expiration_year, expiration_month,
expiration_day).date()
if expiration not in expirations:
continue
strike = utils.to_float(code_values[2])
for type_, bid, ask in [
(consts.CALL, values[3], values[4]),
(consts.PUT, values[10], values[11]),
]:
bid = utils.to_float(bid)
ask = utils.to_float(ask)
quote = OptionQuote(
symbol, type_, expiration, strike, date, time,
bid, ask, stock_price, None, None)
iv_bid = math.calc_iv(quote, bid) * 100
iv_ask = math.calc_iv(quote, ask) * 100
quote = OptionQuote(
symbol, type_, expiration, strike, date, time,
bid, ask, stock_price, iv_bid, iv_ask)
save_quote(db, quote)
quotes.append(quote)
logger.info('... quotes parsed: %d', len(quotes))
return quotes
def fetch_realtime(symbol, db_name=None):
"""Fetches realtime (delayed) options quotes from CBOE and saves to
database.
Args:
symbol (str): Symbol to fetch.
db_name (str): Optional database name.
Returns:
list: list of OptionQuote objects
"""
data = _fetch_data(symbol)
return _parse_data(symbol, data, False, db_name) if data else []
def fetch_historical(symbol, db_name=None):
"""Actually stores realtime data to database.
There's no free EOD options quotes provider so you need to call this method
at the end of each business day.
Args:
symbol: Symbol to fetch.
db_name (str): Optional database name.
Returns:
Number of quotes fetched.
"""
data = _fetch_data(symbol)
return len(_parse_data(symbol, data, True, db_name)) if data else 0
def query_historical(symbol, date, db_name=None):
"""Queries historical quotes from local database for given symbol and date.
Mimics fetch_realtime.
Args:
symbol (str): Stock symbol.
date (date): Date to query.
db_name (str): Optional database name.
Returns:
See fetch_realtime.
"""
with database.connect_db(db_name) as db:
db.row_factory = quote_factory
db.execute('SELECT * FROM options WHERE symbol=? AND date=?;',
[symbol, database.encode_date(date)])
return db.fetchall()
|
JonathonReinhart/killerbee
|
killerbee/zbwardrive/db.py
|
Python
|
bsd-3-clause
| 2,845
| 0.009842
|
import string
# Manages Local "database" for ZBWarDrive:
# This keeps track of current ZBWarDrive and Sniffing Device State.
# It is different from the online logging database.
class ZBScanDB:
"""
API to interact with the "database" storing information
for the zbscanning program.
"""
def __init__(self):
        self.channels = {chan: None for chan in range(11, 27)}  # 802.15.4 channels 11-26
# Devices is indexed by deviceId and stores a 4-tuple of device string, device serial, current status, and current channel
self.devices = {}
def close(self):
pass
# Add a new devices to the DB
def store_devices(self, devid, devstr, devserial):
self.devices[devid] = (devstr, devserial, 'Free', None)
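    # e.g. store_devices(0, 'dev0', 'A1B2') (illustrative values) records
    # devices[0] == ('dev0', 'A1B2', 'Free', None) until a capture claims it.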
# Returns the devid of a device marked 'Free',
# or None if there are no Free devices in the DB.
def get_devices_nextFree(self):
for devid, dev in self.devices.items():
if dev[2] == 'Free':
return devid
def update_devices_status(self, devid, newstatus):
if devid not in self.devices:
return None
(devstr, devserial, _, chan) = self.devices[devid]
self.devices[devid] = (devstr, devserial, newstatus, chan)
def update_devices_start_capture(self, devid, channel):
if devid not in self.devices:
return None
(devstr, devserial, _, _) = self.devices[devid]
self.devices[devid] = (devstr, devserial, "Capture", channel)
# Add a new network to the DB
def store_networks(self, key, spanid, source, channel, packet):
if channel not in self.channels:
return None
# TODO note this only stores the most recent in the channel
self.channels[channel] = (key, spanid, source, packet)
# Return the channel of the network identified by key,
    # or None if it doesn't exist in the DB.
def get_networks_channel(self, key):
#print "Looking up channel for network with key of %s" % (key)
        for chan, data in self.channels.items():
            if data and data[0] == key: return chan
return None
def channel_status_logging(self, chan):
'''
Returns False if we have not seen the network or are not currently
logging it's channel, and returns True if we are currently logging it.
@return boolean
'''
        if chan is None: raise Exception("None given for channel number")
elif chan not in self.channels: raise Exception("Invalid channel")
for dev in self.devices.values():
if dev[3] == chan and dev[2] == 'Capture':
return True
return False
# end of ZBScanDB class
def toHex(bin):
return ''.join(["%02x" % ord(x) for x in bin])
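# Example: toHex('\x0bG') == '0b47' -- Python 2 str iteration yields the
# single-byte characters that ord() expects.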
|
joopert/home-assistant
|
homeassistant/components/monoprice/media_player.py
|
Python
|
apache-2.0
| 7,123
| 0.000421
|
"""Support for interfacing with Monoprice 6 zone home audio controller."""
import logging
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerDevice, PLATFORM_SCHEMA
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_NAME,
CONF_PORT,
STATE_OFF,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN, SERVICE_RESTORE, SERVICE_SNAPSHOT
_LOGGER = logging.getLogger(__name__)
SUPPORT_MONOPRICE = (
SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_STEP
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
)
ZONE_SCHEMA = vol.Schema({vol.Required(CONF_NAME): cv.string})
SOURCE_SCHEMA = vol.Schema({vol.Required(CONF_NAME): cv.string})
CONF_ZONES = "zones"
CONF_SOURCES = "sources"
DATA_MONOPRICE = "monoprice"
# Valid zone ids: 11-16 or 21-26 or 31-36
ZONE_IDS = vol.All(
vol.Coerce(int),
vol.Any(
vol.Range(min=11, max=16), vol.Range(min=21, max=26), vol.Range(min=31, max=36)
),
)
# Valid source ids: 1-6
SOURCE_IDS = vol.All(vol.Coerce(int), vol.Range(min=1, max=6))
MEDIA_PLAYER_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.comp_entity_ids})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PORT): cv.string,
vol.Required(CONF_ZONES): vol.Schema({ZONE_IDS: ZONE_SCHEMA}),
vol.Required(CONF_SOURCES): vol.Schema({SOURCE_IDS: SOURCE_SCHEMA}),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Monoprice 6-zone amplifier platform."""
port = config.get(CONF_PORT)
from serial import SerialException
from pymonoprice import get_monoprice
try:
monoprice = get_monoprice(port)
except SerialException:
_LOGGER.error("Error connecting to Monoprice controller")
return
sources = {
source_id: extra[CONF_NAME] for source_id, extra in config[CONF_SOURCES].items()
}
hass.data[DATA_MONOPRICE] = []
    for zone_id, extra in config[CONF_ZONES].items():
_LOGGER.info("Adding zone %d - %s", zone_id, extra[CONF_NAME])
hass.data[DATA_MONOPRICE].append(
MonopriceZone(monoprice, sources, zone_id, extra[CONF_NAME])
)
add_entities(hass.data[DATA_MONOPRICE], True)
def service_handle(service):
"""Handle for services."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
devices = [
device
for device in hass.data[DATA_MONOPRICE]
if device.entity_id in entity_ids
]
else:
devices = hass.data[DATA_MONOPRICE]
for device in devices:
if service.service == SERVICE_SNAPSHOT:
device.snapshot()
elif service.service == SERVICE_RESTORE:
device.restore()
hass.services.register(
DOMAIN, SERVICE_SNAPSHOT, service_handle, schema=MEDIA_PLAYER_SCHEMA
)
hass.services.register(
DOMAIN, SERVICE_RESTORE, service_handle, schema=MEDIA_PLAYER_SCHEMA
)
class MonopriceZone(MediaPlayerDevice):
"""Representation of a Monoprice amplifier zone."""
def __init__(self, monoprice, sources, zone_id, zone_name):
"""Initialize new zone."""
self._monoprice = monoprice
# dict source_id -> source name
self._source_id_name = sources
# dict source name -> source_id
self._source_name_id = {v: k for k, v in sources.items()}
# ordered list of all source names
self._source_names = sorted(
self._source_name_id.keys(), key=lambda v: self._source_name_id[v]
)
self._zone_id = zone_id
self._name = zone_name
self._snapshot = None
self._state = None
self._volume = None
self._source = None
self._mute = None
def update(self):
"""Retrieve latest state."""
state = self._monoprice.zone_status(self._zone_id)
if not state:
return False
self._state = STATE_ON if state.power else STATE_OFF
self._volume = state.volume
self._mute = state.mute
idx = state.source
if idx in self._source_id_name:
self._source = self._source_id_name[idx]
else:
self._source = None
return True
@property
def name(self):
"""Return the name of the zone."""
return self._name
@property
def state(self):
"""Return the state of the zone."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._volume is None:
return None
return self._volume / 38.0
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._mute
@property
def supported_features(self):
"""Return flag of media commands that are supported."""
return SUPPORT_MONOPRICE
@property
def media_title(self):
"""Return the current source as medial title."""
return self._source
@property
def source(self):
"""Return the current input source of the device."""
return self._source
@property
def source_list(self):
"""List of available input sources."""
return self._source_names
def snapshot(self):
"""Save zone's current state."""
self._snapshot = self._monoprice.zone_status(self._zone_id)
def restore(self):
"""Restore saved state."""
if self._snapshot:
self._monoprice.restore_zone(self._snapshot)
self.schedule_update_ha_state(True)
def select_source(self, source):
"""Set input source."""
if source not in self._source_name_id:
return
idx = self._source_name_id[source]
self._monoprice.set_source(self._zone_id, idx)
def turn_on(self):
"""Turn the media player on."""
self._monoprice.set_power(self._zone_id, True)
def turn_off(self):
"""Turn the media player off."""
self._monoprice.set_power(self._zone_id, False)
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self._monoprice.set_mute(self._zone_id, mute)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._monoprice.set_volume(self._zone_id, int(volume * 38))
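        # e.g. volume=0.5 maps to int(0.5 * 38) == 19 on the amplifier's
        # native 0..38 scale, mirroring volume_level's division by 38.0.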
def volume_up(self):
"""Volume up the media player."""
if self._volume is None:
return
self._monoprice.set_volume(self._zone_id, min(self._volume + 1, 38))
def volume_down(self):
"""Volume down media player."""
if self._volume is None:
return
self._monoprice.set_volume(self._zone_id, max(self._volume - 1, 0))
|
bgroff/django-cas-ng
|
django_cas_ng/migrations/0001_initial.py
|
Python
|
mit
| 1,628
| 0.003686
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-13 18:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ProxyGrantingTicket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session_key', models.CharField(blank=True, max_length=255, null=True)),
('pgtiou', models.CharField(blank=True, max_length=255, null=True)),
('pgt', models.CharField(blank=True, max_length=255, null=True)),
('date', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='SessionTicket',
fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session_key', models.CharField(max_length=255)),
('ticket', models.CharField(max_length=255)),
],
),
migrations.AlterUniqueTogether(
name='proxygrantingticket',
unique_together=set([('session_key', 'user')]),
),
]
|
stefanv/lulu
|
lulu/tests/test_lulu.py
|
Python
|
bsd-3-clause
| 1,934
| 0.001551
|
from numpy.testing import *
import numpy as np
import lulu
import lulu.connected_region_handler as crh
class TestLULU:
img = np.zeros((5, 5)).astype(int)
img[0, 0:5] = 0
img[:, 4] = 1
img[1:3, 1:4] = 2
"""
[[0 0 0 0 1]
[0 2 2 2 1]
[0 2 2 2 1]
[0 0 0 0 1]
[0 0 0 0 1]]
"""
def test_connected_regions(self):
labels, regions = lulu.connected_regions(self.img)
assert_array_equal(labels, self.img)
assert_equal(len(regions), 3)
        crh.set_value(regions[0], 5)
assert_array_equal(crh.todense(regions[0]),
[[5, 5, 5, 5, 0],
[5, 0, 0, 0, 0],
[5, 0, 0, 0, 0],
[5, 5, 5, 5, 0],
[5, 5, 5, 5, 0]])
assert_array_equal(crh.todense(regions[1]),
[[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]])
assert_array_equal(crh.todense(regions[2]),
[[0, 0, 0, 0, 0],
[0, 2, 2, 2, 0],
[0, 2, 2, 2, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
class TestReconstruction:
def test_basic(self):
img = np.random.randint(255, size=(200, 200))
pulses = lulu.decompose(img)
img_, areas, area_count = lulu.reconstruct(pulses, img.shape)
        # Check the percentage mismatch first so a failure reports how many
        # pixels differ as a percent of the total number of pixels.
        assert_equal(np.sum(img_ != img) / float(np.prod(img.shape)) * 100,
                     0, "Percentage mismatch =")
        assert_array_equal(img_, img)
if __name__ == "__main__":
run_module_suite()
|
naturalis/HTS-barcode-checker
|
src/Parse_CITES.py
|
Python
|
bsd-3-clause
| 2,371
| 0.033319
|
#!/usr/bin/env python
from hts_barcode_checker import Taxon, TaxonDB
import logging, datetime, argparse, sqlite3
# NCBI taxonomy tree database 10.6084/m9.figshare.4620733
parser = argparse.ArgumentParser(description = 'Create a table containing the CITES species')
parser.add_argument('-db', '--CITES_db', metavar='CITES database name', dest='db',type=str,
help='Name and path to the output location for the CITES database')
parser.add_argument('-csv', '--CITES_dump', metavar='CITES CSV dump', dest='dmp', type=str,
help='Location of the CSV dump downloaded from CITES')
parser.add_argument('-ncbi', '--NCBI_taxonomy', metavar='NCBI taxonomy tree database', dest='n', type=str,
help='Location of sqlite database with NCBI taxonomy tree')
parser.add_argument('-l', '--logging', metavar='log level', dest='l', type=str,
help = 'Set log level to: debug, info, warning (default) or critical see readme for more details.', default='warning')
parser.add_argument('-lf', '--log_file', metavar='log file', dest='lf', type=str,
help = 'Path to the log file')
args = parser.parse_args()
def main ():
# configure logger
log_level = getattr(logging, args.l.upper(), None)
log_format = '%(funcName)s [%(lineno)d]: %(levelname)s: %(message)s'
if not isinstance(log_level, int):
		raise ValueError('Invalid log level: %s' % args.l)
if args.lf == '':
logging.basicConfig(format=log_format, level=log_level)
else:
logging.basicConfig(filename=args.lf, filemode='a', format=log_format, level=log_level)
# instantiate DB object, parse CITES dump
db = TaxonDB(date=str(datetime.datetime.now()))
db.from_dump(args.dmp)
# configure local sqlite database
conn = sqlite3.connect(args.n)
curr = conn.cursor()
# iterate over parsed taxa, resolve NCBI taxid and expand higher taxa
counter = 1
expanded = []
for taxon in db.taxa:
taxon.tnrs(cursor=curr)
result = taxon.expand(cursor=curr)
for taxid in result.keys():
expanded.append(Taxon(
appendix=taxon.appendix,
name=taxon.name,
description=taxon.description,
footnotes=taxon.footnotes,
ncbi={taxid:result[taxid]}
))
logging.info('%d/%d' % ( counter, len(db.taxa) ))
		counter += 1
# write output
for taxon in expanded:
db.taxa.append(taxon)
handle = open(args.db, 'w')
	db.to_csv(handle)
handle.close()
if __name__ == "__main__":
main()
|
ctiller/grpc
|
tools/run_tests/lb_interop_tests/gen_build_yaml.py
|
Python
|
apache-2.0
| 12,124
| 0.000577
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate JSON data for LB interop test scenarios."""
import json
import os
import yaml
all_scenarios = []
# TODO(https://github.com/grpc/grpc-go/issues/2347): enable
# client_falls_back_because_no_backends_* scenarios for Java/Go.
# TODO(https://github.com/grpc/grpc-java/issues/4887): enable
# *short_stream* scenarios for Java.
# TODO(https://github.com/grpc/grpc-java/issues/4912): enable
# Java TLS tests involving TLS to the balancer.
def server_sec(transport_sec):
if transport_sec == 'google_default_credentials':
return 'alts', 'alts', 'tls'
return transport_sec, transport_sec, transport_sec
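# e.g. server_sec('google_default_credentials') == ('alts', 'alts', 'tls');
# any other transport is reused unchanged for balancer, backend and fallback.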
def generate_no_balancer_because_lb_a_record_returns_nx_domain():
all_configs = []
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
config = {
'name':
'no_balancer_because_lb_a_record_returns_nx_domain_%s' %
transport_sec,
'skip_langs': [],
'transport_sec':
transport_sec,
'balancer_configs': [],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_no_balancer_because_lb_a_record_returns_nx_domain()
def generate_no_balancer_because_lb_a_record_returns_no_data():
all_configs = []
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
config = {
'name':
'no_balancer_because_lb_a_record_returns_no_data_%s' %
transport_sec,
'skip_langs': [],
'transport_sec':
transport_sec,
'balancer_configs': [],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
True,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_no_balancer_because_lb_a_record_returns_no_data()
def generate_client_referred_to_backend():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_%s_short_stream_%s' %
(transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend()
def generate_client_referred_to_backend_fallback_broken():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in ['alts', 'tls', 'google_default_credentials']:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_fallback_broken_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}],
'fallback_configs': [{
'transport_sec': 'insecure',
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
            all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_fallback_broken()
def generate_client_referred_to_backend_multiple_backends():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_multiple_backends_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_multiple_backends()
def generate_client_falls_back_because_no_backends():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = ['go', 'java']
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_falls_back_because_no_backends_%s_short_stream_%s' %
(transport_sec, balancer_short_stream),
|
ThiefMaster/sqlalchemy
|
lib/sqlalchemy/dialects/postgresql/ext.py
|
Python
|
mit
| 4,889
| 0.000205
|
# postgresql/ext.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from ...sql import expression
from ...sql import elements
from ...sql import functions
from ...sql.schema import ColumnCollectionConstraint
from .array import ARRAY
class aggregate_order_by(expression.ColumnElement):
"""Represent a Postgresql aggregate order by expression.
E.g.::
from sqlalchemy.dialects.postgresql import aggregate_order_by
expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
stmt = select([expr])
would represent the expression::
SELECT array_agg(a ORDER BY b DESC) FROM table;
Similarly::
expr = func.string_agg(
table.c.a,
aggregate_order_by(literal_column("','"), table.c.a)
)
stmt = select([expr])
Would represent::
SELECT string_agg(a, ',' ORDER BY a) FROM table;
.. versionadded:: 1.1
.. seealso::
:class:`.array_agg`
"""
__visit_name__ = 'aggregate_order_by'
def __init__(self, target, order_by):
self.target = elements._literal_as_binds(target)
self.order_by = elements._literal_as_binds(order_by)
def self_group(self, against=None):
return self
def get_children(self, **kwargs):
return self.target, self.order_by
def _copy_internals(self, clone=elements._clone, **kw):
self.target = clone(self.target, **kw)
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return self.target._from_objects + self.order_by._from_objects
class ExcludeConstraint(ColumnCollectionConstraint):
"""A table-level EXCLUDE constraint.
Defines an EXCLUDE constraint as described in the `postgres
documentation`__.
__ http://www.postgresql.org/docs/9.0/\
static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
"""
__visit_name__ = 'exclude_constraint'
where = None
def __init__(self, *elements, **kw):
"""
:param \*elements:
A sequence of two tuples of the form ``(column, operator)`` where
column must be a column name or Column object and operator must
be a string containing the operator to use.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param using:
Optional string. If set, emit USING <index_method> when issuing DDL
for this constraint. Defaults to 'gist'.
:param where:
Optional string. If set, emit WHERE <predicate> when issuing DDL
for this constraint.
"""
columns = []
render_exprs = []
self.operators = {}
expressions, operators = zip(*elements)
for (expr, column, strname, add_element), operator in zip(
self._extract_col_expression_collection(expressions),
operators
):
if add_element is not None:
columns.append(add_element)
name = column.name if column is not None else strname
if name is not None:
# backwards compat
self.operators[name] = operator
expr = expression._literal_as_text(expr)
render_exprs.append(
(expr, name, operator)
)
self._render_exprs = render_exprs
ColumnCollectionConstraint.__init__(
self,
*columns,
name=kw.get('name'),
deferrable=kw.get('deferrable'),
initially=kw.get('initially')
)
self.using = kw.get('using', 'gist')
where = kw.get('where')
if where is not None:
self.where = expression._literal_as_text(where)
def copy(self, **kw):
elements = [(col, self.operators[col])
for col in self.columns.keys()]
c = self.__class__(*elements,
name=self.name,
                           deferrable=self.deferrable,
initially=self.initially)
c.dispatch._update(self.dispatch)
return c
def array_agg(*arg, **kw):
"""Postgresql-specific form of :class:`.array_agg`, ensures
return type is :class:`.postgresql.ARRAY` and not
the plain :class:`.types.ARRAY`.
.. versionadded:: 1.1
"""
    kw['type_'] = ARRAY(functions._type_from_args(arg))
return functions.func.array_agg(*arg, **kw)
|
JNRowe/versionah
|
tests/test_python_compat.py
|
Python
|
gpl-3.0
| 1,950
| 0
|
#
"""test_python_compat - Python output compatibility tests"""
# Copyright © 2012-2018 James Rowe <jnrowe@gmail.com>
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of versionah.
#
# versionah is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# versionah is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# versionah. If not, see <http://www.gnu.org/licenses/>.
from os import getenv
from shutil import which
from subprocess import PIPE, call
from pytest import mark, skip
from versionah.cmdline import CliVersion
@mark.requires_exec
@mark.requires_write
@mark.parametrize('interp', [
'python2.6',
'python2.7',
'python3.2',
'python3.3',
])
def test_python_compatibility(interp, tmpdir):
    if not which(interp):
skip('Interpreter {!r} unavailable'.format(interp))
file_loc = tmpdir.join('test_wr.py').strpath
CliVersion('1.0.1').write(file_loc, 'py')
    retval = call([interp, '-W', 'all', file_loc], stdout=PIPE,
stderr=PIPE)
assert retval == 0
# Test interps not available on travis-ci.org, but available on all our test
# machines
@mark.skipif(getenv('TRAVIS_PYTHON_VERSION'), reason='Unavailable on travis')
@mark.requires_exec
@mark.requires_write
@mark.parametrize('interp', [
'python2.4',
'python2.5',
'python3.1',
'python3.4',
])
def test_python_compatibility_extra(interp, tmpdir):
    if not which(interp):
        skip('Interpreter {!r} unavailable'.format(interp))
    test_python_compatibility(interp, tmpdir)
|
enthought/pyside
|
tests/QtGui/bug_389.py
|
Python
|
lgpl-2.1
| 414
| 0.007246
|
''' Test bug 389: http://bugs.openbossa.org/show_bug.cgi?id=389'''
import sys
import unittest
from helper import UsesQApplication
from PySide import QtCore,QtGui
class BugTest(UsesQApplication):
def testCase(self):
        s = QtGui.QWidget().style()
i = s.standardIcon(QtGui.QStyle.SP_TitleBarMinButton)
self.assertEqual(type(i), QtGui.QIcon)
if __name__ == '__main__':
unittest.main()
|
i-DAT-Qualia/Card-Backend
|
cards/admin.py
|
Python
|
apache-2.0
| 528
| 0.007576
|
from django.contrib import admin
from models import *
from cards.actions import export_as_xls
class ScanAdmin(admin.ModelAdmin):
list_filter = ['readerLocation', 'added']
search_fields = ['card__code']
# Register your models here.
admin.site.register(Batch)
admin.site.register(Card)
admin.site.register(Reader)
admin.site.register(Location)
admin.site.register(ReaderLocation)
admin.site.register(Scan, ScanAdmin)
class MyAdmin(admin.ModelAdmin):
actions = [export_as_xls]
admin.site.add_action(export_as_xls)
|
lgapontes/django-socialregistration
|
socialregistration/contrib/twitter/auth.py
|
Python
|
mit
| 487
| 0.002053
|
from django.contrib.auth.backends import ModelBackend
from django.contrib.sites.models import Site
from socialregistration.contrib.twitter.models import TwitterProfile
class TwitterAuth(ModelBackend):
def authenticate(self, twitter_id=None):
try:
return TwitterProfile.objects.get(
twitter_id=twitter_id,
site=Site.objects.get_current()
).user
except TwitterProfile.DoesNotExist:
return None
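For this backend to be consulted, it must be listed in the project settings; a typical (assumed) configuration looks like:
# settings.py (assumed project settings, not part of this module)
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'socialregistration.contrib.twitter.auth.TwitterAuth',
)
# django.contrib.auth.authenticate(twitter_id=12345) will then return the
# matching User for the current site, or None.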
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/histogram2d/xbins/_size.py
|
Python
|
mit
| 394
| 0.002538
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="size", parent_name="histogram2d.xbins", **kwargs):
super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
|
wzhang1984/Noncoding-tumor-mutation-paper
|
motif_analysis/ref2alt.py
|
Python
|
gpl-3.0
| 1,057
| 0.02649
|
info=[]
for line in open('./info_mappable_50.txt').read().rstrip().split('\n'):
a=line.split('\t')
info.append(a[0])
line_out=''
line_out2=''
seq=''
index=0
for line in open('./seqList_mappable_50.fa'):
if line[0]=='>':
if seq:
replace=seq[7:-7]
if replace!=ref:
print header
seq_alt=seq[:7]+alt+seq[-7:]
line_out+='>'+header+'_ref'+'\n'+seq+'\n'
                line_out2+='>'+header+'_alt'+'\n'+seq_alt+'\n'
header=info[index]
ref_alt=header.split('_')[1]
[ref,alt]=ref_alt.split('>')
index+=1
seq=''
        if index % 1000000 == 0:
print index
else:
seq+=line.split('\n')[0]
if seq:
replace=seq[7:-7]
if replace!=ref:
print header
        seq_alt=seq[:7]+alt+seq[-7:]
line_out+='>'+header+'_ref'+'\n'+seq+'\n'
line_out2+='>'+header+'_alt'+'\n'+seq_alt+'\n'
open('./seqList_mappable_50_ref.fa','wb').write(line_out)
open('./seqList_mappable_50_alt.fa','wb').write(line_out2)
|
sigma-geosistemas/clone
|
src/manage.py
|
Python
|
lgpl-3.0
| 248
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "clone.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
UdK-VPT/Open_eQuarter
|
mole/stat_corr/window_wall_ratio_east_MFH_by_building_age_lookup.py
|
Python
|
gpl-2.0
| 2,380
| 0.147121
|
# coding: utf8
# OeQ autogenerated lookup function for 'Window/Wall Ratio East in correlation to year of construction, based on the source data of the survey for the "German Building Typology developed by the "Institut für Wohnen und Umwelt", Darmstadt/Germany, 2011-2013'
import math
import numpy as np
import oeqLookuptable as oeq
def get(*xin):
l_lookup = oeq.lookuptable(
[
1849,0.031,
1850,0.031,
1851,0.03,
1852,0.027,
1853,0.024,
1854,0.025,
1855,0.03,
1856,0.042,
1857,0.06,
1858,0.082,
1859,0.105,
1860,0.128,
1861,0.15,
1862,0.168,
1863,0.18,
1864,0.18,
1865,0.18,
1866,0.18,
1867,0.18,
1868,0.179,
1869,0.179,
1870,0.179,
1871,0.18,
1872,0.18,
1873,0.18,
1874,0.18,
1875,0.18,
1876,0.18,
1877,0.18,
1878,0.18,
1879,0.18,
1880,0.18,
1881,0.18,
1882,0.18,
1883,0.18,
1884,0.18,
1885,0.18,
1886,0.18,
1887,0.18,
1888,0.18,
1889,0.18,
1890,0.18,
1891,0.18,
1892,0.18,
1893,0.18,
1894,0.18,
1895,0.18,
1896,0.18,
1897,0.18,
1898,0.18,
1899,0.18,
1900,0.18,
1901,0.18,
1902,0.18,
1903,0.18,
1904,0.18,
1905,0.18,
1906,0.18,
1907,0.18,
1908,0.179,
1909,0.179,
1910,0.179,
1911,0.18,
1912,0.18,
1913,0.18,
1914,0.18,
1915,0.18,
1916,0.168,
1917,0.15,
1918,0.128,
1919,0.105,
1920,0.082,
1921,0.06,
1922,0.042,
1923,0.03,
1924,0.025,
1925,0.024,
1926,0.027,
1927,0.03,
1928,0.031,
1929,0.031,
1930,0.031,
1931,0.03,
1932,0.03,
1933,0.03,
1934,0.03,
1935,0.03,
1936,0.03,
1937,0.03,
1938,0.03,
1939,0.03,
1940,0.03,
1941,0.03,
1942,0.03,
1943,0.03,
1944,0.03,
1945,0.03,
1946,0.029,
1947,0.026,
1948,0.02,
1949,0.012,
1950,0.003,
1951,0,
1952,0,
1953,0,
1954,0.014,
1955,0.036,
1956,0.062,
1957,0.09,
1958,0.118,
1959,0.144,
1960,0.165,
1961,0.18,
1962,0.18,
1963,0.18,
1964,0.18,
1965,0.18,
1966,0.173,
1967,0.166,
1968,0.158,
1969,0.15,
1970,0.141,
1971,0.133,
1972,0.125,
1973,0.12,
1974,0.118,
1975,0.117,
1976,0.118,
1977,0.12,
1978,0.121,
1979,0.12,
1980,0.113,
1981,0.1,
1982,0.078,
1983,0.053,
1984,0.028,
1985,0.01,
1986,0.002,
1987,0.002,
1988,0.006,
1989,0.01,
1990,0.011,
1991,0.01,
1992,0.009,
1993,0.01,
1994,0.013,
1995,0.019,
1996,0.025,
1997,0.03,
1998,0.034,
1999,0.036,
2000,0.038,
2001,0.04,
2002,0.043,
2003,0.045,
2004,0.048,
2005,0.05,
2006,0.051,
2007,0.051,
2008,0.05,
2009,0.05,
2010,0.05,
2011,0.05,
2012,0.05,
2013,0.05,
2014,0.05,
2015,0.05,
2016,0.05,
2017,0.05,
2018,0.05,
2019,0.05,
2020,0.05,
2021,0.05])
return(l_lookup.lookup(xin))
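The `oeqLookuptable` class itself is not shown here; purely as an illustration, its year-to-ratio lookup can be approximated with NumPy interpolation over the same flat [year, value, ...] list:
# Illustrative approximation of the lookup (not the oeq implementation).
def lookup_approx(flat_table, year):
    arr = np.asarray(flat_table, dtype=float).reshape(-1, 2)
    return float(np.interp(year, arr[:, 0], arr[:, 1]))

# lookup_approx([1849, 0.031, 1850, 0.031, 1851, 0.03], 1850) -> 0.031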
|
maroux/django-oauth2-provider
|
provider/compat/__init__.py
|
Python
|
mit
| 148
| 0
|
try:
    from django.contrib.auth.tests.utils import skipIfCustomUser
except ImportError:
    def skipIfCustomUser(wrapped):
return wrapped
|
vlegoff/cocomud
|
src/ui/sharp_editor.py
|
Python
|
bsd-3-clause
| 10,805
| 0.001574
|
# Copyright (c) 2016, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file contains the SharpEditor class."""
import wx
from ytranslate.tools import t
class SharpEditor(wx.Panel):
"""SharpScript editor panel.
This panel can be added into dialogs that have to support SharpScript
editing. On the top, at the left of the panel, is an optional
text field to edit SharpScript directly. To the right is a list
of functions already associated with this entry. After buttons
to edit and remove is a second list with new functions to be added.
"""
def __init__(self, dialog, engine, sharp, object, attribute,
text=False, escape=False):
"""Creates the frame.
Arguments:
dialog: the parent dialog.
engine: the game engine.
sharp: the SharpScript engine.
object: the object containing the field to be edited.
attribute: the attribute's name of the object to edit.
text (default to False): should a text field be added?
escape (default to false): the #send calls are removed.
If the SharpEditor is to modify a trigger, for instance,
particularly its "action" attribute, the trigger is the object
and "action" is the attribute's name.
"""
wx.Panel.__init__(self, dialog)
self.engine = engine
self.sharp_engine = sharp
self.object = object
self.attribute = attribute
self.text = None
self.escape = escape
script = getattr(self.object, self.attribute)
self.functions = sorted(sharp.functions.values(),
key=lambda function: function.name)
self.functions = [f for f in self.functions if f.description]
# Shape
sizer = wx.BoxSizer(wx.VERTICAL)
top = wx.BoxSizer(wx.HORIZONTAL)
bottom = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizer(sizer)
# Insert a text field
if text:
s_text = wx.BoxSizer(wx.VERTICAL)
l_text = wx.StaticText(self, label=t("common.action"))
t_text = wx.TextCtrl(self, value=script, style=wx.TE_MULTILINE)
self.text = t_text
s_text.Add(l_text)
s_text.Add(t_text)
top.Add(s_text)
# List of current functions
self.existing = wx.ListCtrl(self,
style=wx.LC_REPORT | wx.LC_SINGLE_SEL)
self.existing.InsertColumn(0, t("common.action"))
# Buttons
self.edit = wx.Button(self, label=t("ui.button.edit"))
self.remove = wx.Button(self, label=t("ui.button.remove"))
top.Add(self.existing)
top.Add(self.edit)
top.Add(self.remove)
self.populate_existing()
# List of functions
self.choices = wx.ListCtrl(self, style=wx.LC_REPORT | wx.LC_SINGLE_SEL)
self.choices.InsertColumn(0, t("common.description"))
self.populate_list()
bottom.Add(self.choices)
# Add button
self.add = wx.Button(self, label=t("ui.button.add_action"))
bottom.Add(self.add)
# Event binding
self.add.Bind(wx.EVT_BUTTON, self.OnAdd)
self.edit.Bind(wx.EVT_BUTTON, self.OnEdit)
self.remove.Bind(wx.EVT_BUTTON, self.OnRemove)
def populate_list(self):
"""Populate the list with function names."""
self.choices.DeleteAllItems()
for function in self.functions:
try:
description = t("sharp.{name}.description".format(
name=function.name))
except ValueError:
description = function.description
self.choices.Append((description, ))
self.choices.Select(0)
self.choices.Focus(0)
def populate_existing(self):
"
|
""Populate the list with existing functions."""
self.existing.DeleteAllItems()
script = getattr(self.object, self.attribute)
if self.text:
self.text.SetValue(script)
lines = self.sharp_engine.format(script, return_str=False)
for line in lines:
self.existing.Append((line, ))
self.existing.Select(0)
self.existing.Focus(0)
if lines:
self.existing.Enable()
self.edit.Enable()
self.remove.Enable()
else:
self.existing.Disable()
self.edit.Disable()
self.remove.Disable()
def OnAdd(self, e):
"""The 'add' button is pressed."""
index = self.choices.GetFirstSelected()
try:
function = self.functions[index]
except IndexError:
wx.MessageBox(t("ui.message.sharp.missing"),
t("ui.message.error"), wx.OK | wx.ICON_ERROR)
else:
dialog = AddEditFunctionDialog(self.engine, self.sharp_engine,
function, self.object, self.attribute, escape=self.escape)
dialog.ShowModal()
self.populate_existing()
self.existing.SetFocus()
def OnEdit(self, e):
"""The 'edit' button is pressed."""
index = self.existing.GetFirstSelected()
script = getattr(self.object, self.attribute)
lines = self.sharp_engine.format(script, return_str=False)
try:
line = lines[index]
except IndexError:
wx.MessageBox(t("ui.message.sharp.missing"),
t("ui.message.error"), wx.OK | wx.ICON_ERROR)
else:
name, arguments, flags = self.sharp_engine.extract_arguments(line)
function = self.sharp_engine.functions[name[1:]]
dialog = AddEditFunctionDialog(self.engine, self.sharp_engine,
function, self.object, self.attribute, index,
escape=self.escape)
dialog.ShowModal()
self.populate_existing()
self.existing.SetFocus()
def OnRemove(self, e):
"""The 'remove' button is pressed."""
index = self.existing.GetFirstSelected()
script = getattr(self.object, self.attribute)
lines = self.sharp_engine.format(script, return_str=False)
try:
line = lines[index]
except IndexError:
wx.MessageBox(t("ui.message.sharp.missing"),
t("ui.message.error"), wx.OK | wx.ICON_ERROR)
else:
value = wx.MessageBox(t("ui.message.sharp.remove",
line=line), t("ui.alert.confirm"),
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
if value == wx.YES:
del lines[index]
content = "\n".join(lines)
                setattr(self.object, self.attribute, content)
|
userzimmermann/robotframework-python3
|
atest/testresources/testlibs/objecttoreturn.py
|
Python
|
apache-2.0
| 343
| 0.011662
|
try:
    import exceptions
except ImportError: # Python 3
import builtins as exceptions
class ObjectToReturn:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def exception(self, name, msg=""):
        exception = getattr(exceptions, name)
raise exception(msg)
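A short usage sketch of the helper above; `exception` resolves a builtin exception class by name and raises it:
obj = ObjectToReturn('example')
try:
    obj.exception('ValueError', 'boom')
except ValueError as err:
    print(err)  # -> boom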
|
llvm-mirror/lldb
|
packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-objc/TestDataFormatterObjCNSError.py
|
Python
|
apache-2.0
| 1,050
| 0
|
# encoding: utf-8
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
from ObjCDataFormatterTestCase import ObjCDataFormatterTestCase
class ObjCDataFormatterNSError(ObjCDataFormatterTestCase):
    @skipUnlessDarwin
def test_nserror_with_run_command(self):
"""Test formatters for NSError."""
self.appkit_tester_impl(self.nserror_data_formatter_commands)
def nserror_data_formatter_commands(self):
self.expect(
'frame variable nserror', substrs=['domain: @"Foobar" - code: 12'])
self.expect(
'frame variable nserrorptr',
substrs=['domain: @"Foobar" - code: 12'])
self.expect(
'frame variable nserror->_userInfo', substrs=['2 key/value pairs'])
self.expect(
'frame variable nserror->_userInfo --ptr-depth 1 -d run-target',
substrs=['@"a"', '@"b"', "1", "2"])
|
aseemm/flask-template
|
wentries.py
|
Python
|
bsd-3-clause
| 256
| 0
|
from app import write_entries
import datetime
import random
ts = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
offset = random.randrange(0, 1475)
print("Enter user=%s" % ts)
print("Enter email=%s" % offset)
prime = write_entries.delay(ts, offset)
|
jablonskim/jupyweave
|
jupyweave/settings/align_types.py
|
Python
|
mit
| 137
| 0
|
from enum import Enum
class ImageAlignType(Enum):
"""Image alignment"""
Default = 1
Left = 2
    Right = 3
    Center = 4
|
Techblogogy/magic-mirror-base
|
server/routes/gcal.py
|
Python
|
gpl-3.0
| 2,343
| 0.003841
|
import decor
from flask import Blueprint, redirect, request, url_for
import os, json
def construct_bp(gcal, JSON_DENT):
ALLOWED_ORIGIN = "*"
# JSON_DENT = 4
gcal_api = Blueprint('gcal_api', __name__, url_prefix="/gcal")
# GOOGLE CALENDAR API Routes
# Authenication routes
@gcal_api.route('/auth2callback')
def gauth_callback():
return redirect(gcal.auth_callback(request.args.get('code')))
@gcal_api.route('/gauth')
def gauth_call():
return redirect(gcal.get_auth_uri())
@gcal_api.route('/isauth')
def gauth_isauth():
return json.dumps({'is_needed': not gcal.need_auth()})
@gcal_api.route('/istblexist')
def gauth_istblex():
return json.dumps({'is_exist': gcal.if_cal_tbl()})
@gcal_api.route('/deauth')
def gauth_deauth():
return redirect(gcal.deauth_usr())
# Get todays events
    @gcal_api.route('/today', methods=['GET','OPTIONS'])
@decor.crossdomain(origin=ALLOWED_ORIGIN)
def gcal_today():
return json.dumps(gcal.get_today(), indent=JSON_DENT)
# Get calendars
@gcal_api.route('/calendars', methods=['GET','OPTIONS'])
@decor.crossdomain(origin=ALLOWED_ORIGIN)
def gcal_cals():
return json.dumps(gcal.get_cals(), indent=JSON_DENT)
# Save calendars
@gcal_api.route('/add/calendars', methods=['POST','OPTIONS'])
@decor.crossdomain(origin=ALLOWED_ORIGIN)
def gcal_save_cals():
# print request.form.getlist('ids[]')
gcal.add_cals(request.form.getlist('ids[]'))
# print request.form
redirect(url_for('setcal'))
# return json.dumps(gcal.get_ucals(), indent=JSON_DENT)
return '<meta http-equiv="refresh" content ="0; URL=http://localhost:5000/setcal">'
# Get todays events
@gcal_api.route('/mail', methods=['GET'])
def gcal_mail():
return json.dumps(gcal.get_mail(), indent=JSON_DENT)
# === JSON Error Handling ===
# @gcal_api.errorhandler(400)
# def err_400(e):
# return '{"status": 400, "message":"Bad request"}', 400
@gcal_api.errorhandler(404)
def err_404(e):
return '{"status": 404, "message":"Page not found"}', 404
@gcal_api.errorhandler(500)
def err_500(e):
return '{"status": 500, "message":"Internal server error"}', 500
return gcal_api
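A minimal wiring sketch; the concrete calendar wrapper is not defined in this module, so `_StubGcal` below is an assumed stand-in exposing just enough of the interface:
# Assumed wiring; the real `gcal` object lives elsewhere.
from flask import Flask

class _StubGcal(object):
    def need_auth(self):
        return True
    def get_today(self):
        return []

app = Flask(__name__)
app.register_blueprint(construct_bp(_StubGcal(), 4))
# Routes are now served under the /gcal prefix, e.g. GET /gcal/today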
|
kkopachev/thumbor
|
thumbor/filters/format.py
|
Python
|
mit
| 823
| 0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from thumbor.filters import BaseFilter, filter_method
from thumbor.utils import logger
ALLOWED_FORMATS = ["png", "jpeg", "jpg", "gif", "webp"]
class Filter(BaseFilter):
    @filter_method(BaseFilter.String)
async def format(self, file_format):
if file_format.lower() not in ALLOWED_FORMATS:
logger.debug("Format not allowed: %s", file_format.lower())
self.context.request.format = None
else:
logger.debug("Format specified: %s", file_format.lower())
self.context.request.format = file_format.lower()
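In a thumbor request this filter is selected through the URL's filters segment; an illustrative example (host and image path are placeholders):
# Illustrative request using this filter:
#   http://localhost:8888/unsafe/filters:format(webp)/example.com/image.jpg
# A value outside ALLOWED_FORMATS leaves context.request.format as None,
# so the engine falls back to its default output format.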
|
IT-PM-OpenAdaptronik/Webapp
|
apps/calc/measurement/calculus.py
|
Python
|
mit
| 3,360
| 0.009226
|
import numpy as np
import json
import scipy as sci
def get_decimal_delta(data, index,decimals):
'''
This function calculates the difference between the values of one column
:param data: the data array
    :param index: the index of the column of interest
:param decimals: Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of positions to the left of the decimal point.
:return: a list of distances between all values in the column
'''
res = []
for t1, t2 in zip(data[:-1,int(index)], data[1:,int(index)]):
res.append(np.around(np.float64(t2) - np.float64(t1),decimals))
return np.array(res)
def get_delta(data, index):
'''
This function calculates the difference between the values of one column
:param data: the data array
    :param index: the index of the column of interest
:return: a list of distances between all values in the column
'''
realsol = []
i=1
while i < len(data[0:,index]):
intervall = data[i, index] - data[i - 1,index]
realsol.append(intervall)
i += 1
realsol = np.array(realsol)
return realsol
def get_average_delta(data, index):
'''
This function calculates the average difference between the values of one column
:param data: the data array
:param time_index: the index of the column of interest
:return: average between all values in the column
'''
deltas = get_decimal_delta(data, index, 7)
    return sum(deltas) / len(deltas)
def numerical_approx(data, diff_Value1_Index, diff_Value2_Index = 0):
'''
    This method derives one data column by another.
    Example: d Speed / d Time = Acceleration
    :param data: the JSON-encoded 2D data array
    :param diff_Value1_Index: index of the column to get the derivative of
    :param diff_Value2_Index: index of the deriving column (usually the time index)
    :return: a NumPy array of the derivative values, padded with a leading 0
'''
diff_Value = []
    diff_Value.append(np.float_(0.000))
data = np.array(json.loads(data), dtype=np.float64)
for v1, t1 in zip(get_delta(data, int(diff_Value1_Index)), get_delta(data, int(diff_Value2_Index))):
diff_Value.append(v1 / t1)
return np.asarray(diff_Value)
def trapez_for_each(data, index_x, index_y):
"""
    This method integrates the given values with the trapezoidal rule.
    :param index_x: index of the x axis
    :param index_y: index of the y axis
    :return: integrated values from x, y
"""
i = 1
sol = []
data =np.array(json.loads(data),dtype=np.float64)
#data =np.array(json.loads(data),dtype=np.float_)
while i < len(data[:,index_x]):
res = sci.trapz(data[0:i, index_y], data[0:i, index_x])
res = np.float_(res)
sol.append(res)
i += 1
i = 0
realsol = []
while i < len(sol):
intervall = sol[i] - sol[i - 1]
if i == 0:
realsol.append(np.float_(0))
realsol.append(intervall)
i += 1
realsol= np.array(realsol)
return realsol
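A worked sketch of the two entry points on a toy time/speed table; the JSON round-trip mirrors the string input these functions expect:
if __name__ == '__main__':
    # column 0: time [s], column 1: speed [m/s]
    sample = json.dumps([[0.0, 0.0], [1.0, 2.0], [2.0, 4.0]])
    # d(speed)/d(time): constant acceleration of 2, padded with a leading 0
    print(numerical_approx(sample, 1, 0))   # [0. 2. 2.]
    print(trapez_for_each(sample, 0, 1))    # per-step increments of the trapezoidal integral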
|
edwarod/quickbot_bbb
|
test.py
|
Python
|
bsd-3-clause
| 571
| 0.033275
|
import Adafruit_BBIO.GPIO as GPIO
import time
a=0
b=0
def derecha(channel):
global a
a+=1
    print 'right count is {0}'.format(a)
def izquierda(channel):
global b
b+=1
    print 'left count is {0}'.format(b)
GPIO.setup("P9_11", GPIO.IN)
GPIO.setup("P9_13", GPIO.IN)
GPIO.add_event_detect("P9_11", GPIO.BOTH)
GPIO.add_event_detect("P9_13", GPIO.BOTH)
GPIO.add_event_callback("P9_11",derecha)
GPIO.add_event_callback("P9_13",izquierda)
#if GPIO.event_detected("GPIO_31"):
#    print "event detected"
while True:
    print "things happen"
time.sleep(1)
|
kbase/narrative_method_store
|
test/data/test_repo_1/service/GenomeFeatureComparatorServer.py
|
Python
|
mit
| 25,877
| 0.002164
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import urlparse as _urlparse
import random as _random
import os
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
    if not get_config_file() or not get_service_name():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name()):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from GenomeFeatureComparatorImpl import GenomeFeatureComparator
impl_GenomeFeatureComparator = GenomeFeatureComparator(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
async_run_methods = {}
async_check_methods = {}
async_run_methods['GenomeFeatureComparator.compare_genome_features_async'] = ['GenomeFeatureComparator', 'compare_genome_features']
async_check_methods['GenomeFeatureComparator.compare_genome_features_check'] = ['GenomeFeatureComparator', 'compare_genome_features']
class AsyncJobServiceClient(object):
def __init__(self, timeout=30 * 60, token=None,
ignore_authrc=True, trust_all_ssl_certificates=False):
url = environ.get('KB_JOB_SERVICE_URL', None)
if url is None and config is not None:
url = config.get('job-service-url')
if url is None:
raise ValueError('Neither \'job-service-url\' parameter is defined in '+
'configuration nor \'KB_JOB_SERVICE_URL\' variable is defined in system')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
if token is None:
raise ValueError('Authentication is required for async methods')
self._headers['AUTHORIZATION'] = token
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_call_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_call_context:
arg_hash['context'] = json_rpc_call_context
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json':
err = json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
resp = json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def run_job(self, run_job_params, json_rpc_call_context = None):
return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0]
def check_job(self, job_id, json_rpc_call_context = None):
return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0]
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.message
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
                    responds.append(respond)
            return responds
|
google-research/google-research
|
robust_optim/data.py
|
Python
|
apache-2.0
| 2,993
| 0.007016
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for generating a logistic dataset.
x ~ N(0, I_d)
y ~ Bernoulli(sigmoid(-(1/temp) w^T x))
"""
import jax
from jax import numpy as jnp
def logistic_dataset_init_param(dim, r, rng_key):
param0 = jax.random.normal(rng_key, (dim, 1))
param0_norm = jnp.linalg.norm(param0)
param = param0 / param0_norm * r
return param
def logistic_dataset_gen_data(num, w, dim, temp, rng_key):
"""Samples data from a standard Gaussian with binary noisy labels.
Args:
num: An integer denoting the number of data points.
w: An array of size dim x odim, the weight vector used to generate labels.
dim: An integer denoting the number of input dimensions.
temp: A float denoting the temperature parameter controlling label noise.
rng_key: JAX random number generator key.
  Returns:
x: An array of size dim x num denoting data points.
y_pm: An array of size num x odim denoting +/-1 labels.
"""
rng_subkey = jax.random.split(rng_key, 3)
x = jax.random.normal(rng_subkey[0], (dim, num))
prob = jax.nn.sigmoid(-(1 / temp) * w.T.dot(x))
y = jax.random.bernoulli(rng_subkey[1], (prob))
y_pm = 2. * y - 1
return x, y_pm
def logistic_dataset_gen_train_test(config, rng_key):
"""Creates the train and test sets of a logistic dataset.
Args:
config: Dictionary of parameters.
config.dim: A float denoting input dimensionality.
config.r: A float denoting L2 norm of the true parameters.
config.num_train: An integer denoting the number of training data.
config.num_test: An integer denoting the number of test data.
rng_key: JAX random number generator key.
Returns:
train_data: The tuple (input, label) of training data.
test_data: The tuple (input, label) of test data.
"""
dim = config.dim
temp = config.temperature
rng_subkey = jax.random.split(rng_key, 3)
param = logistic_dataset_init_param(dim, config.r, rng_subkey[0])
train_data = logistic_dataset_gen_data(config.num_train, param, dim, temp,
rng_subkey[1])
test_data = logistic_dataset_gen_data(config.num_test, param, dim, temp,
rng_subkey[2])
return train_data, test_data
def get_train_test_generator(dataset):
if dataset == 'logistic':
return logistic_dataset_gen_train_test
raise NotImplementedError('Dataset not found.')
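A usage sketch with a minimal config object; `SimpleNamespace` stands in for whatever configuration container the caller normally supplies, which is an assumption:
from types import SimpleNamespace

config = SimpleNamespace(dim=5, r=1.0, temperature=0.1,
                         num_train=100, num_test=50)
key = jax.random.PRNGKey(0)
(x_tr, y_tr), (x_te, y_te) = logistic_dataset_gen_train_test(config, key)
# x_tr has shape (dim, num_train) == (5, 100); y_tr holds +/-1 labels of shape (1, 100)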
|
meduz/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
Python
|
bsd-3-clause
| 17,473
| 0.000057
|
import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
'neg_mean_squared_error', 'neg_mean_squared_log_error',
'neg_median_absolute_error', 'mean_absolute_error',
'mean_squared_error', 'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'neg_log_loss', 'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
    f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
    # Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
        assert_almost_equal(score1, score2)
|
lautr3k/RepRap-iTopie
|
odmt/ezdxf/ac1021/__init__.py
|
Python
|
gpl-3.0
| 330
| 0
|
# Purpose: dxf engine for R2007/AC1021
# Created: 12.03.2011
# Copyright (C) , Manfred Moitzi
# License: MIT License
from __future__ import unicode_literals
__author__ = "mozman <mozman@gmx.at>"
from .headervars import VARMAP
from ..ac1018 import AC1018Factory
class AC1021Factory(AC1018Factory):
    HEADERVARS = dict(VARMAP)
|
Curly-Mo/audio
|
__init__.py
|
Python
|
mit
| 23
| 0
|
from audio.io import *
|
xu6148152/Binea_Python_Project
|
PythonCookbook/meta/newlower.py
|
Python
|
mit
| 1,116
| 0.000896
|
# -*- encoding: utf-8 -*-
import ast
import inspect
class NameLower(ast.NodeVisitor):
def __init__(self, lowered_names):
self.lowered_names = lowered_names
def visit_FunctionDef(self, node):
code = '__globals = globals()\n'
code += '\n'.join("{0} = __globals['{0}']".format(name) for name in self.lowered_names)
code_ast = ast.parse(code, mode='exec')
node.body[:0] = code_ast.body
self.func = node
def lower_names(*namelist):
    def lower(func):
        srclines = inspect.getsource(func).splitlines()
for n, line in enumerate(srclines):
if '@lower_names' in line:
break
src = '\n'.join(srclines[n + 1:])
        if src.startswith((' ', '\t')):
src = 'if 1:\n' + src
top = ast.parse(src, mode='exec')
cl = NameLower(namelist)
cl.visit(top)
temp = {}
exec(compile(top, '', 'exec'), temp, temp)
func.__code__ = temp[func.__name__].__code__
return func
return lower
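A usage sketch of the decorator; it rewrites the decorated function's body so the listed globals are copied into locals once per call (the names below are illustrative, and inspect.getsource requires the code to live in a real file):
INCR = 1  # module-level global that the decorator localizes

@lower_names('INCR')
def count_up(n):
    total = 0
    for _ in range(n):
        total += INCR  # after the AST rewrite this reads a local snapshot
    return total
# count_up(3) -> 3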
|
PyWavelets/pywt
|
pywt/tests/test_dwt_idwt.py
|
Python
|
mit
| 10,352
| 0.00058
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_allclose, assert_, assert_raises,
assert_array_equal)
import pywt
# Check that float32, float64, complex64, complex128 are preserved.
# Other real types get converted to float64.
# complex256 gets converted to complex128
dtypes_in = [np.int8, np.float16, np.float32, np.float64, np.complex64,
np.complex128]
dtypes_out = [np.float64, np.float32, np.float32, np.float64, np.complex64,
np.complex128]
# test complex256 as well if it is available
try:
dtypes_in += [np.complex256, ]
dtypes_out += [np.complex128, ]
except AttributeError:
pass
def test_dwt_idwt_basic():
x = [3, 7, 1, 1, -2, 5, 4, 6]
cA, cD = pywt.dwt(x, 'db2')
cA_expect = [5.65685425, 7.39923721, 0.22414387, 3.33677403, 7.77817459]
cD_expect = [-2.44948974, -1.60368225, -4.44140056, -0.41361256,
1.22474487]
assert_allclose(cA, cA_expect)
assert_allclose(cD, cD_expect)
x_roundtrip = pywt.idwt(cA, cD, 'db2')
assert_allclose(x_roundtrip, x, rtol=1e-10)
# mismatched dtypes OK
x_roundtrip2 = pywt.idwt(cA.astype(np.float64), cD.astype(np.float32),
'db2')
assert_allclose(x_roundtrip2, x, rtol=1e-7, atol=1e-7)
assert_(x_roundtrip2.dtype == np.float64)
def test_idwt_mixed_complex_dtype():
x = np.arange(8).astype(float)
x = x + 1j*x[::-1]
cA, cD = pywt.dwt(x, 'db2')
x_roundtrip = pywt.idwt(cA, cD, 'db2')
assert_allclose(x_roundtrip, x, rtol=1e-10)
# mismatched dtypes OK
x_roundtrip2 = pywt.idwt(cA.astype(np.complex128), cD.astype(np.complex64),
'db2')
assert_allclose(x_roundtrip2, x, rtol=1e-7, atol=1e-7)
assert_(x_roundtrip2.dtype == np.complex128)
def test_dwt_idwt_dtypes():
wavelet = pywt.Wavelet('haar')
for dt_in, dt_out in zip(dtypes_in, dtypes_out):
x = np.ones(4, dtype=dt_in)
errmsg = "wrong dtype returned for {0} input".format(dt_in)
cA, cD = pywt.dwt(x, wavelet)
assert_(cA.dtype == cD.dtype == dt_out, "dwt: " + errmsg)
x_roundtrip = pywt.idwt(cA, cD, wavelet)
assert_(x_roundtrip.dtype == dt_out, "idwt: " + errmsg)
def test_dwt_idwt_basic_complex():
x = np.asarray([3, 7, 1, 1, -2, 5, 4, 6])
x = x + 0.5j*x
cA, cD = pywt.dwt(x, 'db2')
cA_expect = np.asarray([5.65685425, 7.39923721, 0.22414387, 3.33677403,
7.77817459])
cA_expect = cA_expect + 0.5j*cA_expect
cD_expect = np.asarray([-2.44948974, -1.60368225, -4.44140056, -0.41361256,
1.22474487])
cD_expect = cD_expect + 0.5j*cD_expect
assert_allclose(cA, cA_expect)
assert_allclose(cD, cD_expect)
x_roundtrip = pywt.idwt(cA, cD, 'db2')
assert_allclose(x_roundtrip, x, rtol=1e-10)
def test_dwt_idwt_partial_complex():
x = np.asarray([3, 7, 1, 1, -2, 5, 4, 6])
x = x + 0.5j*x
cA, cD = pywt.dwt(x, 'haar')
cA_rec_expect = np.array([5.0+2.5j, 5.0+2.5j, 1.0+0.5j, 1.0+0.5j,
1.5+0.75j, 1.5+0.75j, 5.0+2.5j, 5.0+2.5j])
cA_rec = pywt.idwt(cA, None, 'haar')
assert_allclose(cA_rec, cA_rec_expect)
cD_rec_expect = np.array([-2.0-1.0j, 2.0+1.0j, 0.0+0.0j, 0.0+0.0j,
-3.5-1.75j, 3.5+1.75j, -1.0-0.5j, 1.0+0.5j])
cD_rec = pywt.idwt(None, cD, 'haar')
assert_allclose(cD_rec, cD_rec_expect)
assert_allclose(cA_rec + cD_rec, x)
def test_dwt_wavelet_kwd():
x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
w = pywt.Wavelet('sym3')
cA, cD = pywt.dwt(x, wavelet=w, mode='constant')
cA_expect = [4.38354585, 3.80302657, 7.31813271, -0.58565539, 4.09727044,
7.81994027]
cD_expect = [-1.33068221, -2.78795192, -3.16825651, -0.67715519,
-0.09722957, -0.07045258]
assert_allclose(cA, cA_expect)
assert_allclose(cD, cD_expect)
def test_dwt_coeff_len():
x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
w = pywt.Wavelet('sym3')
ln_modes = [pywt.dwt_coeff_len(len(x), w.dec_len, mode) for mode in
pywt.Modes.modes]
expected_result = [6, ] * len(pywt.Modes.modes)
expected_result[pywt.Modes.modes.index('periodization')] = 4
assert_allclose(ln_modes, expected_result)
ln_modes = [pywt.dwt_coeff_len(len(x), w, mode) for mode in
pywt.Modes.modes]
assert_allclose(ln_modes, expected_result)
def test_idwt_none_input():
# None input equals arrays of zeros of the right length
res1 = pywt.idwt([1, 2, 0, 1], None, 'db2', 'symmetric')
res2 = pywt.idwt([1, 2, 0, 1], [0, 0, 0, 0], 'db2', 'symmetric')
assert_allclose(res1, res2, rtol=1e-15, atol=1e-15)
res1 = pywt.idwt(None, [1, 2, 0, 1], 'db2', 'symmetric')
res2 = pywt.idwt([0, 0, 0, 0], [1, 2, 0, 1], 'db2', 'symmetric')
assert_allclose(res1, res2, rtol=1e-15, atol=1e-15)
# Only one argument at a time can be None
assert_raises(ValueError, pywt.idwt, None, None, 'db2', 'symmetric')
def test_idwt_invalid_input():
# Too short, min length is 4 for 'db4':
assert_raises(ValueError, pywt.idwt, [1, 2, 4], [4, 1, 3], 'db4', 'symmetric')
def test_dwt_single_axis():
x = [[3, 7, 1, 1],
[-2, 5, 4, 6]]
cA, cD = pywt.dwt(x, 'db2', axis=-1)
cA0, cD0 = pywt.dwt(x[0], 'db2')
cA1, cD1 = pywt.dwt(x[1], 'db2')
assert_allclose(cA[0], cA0)
assert_allclose(cA[1], cA1)
assert_allclose(cD[0], cD0)
assert_allclose(cD[1], cD1)
def test_idwt_single_axis():
x = [[3, 7, 1, 1],
[-2, 5, 4, 6]]
x = np.asarray(x)
x = x + 1j*x # test with complex data
    cA, cD = pywt.dwt(x, 'db2', axis=-1)
x0 = pywt.idwt(cA[0], cD[0], 'db2', axis=-1)
x1 = pywt.idwt(cA[1], cD[1], 'db2', axis=-1)
assert_allclose(x[0], x0)
assert_allclose(x[1], x1)
def test_dwt_invalid_input():
x = np.arange(1)
assert_raises(ValueError, pywt.dwt, x, 'db2', 'reflect')
assert_raises(ValueError, pywt.dwt, x, 'haar', 'antireflect')
def test_dwt_axis_arg():
x = [[3, 7, 1, 1],
[-2, 5, 4, 6]]
cA_, cD_ = pywt.dwt(x, 'db2', axis=-1)
cA, cD = pywt.dwt(x, 'db2', axis=1)
assert_allclose(cA_, cA)
assert_allclose(cD_, cD)
def test_dwt_axis_invalid_input():
x = np.ones((3,1))
assert_raises(ValueError, pywt.dwt, x, 'db2', 'reflect')
def test_idwt_axis_arg():
x = [[3, 7, 1, 1],
[-2, 5, 4, 6]]
cA, cD = pywt.dwt(x, 'db2', axis=1)
x_ = pywt.idwt(cA, cD, 'db2', axis=-1)
x = pywt.idwt(cA, cD, 'db2', axis=1)
assert_allclose(x_, x)
def test_dwt_idwt_axis_excess():
x = [[3, 7, 1, 1],
[-2, 5, 4, 6]]
# can't transform over axes that aren't there
assert_raises(ValueError,
pywt.dwt, x, 'db2', 'symmetric', axis=2)
assert_raises(ValueError,
pywt.idwt, [1, 2, 4], [4, 1, 3], 'db2', 'symmetric', axis=1)
def test_error_on_continuous_wavelet():
# A ValueError is raised if a Continuous wavelet is selected
data = np.ones((32, ))
for cwave in ['morl', pywt.DiscreteContinuousWavelet('morl')]:
assert_raises(ValueError, pywt.dwt, data, cwave)
cA, cD = pywt.dwt(data, 'db1')
assert_raises(ValueError, pywt.idwt, cA, cD, cwave)
def test_dwt_zero_size_axes():
# raise on empty input array
assert_raises(ValueError, pywt.dwt, [], 'db2')
# >1D case uses a different code path so check there as well
x = np.ones((1, 4))[0:0, :] # 2D with a size zero axis
assert_raises(ValueError, pywt.dwt, x, 'db2', axis=0)
def test_pad_1d():
x = [1, 2, 3]
assert_array_equal(pywt.pad(x, (4, 6), 'periodization'),
[1, 2, 3, 3, 1, 2, 3, 3, 1, 2, 3, 3, 1, 2])
assert_array_equal(pywt.pad(x, (4, 6), 'periodic'),
[3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3])
assert_array_equal(pywt.pad(x, (4, 6), 'constant'),
[1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3])
    assert_array_equal(pywt.pad(x, (4, 6), 'zero'),
                       [0, 0, 0, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0])
|
public0821/nettest
|
nettest/packets/base.py
|
Python
|
apache-2.0
| 7,940
| 0.005038
|
from .fields import BitField, Field
from nettest.exceptions import NettestError
import struct
class PacketMeta(type):
def __new__(cls, name, bases, attrs):
fields = attrs.get('fields')
if fields is None:
raise NettestError(_("packet class must have 'fields' field"))
_fields = []
for fieldname in attrs['fields']:
field = attrs.get(fieldname)
if field is None:
for baseclass in bases:
field = getattr(baseclass, fieldname)
if field is not None:
break
else:
                    raise NettestError(_("field '%s' doesn't exist in class %s")%(fieldname, name))
if not cls.__check_field_type(cls, field):
raise NettestError(_("field '%s' in class %s should be in type (Field, Packet, list)")%(fieldname, name))
_fields.append((fieldname, field))
if isinstance(field, Field):
attrs[fieldname] = field.default_value
if '_fields' in attrs:
            raise NettestError(_("the name '_fields' is reserved in class %s")%(name))
attrs['_fields']= _fields
return super(PacketMeta, cls).__new__(cls, name, bases, attrs)
@staticmethod
def __check_field_type(cls, field):
if not isinstance(field, (Field, Packet, list)):
return False
if isinstance(field, (list)):
for subfield in field:
if not cls.__check_field_type(cls, subfield):
return False
return True
class BitDumper(object):
def __init__(self):
self.data= []
self.data_len = []
self.data_len_sum = 0
def clear(self):
self.data = []
self.data_len = []
self.data_len_sum = 0
def push(self, data, length):
data = int(data)
if data < 0 or data > 2**length:
raise NettestError(_("bit value out of range"))
self.data.append(data)
self.data_len.append(length)
self.data_len_sum += length
def dump(self):
if self.data_len_sum % 8 != 0:
raise NettestError(_("incorrect bit field length"))
data = 0
left_len = self.data_len_sum
index = 0
for field_data in self.data:
data += field_data<<(left_len - self.data_len[index])
left_len -= self.data_len[index]
index += 1
length = self.data_len_sum / 8
if length == 1:
return struct.pack('!B', int(data))
elif length == 2:
return struct.pack('!H', int(data))
elif length == 4:
return struct.pack('!I', int(data))
elif length == 8:
return struct.pack('!Q', int(data))
else:
raise NettestError(_("too long bit field"))
class BitLoader(object):
def __init__(self, packet):
self.fields = []
self.bit_len_sum = 0
self.packet = packet
def clear(self):
self.fields = []
self.bit_len_sum = 0
def push(self, fieldname, field):
self.fields.append((fieldname,field))
self.bit_len_sum += field.length
def load(self, data):
if self.bit_len_sum % 8 != 0:
raise NettestError(_("incorrect bit field length"))
byte_len = int(self.bit_len_sum / 8)
data = data[:byte_len]
loaded_len = 0
for field_name, field in self.fields:
field_data = field.from_netbytes(data, loaded_len)
loaded_len += field.length
setattr(self.packet, field_name, field_data)
return byte_len
class Packet(object, metaclass=PacketMeta):
'''define field order
'''
fields=[]
def __init__(self):
for field_name, field in self._fields:
if isinstance(field, Packet):
setattr(self, field_name, field.__class__())
def dump(self):
'''Serialize self to bytes
'''
data = b''
bit_dumper = BitDumper()
for field_name, field in self._fields:
field_value = getattr(self, field_name)
if field_value is None:
raise NettestError(_("%s is None and haven't default value")%(field_name))
if isinstance(field, BitField):
bit_dumper.push(field_value, field.length)
continue
else:
if bit_dumper.data_len_sum > 0:
data += bit_dumper.dump()
bit_dumper.clear()
if isinstance(field, Packet):
data += field_value.dump()
continue
data += field.to_netbytes(field_value)
if bit_dumper.data_len_sum > 0:
data += bit_dumper.dump()
return data
# def __dump_list_data(self, fields):
# data = b''
# for field in fields:
# if isinstance(field, Packet):
# data += field.dump()
# continue
# if isinstance(field, list):
# data += self.__dump_list_data()
# continue
# if isinstance(field, Field):
# data += field.to_netbytes(field_value)
# continue
def load(self, data):
'''Deserialize bytes to a self.
if success, return the total data length used
else return None
'''
loaded_len = 0
bit_loader = BitLoader(self)
for field_name, field in self._fields:
if isinstance(field, BitField):
bit_loader.push(field_name, field)
continue
else:
if bit_loader.bit_len_sum > 0:
loaded_len += bit_loader.load(data[loaded_len:])
bit_loader.clear()
if isinstance(field, Packet):
field_value = getattr(self, field_name)
length = field_value.load(data[loaded_len:])
if length is None:
return None
loaded_len += length
continue
field_data = field.from_netbytes(data[loaded_len:])
if field_data is None:
return None
loaded_len += field.length
setattr(self, field_name, field_data)
if bit_loader.bit_len_sum > 0:
loaded_len += bit_loader.load(data[loaded_len:])
return loaded_len
def to_printable(self):
string = ''
string += '-'*20+str(self.__class__.__name__)+'-'*20+'\n'
for field_name, field in self._fields:
field_value = getattr(self, field_name)
if field_value is None:
string += '%s\tNone\n'%(field_name)
elif isinstance(field, Packet):
string += '%s\t%s\n'%(field_name, field_value.to_printable())
else:
string += '%s\t%s\n'%(field_name, field.to_printable(field_value))
string += '-'*(40+len(self.__class__.__name__))+'\n'
return string
def __eq__(self, other):
for field_name in self.fields:
field_value1 = getattr(self, field_name)
field_value2 = getattr(other, field_name)
if field_value1 != field_value2:
return False
return True
@property
def length(self):
total_len = 0
bit_len = 0
for field_name, field in self._fields:
if isinstance(field, BitField):
bit_len += field.length
elif field.length > 0:
total_len += field.length
else:
field_value = getattr(self, field_name)
total_len += len(field_value)
total_len += int(bit_len/8)
return total_len
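A hypothetical subclass sketch; the real Field/BitField constructor signatures live in nettest.packets.fields and may differ from what is assumed here:
class ExampleHeader(Packet):
    # BitField(length=4) is an assumed signature; see nettest.packets.fields.
    fields = ['version', 'flags']
    version = BitField(length=4)
    flags = BitField(length=4)

# pkt = ExampleHeader(); pkt.version = 4; pkt.flags = 0
# pkt.dump() packs both nibbles into a single byte; load() reverses it.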
|
fhocutt/grantsbot-matching
|
matching/utils.py
|
Python
|
lgpl-3.0
| 2,104
| 0.002376
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
utils
=====
Utility functions for matching.py.
"""
import datetime
import json
import os
#testme
def parse_timestamp(t):
"""Parse MediaWiki-style timestamps and return a datetime."""
if t == '0000-00-00T00:00:00Z':
return None
else:
return datetime.datetime.strptime(t, '%Y-%m-%dT%H:%M:%SZ')
def load_config(filepath):
"""Given the path to the config file, opens and returns the dict."""
configfile = os.path.join(filepath, 'config.json')
with open(configfile, 'rb') as configf:
config = json.loads(configf.read())
return config
#testme
def make_category_string(categories):
"""Given a list of categories, return the |-separated string."""
return '|'.join(categories)
def timelog(run_time, filepath):
"""Get the timestamp from the last run, then log the current time
(UTC).
"""
timelogfile = os.path.join(filepath, 'time.log') # fixme this currently only works because filepath is in the enclosing scope (main)
try:
with open(timelogfile, 'r+b') as timelog:
            prevruntimestamp = timelog.read()
timelog.seek(0)
timelog.write(datetime.datetime.strftime(run_time,
'%Y-%m-%dT%H:%M:%SZ'))
timelog.truncate()
except IOError:
with open(timelogfile, 'wb') as timelog:
prevruntimestamp = ''
timelog.write(datetime.datetime.strftime(run_time,
'%Y-%m-%dT%H:%M:%SZ'))
return prevruntimestamp
#testme
def buildgreeting(greeting, username, ideas):
"""Create a customized greeting string to be posted to a talk page
to present the IdeaLab member with a list of interesting ideas.
Return the wikitext-formatted greeting string.
"""
idea_string = ''
for idea in ideas:
title = idea['profile_title']
idea_string = u'{}* [[{}]]\n'.format(idea_string, title)
full_greeting = greeting.format(username, idea_string)
return full_greeting
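A small sketch of `buildgreeting`; the template is assumed to carry two positional `{}` slots, username first and the idea list second:
greeting = u'Hello {}! You might like:\n{}'
ideas = [{'profile_title': 'Idea:Example one'},
         {'profile_title': 'Idea:Example two'}]
print(buildgreeting(greeting, 'ExampleUser', ideas))
# Hello ExampleUser! You might like:
# * [[Idea:Example one]]
# * [[Idea:Example two]]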
|
beeleb/Kandori-Mailath-Rob
|
mc_tools.py
|
Python
|
bsd-3-clause
| 1,915
| 0.0047
|
"""
Filename: mc_tools.py
Authors: John Stachurski and Thomas J. Sargent
"""
import numpy as np
from discrete_rv import DiscreteRV
def mc_compute_stationary(P):
"""
Computes the stationary distribution of Markov matrix P.
Parameters
===========
P : a square 2D NumPy array
Returns: A flat array giving the stationary distribution
"""
n = len(P) # P is n x n
I = np.identity(n) # Identity matrix
B, b = np.ones((n, n)), np.ones((n, 1)) # Matrix and vector of ones
A = np.transpose(I - P + B)
solution = np.linalg.solve(A, b)
return solution.flatten() # Return a flat array
def mc_sample_path(P, init=0, sample_size=1000):
"""
    Generates one sample path from a finite Markov chain with (n x n) Markov
matrix P on state space S = {0,...,n-1}.
Parameters
==========
P : A nonnegative 2D NumPy array with rows that sum to 1
init : Either an integer in S or a nonnegative array of length n
with elements that sum to 1
sample_size : int
    If init is an integer, the integer is treated as the deterministic initial
condition. If init is a distribution on S, then X_0 is drawn from this
distribution.
Returns
========
A NumPy array containing the sample path
"""
# === set up array to store output === #
X = np.empty(sample_size, dtype=int)
if isinstance(init, int):
X[0] = init
else:
X[0] = DiscreteRV(init).draw()
# === turn each row into a distribution === #
# In particular, let P_dist[i] be the distribution corresponding to the
# i-th row P[i,:]
n = len(P)
P_dist = [DiscreteRV(P[i,:]) for i in range(n)]
# === generate the sample path === #
for t in range(sample_size - 1):
X[t+1] = P_dist[X[t]].draw()
return X
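A quick check of both helpers on a two-state chain whose stationary distribution is (2/3, 1/3):
if __name__ == '__main__':
    P = np.array([[0.9, 0.1],
                  [0.2, 0.8]])
    print(mc_compute_stationary(P))   # ~ [0.667 0.333]
    path = mc_sample_path(P, init=0, sample_size=10000)
    print((path == 0).mean())         # empirical frequency of state 0, ~0.667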
|
CINPLA/expipe-dev
|
python-neo/neo/core/irregularlysampledsignal.py
|
Python
|
gpl-3.0
| 20,385
| 0.001864
|
# -*- coding: utf-8 -*-
'''
This module implements :class:`IrregularlySampledSignal`, an array of analog
signals with samples taken at arbitrary time points.
:class:`IrregularlySampledSignal` derives from :class:`BaseNeo`, from
:module:`neo.core.baseneo`, and from :class:`quantites.Quantity`, which
inherits from :class:`numpy.array`.
Inheritance from :class:`numpy.array` is explained here:
http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
In brief:
* Initialization of a new object from constructor happens in :meth:`__new__`.
This is where user-specified attributes are set.
* :meth:`__array_finalize__` is called for all new objects, including those
created by slicing. This is where attributes are copied over from
the old object.
'''
# needed for Python 3 compatibility
from __future__ import absolute_import, division, print_function
import numpy as np
import quantities as pq
from neo.core.baseneo import BaseNeo, MergeError, merge_annotations
def _new_IrregularlySampledSignal(cls, times, signal, units=None, time_units=None, dtype=None,
copy=True, name=None, file_origin=None, description=None,
annotations=None):
'''
A function to map IrregularlySampledSignal.__new__ to function that
does not do the unit checking. This is needed for pickle to work.
'''
return cls(times=times, signal=signal, units=units, time_units=time_units,
dtype=dtype, copy=copy, name=name, file_origin=file_origin,
description=description, **annotations)
class IrregularlySampledSignal(BaseNeo, pq.Quantity):
'''
An array of one or more analog signals with samples taken at arbitrary time points.
A representation of one or more continuous, analog signals acquired at time
:attr:`t_start` with a varying sampling interval. Each channel is sampled
    at the same time points.
*Usage*::
>>> from neo.core import IrregularlySampledSignal
>>> from quantities import s, nA
>>>
        >>> irsig0 = IrregularlySampledSignal([0.0, 1.23, 6.78], [1, 2, 3],
... units='mV', time_units='ms')
>>> irsig1 = IrregularlySampledSignal([0.01, 0.03, 0.12]*s,
... [[4, 5], [5, 4], [6, 3]]*nA)
*Required attributes/properties*:
:times: (quantity array 1D, numpy array 1D, or list)
The time of each data point. Must have the same size as :attr:`signal`.
:signal: (quantity array 2D, numpy array 2D, or list (data, channel))
The data itself.
:units: (quantity units)
Required if the signal is a list or NumPy array, not if it is
a :class:`Quantity`.
:time_units: (quantity units) Required if :attr:`times` is a list or
NumPy array, not if it is a :class:`Quantity`.
    *Recommended attributes/properties*:
:name: (str) A label for the dataset
:description: (str) Text description.
:file_origin: (str) Filesystem path or URL of the original data file.
*Optional attributes/properties*:
:dtype: (numpy dtype or str) Override the dtype of the signal array.
(times are always floats).
:copy: (bool) True by default.
Note: Any other additional arguments are assumed to be user-specific
metadata and stored in :attr:`annotations`.
*Properties available on this object*:
:sampling_intervals: (quantity array 1D) Interval between each adjacent
pair of samples.
(``times[1:] - times[:-1]``)
:duration: (quantity scalar) Signal duration, read-only.
(``times[-1] - times[0]``)
:t_start: (quantity scalar) Time when signal begins, read-only.
(``times[0]``)
:t_stop: (quantity scalar) Time when signal ends, read-only.
(``times[-1]``)
*Slicing*:
:class:`IrregularlySampledSignal` objects can be sliced. When this
occurs, a new :class:`IrregularlySampledSignal` (actually a view) is
returned, with the same metadata, except that :attr:`times` is also
sliced in the same way.
*Operations available on this object*:
== != + * /
'''
_single_parent_objects = ('Segment', 'ChannelIndex')
_quantity_attr = 'signal'
_necessary_attrs = (('times', pq.Quantity, 1),
('signal', pq.Quantity, 2))
def __new__(cls, times, signal, units=None, time_units=None, dtype=None,
copy=True, name=None, file_origin=None,
description=None,
**annotations):
'''
Construct a new :class:`IrregularlySampledSignal` instance.
This is called whenever a new :class:`IrregularlySampledSignal` is
created from the constructor, but not when slicing.
'''
if units is None:
if hasattr(signal, "units"):
units = signal.units
else:
raise ValueError("Units must be specified")
elif isinstance(signal, pq.Quantity):
# could improve this test, what if units is a string?
if units != signal.units:
signal = signal.rescale(units)
if time_units is None:
if hasattr(times, "units"):
time_units = times.units
else:
raise ValueError("Time units must be specified")
elif isinstance(times, pq.Quantity):
# could improve this test, what if units is a string?
if time_units != times.units:
times = times.rescale(time_units)
# should check time units have correct dimensions
obj = pq.Quantity.__new__(cls, signal, units=units,
dtype=dtype, copy=copy)
if obj.ndim == 1:
obj = obj.reshape(-1, 1)
if len(times) != obj.shape[0]:
raise ValueError("times array and signal array must "
"have same length")
obj.times = pq.Quantity(times, units=time_units,
dtype=float, copy=copy)
obj.segment = None
obj.channel_index = None
return obj
def __init__(self, times, signal, units=None, time_units=None, dtype=None,
copy=True, name=None, file_origin=None, description=None,
**annotations):
'''
Initializes a newly constructed :class:`IrregularlySampledSignal`
instance.
'''
BaseNeo.__init__(self, name=name, file_origin=file_origin,
description=description, **annotations)
def __reduce__(self):
'''
Map the __new__ function onto _new_IrregularlySampledSignal, so that pickle
works
'''
return _new_IrregularlySampledSignal, (self.__class__,
self.times,
np.array(self),
self.units,
self.times.units,
self.dtype,
True,
self.name,
self.file_origin,
self.description,
self.annotations)
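    # Illustrative check (added for exposition; assumes the `quantities`
    # package is available): __reduce__ lets instances survive a pickle
    # round-trip even though __new__ enforces unit arguments.
    #
    #   import pickle
    #   sig = IrregularlySampledSignal([0.0, 1.0, 2.0], [1, 2, 3],
    #                                  units='mV', time_units='s')
    #   clone = pickle.loads(pickle.dumps(sig))
    #   assert (clone.times == sig.times).all()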
def __array_finalize__(self, obj):
'''
This is called every time a new :class:`IrregularlySampledSignal` is
created.
It is the appropriate place to set default values for attributes
for :class:`IrregularlySampledSignal` constructed by slicing or
viewing.
User-specified values are only relevant for construction from
constructor, and these are set in __new__. Then they are just
copied over here.
'''
super(IrregularlySampledSignal, self).__array_finalize__(obj)
self.times = getattr(obj, 'times', None)
        # The additional arguments
        self.annotations = getattr(obj, 'annotations', None)
|
openhatch/new-mini-tasks
|
vendor/packages/Django/tests/regressiontests/utils/dateformat.py
|
Python
|
apache-2.0
| 6,241
| 0.001282
|
from __future__ import unicode_literals
from datetime import datetime, date
import os
import time
from django.utils.dateformat import format
from django.utils import dateformat, translation, unittest
from django.utils.timezone import utc
from django.utils.tzinfo import FixedOffset, LocalTimezone
class DateFormatTests(unittest.TestCase):
def setUp(self):
self.old_TZ = os.environ.get('TZ')
os.environ['TZ'] = 'Europe/Copenhagen'
translation.activate('en-us')
try:
# Check if a timezone has been set
time.tzset()
self.tz_tests = True
except AttributeError:
# No timezone available. Don't run the tests that require a TZ
self.tz_tests = False
def tearDown(self):
if self.old_TZ is None:
del os.environ['TZ']
else:
os.environ['TZ'] = self.old_TZ
# Cleanup - force re-evaluation of TZ environment variable.
if self.tz_tests:
time.tzset()
def test_date(self):
d = date(2009, 5, 16)
self.assertEqual(date.fromtimestamp(int(format(d, 'U'))), d)
def test_naive_datetime(self):
        dt = datetime(2009, 5, 16, 5, 30, 30)
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt)
def test_datetime_with_local_tzinfo(self):
ltz = LocalTimezone(datetime.now())
dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=ltz)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.replace(tzinfo=None))
def test_datetime_with_tzinfo(self):
tz = FixedOffset(-510)
ltz = LocalTimezone(datetime.now())
dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.astimezone(ltz).replace(tzinfo=None))
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz).utctimetuple(), dt.utctimetuple())
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz).utctimetuple(), dt.utctimetuple())
def test_epoch(self):
udt = datetime(1970, 1, 1, tzinfo=utc)
self.assertEqual(format(udt, 'U'), '0')
def test_empty_format(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, ''), '')
def test_am_pm(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, 'a'), 'p.m.')
def test_microsecond(self):
# Regression test for #18951
dt = datetime(2009, 5, 16, microsecond=123)
self.assertEqual(dateformat.format(dt, 'u'), '000123')
def test_date_formats(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)
self.assertEqual(dateformat.format(my_birthday, 'A'), 'PM')
self.assertEqual(dateformat.format(timestamp, 'c'), '2008-05-19T11:45:23.123456')
self.assertEqual(dateformat.format(my_birthday, 'd'), '08')
self.assertEqual(dateformat.format(my_birthday, 'j'), '8')
self.assertEqual(dateformat.format(my_birthday, 'l'), 'Sunday')
self.assertEqual(dateformat.format(my_birthday, 'L'), 'False')
self.assertEqual(dateformat.format(my_birthday, 'm'), '07')
self.assertEqual(dateformat.format(my_birthday, 'M'), 'Jul')
self.assertEqual(dateformat.format(my_birthday, 'b'), 'jul')
self.assertEqual(dateformat.format(my_birthday, 'n'), '7')
self.assertEqual(dateformat.format(my_birthday, 'N'), 'July')
def test_time_formats(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, 'P'), '10 p.m.')
self.assertEqual(dateformat.format(my_birthday, 's'), '00')
self.assertEqual(dateformat.format(my_birthday, 'S'), 'th')
self.assertEqual(dateformat.format(my_birthday, 't'), '31')
self.assertEqual(dateformat.format(my_birthday, 'w'), '0')
self.assertEqual(dateformat.format(my_birthday, 'W'), '27')
self.assertEqual(dateformat.format(my_birthday, 'y'), '79')
self.assertEqual(dateformat.format(my_birthday, 'Y'), '1979')
self.assertEqual(dateformat.format(my_birthday, 'z'), '189')
def test_dateformat(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, r'Y z \C\E\T'), '1979 189 CET')
self.assertEqual(dateformat.format(my_birthday, r'jS \o\f F'), '8th of July')
def test_futuredates(self):
the_future = datetime(2100, 10, 25, 0, 00)
self.assertEqual(dateformat.format(the_future, r'Y'), '2100')
def test_timezones(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
summertime = datetime(2005, 10, 30, 1, 00)
wintertime = datetime(2005, 10, 30, 4, 00)
timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)
if self.tz_tests:
self.assertEqual(dateformat.format(my_birthday, 'O'), '+0100')
self.assertEqual(dateformat.format(my_birthday, 'r'), 'Sun, 8 Jul 1979 22:00:00 +0100')
self.assertEqual(dateformat.format(my_birthday, 'T'), 'CET')
self.assertEqual(dateformat.format(my_birthday, 'U'), '300315600')
self.assertEqual(dateformat.format(timestamp, 'u'), '123456')
self.assertEqual(dateformat.format(my_birthday, 'Z'), '3600')
self.assertEqual(dateformat.format(summertime, 'I'), '1')
self.assertEqual(dateformat.format(summertime, 'O'), '+0200')
self.assertEqual(dateformat.format(wintertime, 'I'), '0')
self.assertEqual(dateformat.format(wintertime, 'O'), '+0100')
# Ticket #16924 -- We don't need timezone support to test this
# 3h30m to the west of UTC
tz = FixedOffset(-3*60 - 30)
dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
self.assertEqual(dateformat.format(dt, 'O'), '-0330')
|
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks
|
Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/solnlib/conf_manager.py
|
Python
|
isc
| 14,825
| 0.000742
|
# Copyright 2016 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License'): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
This module contains simple interfaces for Splunk config file management:
you can update/get/delete stanzas and encrypt/decrypt some fields of a
stanza automatically.
'''
import json
import logging
import traceback
from . import splunk_rest_client as rest_client
from .credentials import CredentialManager
from .credentials import CredentialNotExistException
from .packages.splunklib import binding
from .utils import retry
__all__ = ['ConfStanzaNotExistException',
'ConfFile',
'ConfManagerException',
'ConfManager']
class ConfStanzaNotExistException(Exception):
pass
class ConfFile(object):
'''Configuration file.
:param name: Configuration file name.
:type name: ``string``
:param conf: Configuration file object.
:type conf: ``splunklib.client.ConfigurationFile``
:param session_key: Splunk access token.
:type session_key: ``string``
:param app: App name of namespace.
:type app: ``string``
:param owner: (optional) Owner of namespace, default is `nobody`.
:type owner: ``string``
:param realm: (optional) Realm of credential, default is None.
:type realm: ``string``
:param scheme: (optional) The access scheme, default is None.
:type scheme: ``string``
:param host: (optional) The host name, default is None.
:type host: ``string``
:param port: (optional) The port number, default is None.
:type port: ``integer``
:param context: Other configurations for Splunk rest client.
:type context: ``dict``
'''
ENCRYPTED_TOKEN = '******'
reserved_keys = ('userName', 'appName')
def __init__(self, name, conf, session_key, app, owner='nobody',
                 scheme=None, host=None, port=None, realm=None, **context):
self._name = name
self._conf = conf
self._session_key = session_key
self._app = app
self._owner = owner
self._scheme = scheme
self._host = host
self._port = port
self._context = context
self._cred_manager = None
        # 'realm' is set to the provided argument; by default it falls back to the app name.
if realm is None:
self._realm = self._app
else:
self._realm = realm
@property
@retry(exceptions=[binding.HTTPError])
def _cred_mgr(self):
if self._cred_manager is None:
self._cred_manager = CredentialManager(
self._session_key, self._app, owner=self._owner,
realm=self._realm, scheme=self._scheme, host=self._host,
port=self._port, **self._context)
return self._cred_manager
def _filter_stanza(self, stanza):
for k in self.reserved_keys:
if k in stanza:
del stanza[k]
return stanza
def _encrypt_stanza(self, stanza_name, stanza, encrypt_keys):
if not encrypt_keys:
return stanza
        encrypt_stanza_keys = [k for k in encrypt_keys if k in stanza]
encrypt_fields = {key: stanza[key] for key in encrypt_stanza_keys}
if not encrypt_fields:
return stanza
self._cred_mgr.set_password(stanza_name, json.dumps(encrypt_fields))
for key in encrypt_stanza_keys:
stanza[key] = self.ENCRYPTED_TOKEN
return stanza
def _decrypt_stanza(self, stanza_name, encrypted_stanza):
encrypted_keys = [key for key in encrypted_stanza if
encrypted_stanza[key] == self.ENCRYPTED_TOKEN]
if encrypted_keys:
encrypted_fields = json.loads(
self._cred_mgr.get_password(stanza_name))
for key in encrypted_keys:
encrypted_stanza[key] = encrypted_fields[key]
return encrypted_stanza
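    # Sketch of the masking round-trip implemented above (added for
    # exposition; the stanza name and field values are hypothetical).
    # Encrypted fields live in the credential store as a JSON blob keyed by
    # stanza name, while the conf file only keeps ENCRYPTED_TOKEN:
    #
    #   stanza   = {'user': 'alice', 'token': 's3cr3t'}
    #   masked   = dict(stanza, token=ConfFile.ENCRYPTED_TOKEN)  # stored in conf
    #   vault    = json.dumps({'token': 's3cr3t'})               # stored as password
    #   restored = dict(masked, **json.loads(vault))             # rebuilt by get()
    #   assert restored == stanza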
def _delete_stanza_creds(self, stanza_name):
self._cred_mgr.delete_password(stanza_name)
@retry(exceptions=[binding.HTTPError])
def stanza_exist(self, stanza_name):
'''Check whether stanza exists.
:param stanza_name: Stanza name.
:type stanza_name: ``string``
:returns: True if stanza exists else False.
:rtype: ``bool``
Usage::
>>> from solnlib import conf_manager
>>> cfm = conf_manager.ConfManager(session_key,
'Splunk_TA_test')
>>> conf = cfm.get_conf('test')
>>> conf.stanza_exist('test_stanza')
'''
try:
self._conf.list(name=stanza_name)[0]
except binding.HTTPError as e:
if e.status != 404:
raise
return False
return True
@retry(exceptions=[binding.HTTPError])
def get(self, stanza_name, only_current_app=False):
'''Get stanza from configuration file.
:param stanza_name: Stanza name.
:type stanza_name: ``string``
:returns: Stanza, like: {
'disabled': '0',
'eai:appName': 'solnlib_demo',
'eai:userName': 'nobody',
'k1': '1',
'k2': '2'}
:rtype: ``dict``
:raises ConfStanzaNotExistException: If stanza does not exist.
Usage::
>>> from solnlib import conf_manager
>>> cfm = conf_manager.ConfManager(session_key,
'Splunk_TA_test')
>>> conf = cfm.get_conf('test')
>>> conf.get('test_stanza')
'''
try:
if only_current_app:
stanza_mgrs = self._conf.list(
search='eai:acl.app={} name={}'.format(
self._app, stanza_name.replace('=', r'\=')))
else:
stanza_mgrs = self._conf.list(name=stanza_name)
except binding.HTTPError as e:
if e.status != 404:
raise
raise ConfStanzaNotExistException(
'Stanza: %s does not exist in %s.conf' %
(stanza_name, self._name))
if len(stanza_mgrs) == 0:
raise ConfStanzaNotExistException(
'Stanza: %s does not exist in %s.conf' %
(stanza_name, self._name))
stanza = self._decrypt_stanza(stanza_mgrs[0].name, stanza_mgrs[0].content)
stanza['eai:access'] = stanza_mgrs[0].access
stanza['eai:appName'] = stanza_mgrs[0].access.app
return stanza
@retry(exceptions=[binding.HTTPError])
def get_all(self, only_current_app=False):
'''Get all stanzas from configuration file.
:returns: All stanzas, like: {'test': {
'disabled': '0',
'eai:appName': 'solnlib_demo',
'eai:userName': 'nobody',
'k1': '1',
'k2': '2'}}
:rtype: ``dict``
Usage::
>>> from solnlib import conf_manager
>>> cfm = conf_manager.ConfManager(session_key,
'Splunk_TA_test')
>>> conf = cfm.get_conf('test')
>>> conf.get_all()
'''
if only_current_app:
stanza_mgrs = self._conf.list(search='eai:acl.app={}'.format(self._app))
else:
stanza_mgrs = self._conf.list()
res = {}
for stanza_mgr in stanza_mgrs:
name = stanza_mgr.name
key_values = self._decrypt_stanza(name, stanza_mgr.content)
key_values['eai:access'] = stanza_mgr.access
key_values['eai:appName'] = stanza_mgr.access.app
res[name] = key_values
return res
@retry(except
|
lavish205/olympia
|
src/olympia/addons/tests/test_forms.py
|
Python
|
bsd-3-clause
| 13,654
| 0
|
# -*- coding: utf-8 -*-
import os
import tempfile
import shutil
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.test.client import RequestFactory
from mock import patch
from olympia import amo, core
from olympia.addons import forms
from olympia.addons.models import Addon, Category
from olympia.amo.tests import TestCase, addon_factory, req_factory_factory
from olympia.amo.tests.test_helpers import get_image_path
from olympia.amo.utils import rm_local_tmp_dir
from olympia.tags.models import AddonTag, Tag
from olympia.users.models import UserProfile
class TestAddonFormSupport(TestCase):
def test_bogus_support_url(self):
form = forms.AddonFormSupport(
{'support_url': 'javascript://something.com'}, request=None)
assert not form.is_valid()
assert form.errors['support_url'][0][1] == u'Enter a valid URL.'
def test_ftp_support_url(self):
form = forms.AddonFormSupport(
{'support_url': 'ftp://foo.com'}, request=None)
assert not form.is_valid()
assert form.errors['support_url'][0][1] == u'Enter a valid URL.'
def test_http_support_url(self):
form = forms.AddonFormSupport(
{'support_url': 'http://foo.com'}, request=None)
assert form.is_valid()
class FormsTest(TestCase):
fixtures = ('base/addon_3615', 'base/addon_3615_categories',
'addons/denied')
def setUp(self):
super(FormsTest, self).setUp()
self.existing_name = 'Delicious Bookmarks'
self.non_existing_name = 'Does Not Exist'
self.error_msg = 'This name is already in use. Please choose another.'
self.request = req_factory_factory('/')
def test_locales(self):
form = forms.AddonFormDetails(request=self.request)
assert form.fields['default_locale'].choices[0][0] == 'af'
def test_slug_deny(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic({'slug': 'submit'}, request=self.request,
instance=delicious)
assert not form.is_valid()
assert form.errors['slug'] == (
[u'The slug cannot be "submit". Please choose another.'])
def test_name_trademark_mozilla(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic(
{'name': 'Delicious Mozilla', 'summary': 'foo', 'slug': 'bar'},
request=self.request,
instance=delicious)
assert not form.is_valid()
assert dict(form.errors['name'])['en-us'].startswith(
u'Add-on names cannot contain the Mozilla or Firefox trademarks.')
def test_name_trademark_firefox(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic(
{'name': 'Delicious Firefox', 'summary': 'foo', 'slug': 'bar'},
request=self.request,
instance=delicious)
assert not form.is_valid()
assert dict(form.errors['name'])['en-us'].startswith(
u'Add-on names cannot contain the Mozilla or Firefox trademarks.')
def test_name_trademark_allowed_for_prefix(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic(
{'name': 'Delicious for Mozilla', 'summary': 'foo', 'slug': 'bar'},
request=self.request,
instance=delicious)
assert form.is_valid()
def test_name_no_trademark(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic(
{'name': 'Delicious Dumdidum', 'summary': 'foo', 'slug': 'bar'},
request=self.request,
instance=delicious)
assert form.is_valid()
def test_bogus_homepage(self):
form = forms.AddonFormDetails(
{'homepage': 'javascript://something.com'}, request=self.request)
assert not form.is_valid()
assert form.errors['homepage'][0][1] == u'Enter a valid URL.'
def test_ftp_homepage(self):
form = forms.AddonFormDetails(
{'homepage': 'ftp://foo.com'}, request=self.request)
assert not form.is_valid()
assert form.errors['homepage'][0][1] == u'Enter a valid URL.'
def test_homepage_is_not_required(self):
delicious = Addon.objects.get()
form = forms.AddonFormDetails(
{'default_locale': 'en-US'},
request=self.request, instance=delicious)
assert form.is_valid()
def test_slug_isdigit(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic({'slug': '123'}, request=self.request,
instance=delicious)
assert not form.is_valid()
assert form.errors['slug'] == (
[u'The slug cannot be "123". Please choose another.'])
class TestTagsForm(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestTagsForm, self).setUp()
self.addon = Addon.objects.get(pk=3615)
category = Category.objects.get(pk=22)
category.db_name = 'test'
category.save()
self.data = {
'summary': str(self.addon.summary),
'name': str(self.addon.name),
'slug': self.addon.slug,
}
self.user = self.addon.authors.all()[0]
core.set_user(self.user)
self.request = req_factory_factory('/')
def add_tags(self, tags):
data = self.data.copy()
data.update({'tags': tags})
form = forms.AddonFormBasic(data=data, request=self.request,
instance=self.addon)
assert form.is_valid()
form.save(self.addon)
return form
def get_tag_text(self):
return [t.tag_text for t in self.addon.tags.all()]
def test_tags(self):
self.add_tags('foo, bar')
assert self.get_tag_text() == ['bar', 'foo']
def test_tags_xss(self):
self.add_tags('<script>alert("foo")</script>, bar')
assert self.get_tag_text() == ['bar', 'scriptalertfooscript']
def test_tags_case_spaces(self):
self.add_tags('foo, bar')
self.add_tags('foo, bar , Bar, BAR, b a r ')
assert self.get_tag_text() == ['b a r', 'bar', 'foo']
def test_tags_spaces(self):
self.add_tags('foo, bar beer')
assert self.get_tag_text() == ['bar beer', 'foo']
def test_tags_unicode(self):
self.add_tags(u'Österreich')
assert self.get_tag_text() == [u'Österreich'.lower()]
def add_restricted(self, *args):
if not args:
args = ['i_am_a_restricted_tag']
for arg in args:
tag = Tag.objects.create(tag_text=arg, restricted=True)
AddonTag.objects.create(tag=tag, addon=self.addon)
def test_tags_restricted(self):
self.add_restricted()
self.add_tags('foo, bar')
form = forms.AddonFormBasic(data=self.data, request=self.request,
instance=self.addon)
assert form.fields['tags'].initial == 'bar, foo'
assert self.get_tag_text() == ['bar', 'foo', 'i_am_a_restricted_tag']
self.add_tags('')
assert self.get_tag_text() == ['i_am_a_restricted_tag']
def test_tags_error(self):
self.add_restricted('i_am_a_restricted_tag', 'sdk')
data = self.data.copy()
data.update({'tags': 'i_am_a_restricted_tag'})
form = forms.AddonFormBasic(data=data, request=self.request,
instance=self.addon)
assert form.errors['tags'][0] == (
'"i_am_a_restricted_tag" is a reserved tag and cannot be used.')
data.update({'tags': 'i_am_a_restricted_tag, sdk'})
        form = forms.AddonFormBasic(data=data, request=self.request,
                                    instance=self.addon)
assert form.errors['tags'][0] == (
'"i_am_a_restricted_tag", "sdk" are reserved tags and'
' cannot be used.')
@patch('olympia.access.acl.action_allowed')
def test_tags_admin_restricted(self, action_allowed):
action_allowed.return_value = True
self.add_r
|
theojulienne/pyio
|
pyio/io/StreamWatcher.py
|
Python
|
mit
| 3,517
| 0.063122
|
from collections import namedtuple
import select
StreamEvent = namedtuple( 'StreamEvent', [ 'fd', 'stream', 'data', 'direction', 'num_bytes', 'eof' ] )
class StreamWatcher(object):
def __init__( self ):
if _best_backend is None:
raise Exception( "No poll/queue backend could be found for your OS." )
self.backend = _best_backend( )
self.fd_map = {}
self.stream_map = {}
def watch( self, fd, data=None, read=True, write=False ):
# allow python file-like objects that have a backing fd
if hasattr(fd, 'fileno') and callable(fd.fileno):
stream = fd
fd = stream.fileno()
self.stream_map[fd] = stream
else:
self.stream_map[fd] = None
# associate user data with the fd
self.fd_map[fd] = data
# prepare any event filter additions
if read:
self.backend.watch_read( fd )
if write:
self.backend.watch_write( fd )
def wait( self, timeout=None, max_events=4 ):
return self.backend.wait(
timeout=timeout,
			max_events=max_events,
fd_data_map=self.fd_map,
fd_stream_map=self.stream_map )
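# Illustrative usage (added for exposition, not part of the original file;
# requires one of the kqueue/epoll backends defined below to be available):
def _example_watch_pipe():
	import os
	r, w = os.pipe()
	watcher = StreamWatcher()
	watcher.watch(r, data='my-pipe', read=True)
	os.write(w, b'hello')
	for event in watcher.wait(timeout=1.0):
		# each event is a StreamEvent namedtuple
		print(event.fd, event.data, event.direction, event.num_bytes, event.eof)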
_best_backend = None
try:
from select import kqueue, kevent
except ImportError:
pass
else:
class KQueueBackend(object):
def __init__( self ):
self.kq = kqueue( )
def watch_read( self, fd ):
event = kevent( fd, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_ADD )
self._add_events( [event] )
def watch_write( self, fd ):
			event = kevent( fd, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_ADD )
self._add_events( [event] )
def _add_events( self, new_events ):
e = self.kq.control( new_events, 0, 0 )
assert len(e) == 0, "Not expecting to receive any events while adding filters."
def wait( self, timeout=None, max_events=4, fd_data_map={}, fd_stream_map={} ):
r_events = self.kq.control( None, max_events, timeout )
e = []
for event in r_events:
fd = event.ident
if fd in fd_data_map:
stream = fd_stream_map.get( fd, None )
data = fd_data_map.get( fd, None )
direction = 'read' if event.filter == select.KQ_FILTER_READ else 'write'
num_bytes = event.data
eof = ( event.flags & select.KQ_EV_EOF != 0 )
e.append( StreamEvent( fd, stream, data, direction, num_bytes, eof ) )
return e
if _best_backend is None:
_best_backend = KQueueBackend
try:
from select import epoll
from fcntl import ioctl
import array
import termios
except ImportError:
pass
else:
class EPollBackend(object):
def __init__( self ):
self.ep = epoll( )
def watch_read( self, fd ):
self.ep.register( fd, select.EPOLLIN )
def watch_write( self, fd ):
self.ep.register( fd, select.EPOLLOUT )
def wait( self, timeout=None, max_events=None, fd_data_map={}, fd_stream_map={} ):
if max_events is None:
max_events = -1
if timeout is None:
timeout = -1
r_events = self.ep.poll( timeout, max_events )
e = []
for fd, event in r_events:
if fd in fd_data_map:
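					# epoll, unlike kqueue, does not report how many bytes are
					# readable, so ask the kernel via the FIONREAD ioctl.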
buf = array.array( 'i', [0] )
ioctl( fd, termios.FIONREAD, buf, 1 )
stream = fd_stream_map.get( fd, None )
data = fd_data_map.get( fd, None )
num_bytes = buf[0]
eof = ( event & (select.EPOLLHUP | select.EPOLLERR) != 0 )
if event & select.EPOLLIN != 0:
e.append( StreamEvent( fd, stream, data, 'read', num_bytes, eof ) )
if event & select.EPOLLOUT != 0:
e.append( StreamEvent( fd, stream, data, 'write', num_bytes, eof ) )
return e
if _best_backend is None:
_best_backend = EPollBackend
|
openego/oeplatform
|
login/urls.py
|
Python
|
agpl-3.0
| 2,215
| 0.004966
|
from django.conf.urls import include, url
from django.urls import path
from login import views
from django.contrib.auth.views import PasswordResetCompleteView, PasswordResetConfirmView, PasswordResetDoneView
urlpatterns = [
path('password_reset/', views.PasswordResetView.as_view(
html_email_template_name="registration/password_reset_email.html",
email_template_name="registration/password_reset_email.txt",
template_name='registration/custom_password_reset_form.html'), name='password_reset'),
path('password_reset/done/', PasswordResetDoneView.as_view(template_name = 'registration/custom_password_reset_done.html'), name='password_reset_done'),
path('reset/<uidb64>/<token>/', PasswordResetConfirmView.as_view(template_name = 'registration/custom_password_reset_confirm.html'), name='password_reset_confirm'),
path('reset/done/', PasswordResetCompleteView.as_view(template_name='registration/custom_password_reset_complete.html'), name='password_reset_complete'),
url("^", include("django.contrib.auth.urls")),
url(r"^profile/(?P<user_id>[\d]+)$", views.ProfileView.as_view(), name="input"),
url(
r"^profile/password_change$",
views.OEPPasswordChangeView.as_view(),
        name="input",
),
url(
r"^profile/(?P<user_id>[\d]+)/edit$", views.EditUserView.as_view(), name="input"
),
url(r"^groups/$", views.GroupManagement.as_view(), name="input"),
url(
r"^groups/new/$",
views.GroupCreate.as_view(),
name="input",
),
url(
r"^groups/(?P<group_id>[\w\d_\s]+)/edit$",
views.GroupCreate.as_view(),
name="input",
),
url(
r"^groups/(?P<group_id>[\w\d_\s]+)/$",
views.GroupView.as_view(),
),
url(
r"^groups/(?P<group_id>[\w\d_\s]+)/members$",
views.GroupEdit.as_view(),
name="input",
),
url(r"^groups/new/$", views.GroupCreate.as_view(), name="input"),
url(r"^register$", views.CreateUserView.as_view()),
url(r"^detach$", views.DetachView.as_view()),
url(r"^activate/(?P<token>[\w\d\-\s]+)$", views.activate),
url(r"^activate/$", views.ActivationNoteView.as_view(), name="activate"),
]
|
tensorflow/federated
|
tensorflow_federated/python/core/impl/computation/__init__.py
|
Python
|
apache-2.0
| 651
| 0
|
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for interacting with a computation."""
|
UltrosBot/Ultros3K
|
src/ultros/core/networks/base/servers/base.py
|
Python
|
artistic-2.0
| 942
| 0.002123
|
# coding=utf-8
from abc import ABCMeta, abstractmethod
from typing import Optional
from weakref import ref
from logging import getLogger
from ultros.core.networks.base.connectors import base as base_connector
from ultros.core.networks.base.networks import base as base_network
__author__ = "Gareth Coles"
class BaseServer(metaclass=ABCMeta):
def __init__(self, name: str, network: "base_network.BaseNetwork"):
self.name = name
self._network = ref(network)
        self.logger = getLogger(self.name)  # TODO: Logging
@property
def network(self) -> "base_network.BaseNetwork":
return self._network()
@abstractmethod
async def connector_connected(self, connector: "base_connector.BaseConnector"):
pass
@abstractmethod
async def connector_disconnected(self, connector: "base_connector.BaseConnector",
exc: Optional[Exception]):
pass
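# Minimal concrete subclass sketch (added for exposition; the class name and
# log messages are hypothetical):
class _EchoServer(BaseServer):
    async def connector_connected(self, connector: "base_connector.BaseConnector"):
        self.logger.info("connected: %r", connector)
    async def connector_disconnected(self, connector: "base_connector.BaseConnector",
                                     exc: Optional[Exception]):
        self.logger.info("disconnected: %r (%r)", connector, exc)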
|
MarcFord/Emburse-python
|
emburse/client.py
|
Python
|
gpl-3.0
| 4,177
| 0.002394
|
from emburse.resource import (
EmburseObject,
Account,
Allowance,
Card,
Category,
Company,
Department,
Label,
Location,
Member,
SharedLink,
Statement,
Transaction
)
class Client(EmburseObject):
"""
Emburse API Client
    The API enables the creation of expense cards at scale for custom business solutions as well as for
third-party app integrations. Cards can be created with set spending limits and assigned with just an email.
Some use cases include vendor payments, employee expense control, and fleet card management.
API Version:
v1
API Docs:
https://www.emburse.com/api/v1/docs#getting-started
Authors:
Marc Ford <marc.ford@gmail.com>
"""
@property
def Account(self):
"""
Emburse Account Object,
configured with the auth token from the client
:return: A configured emburse.resource.Account
:rtype: Account
"""
return Account(auth_token=self.auth_token)
@property
def Allowance(self):
"""
Emburse Allowance Object,
configured with the auth token from the client
:return: A configured emburse.resource.Allowance
:rtype: Allowance
"""
return Allowance(auth_token=self.auth_token)
@property
def Card(self):
"""
Emburse Card Object,
configured with the auth token from the client
:return: A configured emburse.resource.Card
:rtype: Card
"""
return Card(auth_token=self.auth_token)
@property
def Category(self):
"""
Emburse Category Object,
configured with the auth token from the client
:return: A configured emburse.resource.Category
:rtype: Category
"""
return Category(auth_token=self.auth_token)
@property
def Company(self):
"""
Emburse Company Object,
configured with the auth token from the client
:return: A configured emburse.resource.Company
:rtype: Company
"""
return Company(auth_token=self.auth_token)
@property
def Department(self):
"""
Emburse Department Object,
configured with the auth token from the client
:return: A configured emburse.resource.Department
:rtype: Department
"""
return Department(auth_token=self.auth_token)
@property
def Label(self):
"""
Emburse Label Object,
configured with the auth token from the client
:return: A configured emburse.resource.Label
:rtype: Label
"""
return Label(auth_token=self.auth_token)
@property
def Location(self):
"""
Emburse Location Object,
configured with the auth token from the client
:return: A configured emburse.resource.Location
:rtype: Location
"""
return Location(auth_token=self.auth_token)
@property
def Member(self):
"""
Emburse Member Object,
configured with the auth token from the client
:return: A configured emburse.resource.Member
:rtype: Member
"""
return Member(auth_token=self.auth_token)
@property
def SharedLink(self):
"""
Emburse SharedLink Object,
configured with the auth token from the client
:return: A configured emburse.resource.SharedLink
:rtype: SharedLink
"""
return SharedLink(auth_token=self.auth_token)
@property
def Statement(self):
"""
Emburse Statement Object,
configured with the auth token from the client
:return: A configured emburse.resource.Statement
:rtype: Statement
"""
return Statement(auth_token=self.auth_token)
@property
def Transaction(self):
"""
Emburse Transaction Object,
configured with the auth token from the client
:return: A configured emburse.resource.Transaction
:rtype: Transaction
"""
return Transaction(auth_token=self.auth_token)
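# Illustrative usage (added for exposition; the token value is hypothetical
# and resource-level methods depend on emburse.resource, not shown here):
#
#   client = Client(auth_token='YOUR_TOKEN')
#   card = client.Card   # a Card resource bound to the same auth token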
|
flavour/iscram
|
modules/s3/s3aaa.py
|
Python
|
mit
| 198,273
| 0.003299
|
# -*- coding: utf-8 -*-
""" Authentication, Authorization, Accouting
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: (c) 2010-2012 Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["AuthS3",
"S3Permission",
"S3Audit",
"S3RoleManager",
"FaceBookAccount",
"GooglePlusAccount",
]
import datetime
import re
import time
import urllib
from urllib import urlencode
import urllib2
from gluon import *
from gluon.storage import Storage, Messages
from gluon.dal import Field, Row, Query, Set, Table, Expression
from gluon.sqlhtml import CheckboxesWidget, StringWidget
from gluon.tools import Auth, callback, addrow
from gluon.utils import web2py_uuid
from gluon.validators import IS_SLUG
from gluon.contrib import simplejson as json
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon.contrib.login_methods.oauth20_account import OAuthAccount
from s3method import S3Method
from s3validators import IS_ACL
from s3widgets import S3ACLWidget, CheckboxesWidgetS3
from s3utils import s3_mark_required
from s3fields import s3_uid, s3_timestamp, s3_deletion_status
DEFAULT = lambda: None
table_field = re.compile("[\w_]+\.[\w_]+")
DEBUG = False
if DEBUG:
import sys
print >> sys.stderr, "S3AAA: DEBUG MODE"
def _debug(m):
print >> sys.stderr, m
else:
_debug = lambda m: None
# =============================================================================
class AuthS3(Auth):
"""
S3 extensions of the gluon.tools.Auth class
- override:
define_tables()
login()
register()
profile()
verify_email()
requires_membership()
- add:
s3_has_role()
s3_has_permission()
s3_logged_in()
s3_accessible_query()
s3_impersonate()
s3_register() callback
s3_link_to_person()
s3_verify_email_onaccept()
s3_group_members()
s3_user_to_person()
s3_person_to_user()
person_id()
- language
- utc_offset
- organisation
- @ToDo: Facility
"""
# Configuration of UIDs for system roles
S3_SYSTEM_ROLES = Storage(ADMIN = "ADMIN",
AUTHENTICATED = "AUTHENTICATED",
ANONYMOUS = "ANONYMOUS",
EDITOR = "EDITOR",
MAP_ADMIN = "MAP_ADMIN")
def __init__(self):
""" Initialise parent class & make any necessary modifications """
Auth.__init__(self, current.db)
deployment_settings = current.deployment_settings
system_name = deployment_settings.get_system_name()
self.settings.lock_keys = False
self.settings.username_field = False
self.settings.lock_keys = True
        self.messages.lock_keys = False
        self.messages.registration_pending_approval = "Account registered, however registration is still pending approval - please wait until confirmation is received."
self.messages.email_approver_failed = "Failed to send mail to Approver - see if you can notify them manually!"
        self.messages.email_verification_failed = "Unable to send verification email - either your email is invalid or our email server is down"
        self.messages.email_sent = "Verification Email sent - please check your email to validate. If you do not receive this email please check your junk email or spam filters"
self.messages.email_verified = "Email verified - you can now login"
self.messages.welcome_email_subject = "Welcome to %(system_name)s" % \
dict(system_name=system_name)
self.messages.welcome_email = \
"Welcome to %(system_name)s - click on the link %(url)s to complete your profile" % \
dict(system_name = system_name,
url = deployment_settings.get_base_public_url() + URL("default", "user", args=["profile"]))
self.messages.duplicate_email = "This email address is already in use"
self.messages.registration_disabled = "Registration Disabled!"
self.messages.registration_verifying = "You haven't yet Verified your account - please check your email"
self.messages.label_organisation_id = "Organization"
self.messages.label_site_id = "Facility"
self.messages.label_utc_offset = "UTC Offset"
self.messages.label_image = "Profile Image"
self.messages.help_utc_offset = "The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones."
self.messages.help_mobile_phone = "Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages."
self.messages.help_organisation = "Entering an Organization is optional, but doing so directs you to the appropriate approver & means you automatically get the appropriate permissions."
self.messages.help_image = "You can either use %(gravatar)s or else upload a picture here. The picture will be resized to 50x50."
#self.messages.logged_in = "Signed In"
#self.messages.submit_button = "Signed In"
#self.messages.logged_out = "Signed Out"
self.messages.lock_keys = True
# S3Permission
self.permission = S3Permission(self)
# Set to True to override any authorization
self.override = False
# Site types (for OrgAuth)
T = current.T
if deployment_settings.get_ui_camp():
shelter = T("Camp")
else:
shelter = T("Shelter")
self.org_site_types = Storage(
cr_shelter = shelter,
#org_facility = T("Facility"),
org_facility = T("Site"),
org_office = T("Office"),
hms_hospital = T("Hospital"),
#project_site = T("Project Site"),
#fire_station = T("Fire Station"),
)
# -------------------------------------------------------------------------
def define_tables(self, migrate=True, fake_migrate=False):
"""
to be called unless tables are defined manually
usages::
# defines all needed tables and table files
# UUID + "_auth_user.table", ...
auth.define_tables()
# defines all needed tables and table files
# "myprefix_auth_user.table", ...
auth.define_tables(migrate="myprefix_")
# defines all needed tables without migration/table files
auth.define_tables(migrate=False)
"""
db = current.db
request =
|
tjmcewan/odin
|
setup.py
|
Python
|
bsd-3-clause
| 1,229
| 0
|
from setuptools import setup, find_packages
try:
long_description = open("README.rst").read()
except IOError:
long_description = ""
setup(
name='odin',
version='0.4.2',
url='https://github.com/timsavage/odin',
license='LICENSE',
author='Tim Savage',
author_email='tim.savage@poweredbypenguins.org',
description='Object Data Mapping for Python',
long_description=long_description,
packages=find_packages(),
install_requires=['six'],
extras_require={
        # Documentation generation
'doc_gen': ["jinja2>=2.7"],
# Pint integration
'pint': ["pint"],
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
science-en-poche/yelandur
|
docs/source/conf.py
|
Python
|
gpl-3.0
| 7,995
| 0.007383
|
# -*- coding: utf-8 -*-
#
# Yelandur documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 10 16:10:42 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Yelandur'
copyright = u'2013, Sébastien Lerique'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
|
theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Yelandurdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Yelandur.tex', u'Yelandur Documentation',
u'Sébastien Lerique', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'yelandur', u'Yelandur Documentation',
[u'Sébastien Lerique'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Yelandur', u'Yelandur Documentation',
u'Sébastien Lerique', 'Yelandur', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
pyrate-build/pyrate-build
|
examples/test03.py
|
Python
|
apache-2.0
| 489
| 0.03272
|
import logging
try:
create_external('xml2', build_helper = 'xml2-config',
version_query = '--version', version_parser = lambda x: 'invalid')
except Exception:
logging.critical('external version parsing')
try:
tools['c'].std = 'latest'
except Exception:
	logging.critical('std setting')
try:
shared_library('x', [])
except Exception:
	logging.critical('shared_library: empty input')
try:
static_library('x', [])
except Exception:
logging.critical('static_library: empty input')
|
biomodels/MODEL8687196544
|
MODEL8687196544/model.py
|
Python
|
cc0-1.0
| 427
| 0.009368
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL8687196544.xml')
with open(sbmlFilePath,'r') as f:
    sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
|
liangsuilong/ztq
|
ztq_demo/tasks.py
|
Python
|
mit
| 586
| 0.020478
|
# encoding: utf-8
from ztq_core import async
import time
@async
def send(body):
print 'START: ', body
time.sleep(3)
print 'END: ', body
@async(queue='mail')
def send_failed(body):
print 'FAIL START:', body
    raise Exception('connection error...')
@async(queue='mail')
def failed_callback(return_code, return_msg):
print 'FAILED CALLBACK:', return_code, return_msg
@async(queue='index')
def index(data):
print 'INDEX:', data
time.sleep(1)
def do_commit():
print 'COMMITTED'
import ztq_worker
ztq_worker.register_batch_queue('index', 5, do_commit)
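# Illustrative calls (added for exposition): because of the @async decorator,
# calling these functions enqueues jobs instead of running them inline:
#
#   send('hello')      # a worker later prints START/END
#   index({'id': 1})   # routed to the 'index' queue; do_commit runs per batch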
|
tvarney/txtrpg
|
rpg/io/configuration.py
|
Python
|
mit
| 15,670
| 0
|
import abc
import os
import os.path
import platform
import yaml
import typing
if typing.TYPE_CHECKING:
from typing import Any, Dict, IO, Optional, Sequence, Type
class PropertyTypeError(ValueError):
def __init__(self, property_name: str, value: 'Any') -> None:
ValueError.__init__(self, "can not set {} property to type {}".format(
property_name, type(value)
))
class Context(object):
"""Context keeps track of the value being unpacked.
The Context type keeps track of a file name and field name which are used
to describe the context of an error when unpacking a configuration object.
A Context must be provided to the unpack function of a configuration
Property, which uses the context to write errors to. The result of the
unpack operation should be a bool indicating overall success or failure,
while the Context can be inspected to check how many errors occurred.
If the `writer` field is not None, errors will be written to it. This must
be set to a type which supports the write function (e.g. sys.stderr), and
each error will result in multiple calls to write. Capturing the output
can be done by setting the `writer` field to an io.StringIO.
"""
def __init__(self, file_name: str = "", field_name: str = "") -> None:
"""Create a new Context.
:param file_name: The filename of the context
:param field_name: The name of the current field
"""
self.file = file_name
self.field = field_name
self.error_count = 0
self.writer = None
def error(self, message: str, *args, **kwargs) -> None:
if self.writer is not None:
if self.file != "":
self.writer.write(self.file)
self.writer.write(": ")
if self.field != "":
self.writer.write(self.field)
self.writer.write(": ")
self.writer.write(message.format(*args, **kwargs))
self.writer.write("\n")
self.error_count += 1
def invalid_type(self, expected_type: 'Type', value: 'Any') -> None:
self.error(
"invalid type; expected {}, got {} ({})",
expected_type, value, type(value)
)
class Property(abc.ABC):
"""Property is the abstract base class of all Configuration properties."""
def __init__(self):
abc.ABC.__init__(self)
@abc.abstractmethod
def copy(self) -> 'Property':
raise NotImplementedError
@abc.abstractmethod
def unpack(self, value: 'Any', context: 'Context') -> bool:
raise NotImplementedError
@abc.abstractmethod
def pack(self) -> 'Any':
raise NotImplementedError
class PrimitiveProperty(Property, abc.ABC):
def __init__(self, default: 'Any') -> None:
Property.__init__(self)
self._default = default
self._value = default
def __repr__(self) -> str:
return str(self._value)
def __str__(self) -> str:
return str(self._value)
def __bool__(self) -> bool:
return bool(self._value)
def __int__(self) -> int:
return int(self._value)
def __float__(self) -> float:
return float(self._value)
def __eq__(self, other: 'Any') -> bool:
return self._value == other
def __ne__(self, other: 'Any') -> bool:
return self._value != other
def __lt__(self, other: 'Any') -> bool:
return self._value < other
def __le__(self, other: 'Any') -> bool:
return self._value <= other
def __gt__(self, other: 'Any') -> bool:
return self._value > other
def __ge__(self, other: 'Any') -> bool:
return self._value >= other
def pack(self) -> 'Any':
return self._value
class Boolean(PrimitiveProperty):
"""A boolean property."""
def __init__(self, default: bool = False) -> None:
"""Create a new Boolean property.
:param default: The default value of the property
"""
if type(default) is not bool:
raise PropertyTypeError("Boolean", default)
PrimitiveProperty.__init__(self, default)
@property
def value(self) -> bool:
"""The value of the property."""
return self._value
@value.setter
def value(self, value: bool) -> None:
if type(value) is not bool:
raise PropertyTypeError("Boolean", value)
self._value = value
@property
def default(self) -> bool:
"""The default value of the property."""
return self._default
def copy(self) -> 'Boolean':
"""Create a copy of this Boolean Property.
:returns: A copy of this boolean property
"""
b = Boolean(self._default)
b._value = self._value
return b
def unpack(self, value: 'Any', context: 'Context') -> bool:
"""Unpack a YAML value into this Boolean property.
:param value: The value to unpack
:param context: The context of this unpack operation
:returns: If the unpack operation succeeded
"""
if type(value) is not bool:
context.invalid_type(bool, value)
return False
self._value = value
return True
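# Illustrative round-trip (added for exposition, not part of the original
# file): errors are counted on the Context and, when a writer is attached,
# rendered with "file: field: message" prefixes.
def _example_boolean_round_trip():
    import io
    ctx = Context(file_name='settings.yml', field_name='debug')
    ctx.writer = io.StringIO()
    prop = Boolean(default=False)
    assert prop.unpack(True, ctx) and prop.value is True
    assert not prop.unpack('yes', ctx)  # wrong type -> error recorded
    assert ctx.error_count == 1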
class Integer(PrimitiveProperty):
"""An integer property."""
def __init__(self, default: int = 0) -> None:
"""Create a new Integer property.
:param default: The default value of the property
"""
if type(default) is not int:
raise PropertyTypeError("Integer", default)
PrimitiveProperty.__init__(self, default)
@property
def value(self) -> int:
"""The value of the property."""
return self._value
@value.setter
def value(self, value: int) -> None:
if type(value) is not int:
raise PropertyTypeError("Integer", value)
self._value = value
@property
def default(self) -> int:
"""The default value of the property (read-only)."""
return self._default
def copy(self) -> 'Integer':
"""Create a copy of this Integer Property.
:returns: A copy of this Integer
"""
i = Integer(self._default)
i._value = self._value
return i
def unpack(self, value: 'Any', context: 'Context') -> bool:
"""Unpack a YAML value into this Integer Property.
The value being unpacked must be an int, otherwise an error is written
to the context and False is returned.
:param value: The value to unpack
:param context: The context of this unpack operation
:returns: If the unpack operation succeeded
"""
if type(value) is not int:
context.invalid_type(int, value)
return False
self._value = value
return True
class Float(PrimitiveProperty):
"""A Float property."""
    def __init__(self, default: float = 0.0) -> None:
"""Create a new Float property.
:param default: The default value of the property
"""
if type(default) is not float:
raise PropertyTypeError("Float", default)
PrimitiveProperty.__init__(self, default)
@property
    def value(self) -> float:
"""The value of the property."""
return self._value
@value.setter
def value(self, value: float) -> None:
if type(value) is not float:
raise PropertyTypeError("Float", value)
self._value = value
@property
    def default(self) -> float:
"""The default value of the property (read-only)."""
return self._default
def copy(self) -> 'Float':
"""Create a copy of this Float Property.
:returns: A copy of this Float
"""
f = Float(self._default)
f._value = self._value
return f
def unpack(self, value: 'Any', context: 'Context') -> bool:
"""Unpack a YAML value into this Float Property.
The value being unpacked must be either a float or an int, otherwise
an error is written to the context and False is returned.
:param value: The value to unpack
        :param context: The context of this unpack operation
        :returns: If the unpack operation succeeded
        """
        if type(value) is not float and type(value) is not int:
            context.invalid_type(float, value)
            return False
        self._value = float(value)
        return True
|
elegion/djangodash2013
|
wtl/wtlib/tests/models.py
|
Python
|
mit
| 2,921
| 0
|
from __future__ import unicode_literals
from django.test import TestCase
from wtl.wtlib.models import Library, LibraryVersion
from wtl.wtlib.tests.factories import (LibraryFactory, LibraryVersionFactory,
ProjectFactory)
class LibraryTestCase(TestCase):
def test_str(self):
x = LibraryFactory()
self.assertEqual(str(x), x.name)
class LibraryVersionTestCase(TestCase):
def test_str(self):
x = LibraryVersionFactory()
self.assertEqual(str(x), x.library.name + ' ' + x.version)
def test_update_totals(self):
l1 = LibraryFactory(name='l1')
l1v1 = LibraryVersionFactory(library=l1, version="1")
l1v2 = LibraryVersionFactory(library=l1, version="2")
l2 = LibraryFactory(name='l2')
l2v1 = LibraryVersionFactory(library=l2, version="1")
l2v2 = LibraryVersionFactory(library=l2, version="2")
p = ProjectFactory()
p.libraries.add(l1v1)
p.libraries.add(l1v2)
p.libraries.add(l2v1)
LibraryVersion.update_totals(project=p)
self.assertEqual(Library.objects.get(id=l1.id).total_users, 2)
self.assertEqual(Library.objects.get(id=l2.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(id=l1v1.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(id=l1v2.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(id=l2v1.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(id=l2v2.id).total_users, 0)
def test_often_used_with(self):
lib1 = LibraryFactory()
lib2 = LibraryFactory()
lib3 = LibraryFactory()
lib4 = LibraryFactory()
ver1 = LibraryVersionFactory(library=lib1)
project_1_2 = ProjectFactory()
project_1_2.libraries.add(ver1)
project_1_2.libraries.add(LibraryVersionFactory(library=lib2))
project_1_2__2 = ProjectFactory()
project_1_2__2.libraries.add(ver1)
project_1_2__2.libraries.add(LibraryVersionFactory(library=lib2))
project_1_3 = ProjectFactory()
project_1_3.libraries.add(LibraryVersionFactory(library=lib1))
project_1_3.libraries.add(LibraryVersionFactory(library=lib3))
project_2_3_4 = ProjectFactory()
project_2_3_4.libraries.add(LibraryVersionFactory(library=lib2))
project_2_3_4.libraries.add(LibraryVersionFactory(library=lib3))
project_2_3_4.libraries.add(LibraryVersionFactory(library=lib4))
lib1_result = lib1.often_used_with()
self.assertEqual(lib2.name, lib1_result[0].name)
self.assertEqual(2, lib1_result[0].usage_count)
self.assertEqual(lib3.name, lib1_result[1].name)
self.assertEqual(1, lib1_result[1].usage_count)
class ProjectTestCase(TestCase):
def test_str(self):
x = ProjectFactory()
self.assertEqual(str(x), x.name)
|
rehandalal/morgoth
|
morgoth/wsgi.py
|
Python
|
mpl-2.0
| 296
| 0
|
import os
from configurations.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "morgoth.settings")
os.environ.setdefault("DJANGO_CONFIGURATION", "Production")
application = DjangoWhiteNoise(get_wsgi_application())
|
itziakos/trait-documenter
|
trait_documenter/trait_documenter.py
|
Python
|
bsd-3-clause
| 4,946
| 0.000404
|
#----------------------------------------------------------------------------
#
# Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
#----------------------------------------------------------------------------
from __future__ import unicode_literals
import ast
import traceback
import sys
import inspect
from _ast import ClassDef, Assign
from sphinx.ext.autodoc import ClassLevelDocumenter
from traits.has_traits import MetaHasTraits
from traits.trait_handlers import TraitType
def is_class_trait(name, cls):
""" Check if the name is in the list of class defined traits of ``cls``.
"""
return isinstance(cls, MetaHasTraits) and name in cls.__class_traits__
class TraitDocumenter(ClassLevelDocumenter):
""" Specialized Documenter subclass for trait attributes.
The class defines a new documenter that recovers the trait definition
signature of module level and class level traits.
To use the documenter, append the module path in the extension
attribute of the `conf.py`.
.. warning::
Using the TraitDocumenter in conjunction with TraitsDoc is not
advised.
"""
objtype = 'traitattribute'
directivetype = 'attribute'
member_order = 60
# must be higher than other attribute documenters
priority = 12
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
""" Check that the documented member is a trait instance.
"""
return (
isattr and
issubclass(type(member), TraitType) or
is_class_trait(membername, parent.object))
def document_members(self, all_members=False):
        # Trait attributes have no members.
pass
def add_content(self, more_content, no_docstring=False):
# Never try to get a docstring from the trait object.
ClassLevelDocumenter.add_content(
self, more_content, no_docstring=True)
def import_object(self):
""" Get the Trait object.
Notes
-----
Code adapted from autodoc.Documenter.import_object.
"""
try:
__import__(self.modname)
current = self.module = sys.modules[self.modname]
for part in self.objpath[:-1]:
current = self.get_attr(current, part)
name = self.objpath[-1]
self.object_name = name
self.object = None
self.parent = current
return True
# this used to only catch SyntaxError, ImportError and
# AttributeError, but importing modules with side effects can raise
# all kinds of errors.
except Exception as err:
if self.env.app and not self.env.app.quiet:
self.env.app.info(traceback.format_exc().rstrip())
msg = (
                'autodoc can\'t import/find {0} {1!r}, it reported error: '
'"{2}", please check your spelling and sys.path')
self.directive.warn(msg.format(
self.objtype, str(self.fullname), err))
self.env.note_reread()
return False
def add_directive_header(self, sig):
""" Add the sphinx directives.
Add the 'attribute' directive with the annotation option
set to the trait definition.
"""
ClassLevelDocumenter.add_directive_header(self, sig)
definition = self.get_trait_definition()
self.add_line(
' :annotation: = {0}'.format(definition), '<autodoc>')
def get_trait_definition(self):
""" Retrieve the Trait attribute definition
"""
# Get the class source and tokenize it.
source = inspect.getsource(self.parent)
nodes = ast.parse(source)
for node in ast.iter_child_nodes(nodes):
if isinstance(node, ClassDef):
parent_node = node
break
else:
return ''
for node in ast.iter_child_nodes(parent_node):
if isinstance(node, Assign):
name = node.targets[0]
if name.id == self.object_name:
break
else:
return ''
endlineno = name.lineno
for item in ast.walk(node):
if hasattr(item, 'lineno'):
endlineno = max(endlineno, item.lineno)
definition_lines = [
line.strip()
for line in source.splitlines()[name.lineno-1:endlineno]]
definition = ''.join(definition_lines)
equal = definition.index('=')
return definition[equal + 1:].lstrip()
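# A worked illustration of get_trait_definition above (class and trait
# names are invented for the example). Given this source:
#
#     class Point(HasTraits):
#         x = Float(0.0,
#                   desc='abscissa')
#
# documenting `x` joins the stripped assignment lines into
# "x = Float(0.0,desc='abscissa')" and returns everything after the '=',
# i.e. "Float(0.0,desc='abscissa')".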
|
maat25/brain-decoding
|
classifier/knn-correlation.py
|
Python
|
mit
| 427
| 0.04918
|
from scipy.spatial.distance import cdist
import numpy as np
class KNNC1(object):
def fit(self, X, Y):
self.X = X
self.Y = Y
def predict(self, Z):
        dists = cdist(self.X, Z, 'correlation')
indices = dists.argmin(axis = 0)
return self.Y[indices]
def predict_proba(self, Z):
predictions = self.predict(Z)
        result = np.zeros((Z.shape[0], np.unique(self.Y).size))
result[:,predictions-1] = 1
return result
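# Usage sketch (shapes assumed): rows of X are training samples and Y holds
# 1-based integer labels -- predict_proba's `predictions - 1` column
# indexing relies on that convention.
#
#     clf = KNNC1()
#     clf.fit(np.array([[1.0, 0.0], [0.0, 1.0]]), np.array([1, 2]))
#     clf.predict(np.array([[0.9, 0.1]]))    # -> array([1])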
|
won0089/oppia
|
core/domain/collection_domain.py
|
Python
|
apache-2.0
| 22,777
| 0
|
# coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for a collection and its constituents.
Domain objects capture domain-specific logic and are agnostic of how the
objects they represent are stored. All methods and properties in this file
should therefore be independent of the specific storage models used.
"""
__author__ = 'Ben Henning'
import copy
import feconf
import utils
# Do not modify the values of these constants. This is to preserve backwards
# compatibility with previous change dicts.
COLLECTION_NODE_PROPERTY_PREREQUISITE_SKILLS = 'prerequisite_skills'
COLLECTION_NODE_PROPERTY_ACQUIRED_SKILLS = 'acquired_skills'
# This takes an additional 'exploration_id' parameter.
CMD_ADD_COLLECTION_NODE = 'add_collection_node'
# This takes an additional 'exploration_id' parameter.
CMD_DELETE_COLLECTION_NODE = 'delete_collection_node'
# This takes additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_EDIT_COLLECTION_NODE_PROPERTY = 'edit_collection_node_property'
# This takes additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_EDIT_COLLECTION_PROPERTY = 'edit_collection_property'
# This takes additional 'from_version' and 'to_version' parameters for logging.
CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION = 'migrate_schema_to_latest_version'
class CollectionChange(object):
"""Domain object class for a change to a collection.
IMPORTANT: Ensure that all changes to this class (and how these cmds are
interpreted in general) preserve backward-compatibility with the
collection snapshots in the datastore. Do not modify the definitions of
cmd keys that already exist.
"""
COLLECTION_NODE_PROPERTIES = (
COLLECTION_NODE_PROPERTY_PREREQUISITE_SKILLS,
COLLECTION_NODE_PROPERTY_ACQUIRED_SKILLS)
COLLECTION_PROPERTIES = ('title', 'category', 'objective')
def __init__(self, change_dict):
"""Initializes an Collectio
|
nChange object from a dict.
change_dict represents a command. It should have a 'cmd' key, and one
or more other keys. The keys depend on what the value for 'cmd' is.
The possible values for 'cmd' are listed below, together with the other
keys in the dict:
- 'add_collection_node' (with exploration_id)
        - 'delete_collection_node' (with exploration_id)
- 'edit_collection_node_property' (with exploration_id,
property_name, new_value and, optionally, old_value)
- 'edit_collection_property' (with property_name, new_value and,
optionally, old_value)
        - 'migrate_schema_to_latest_version' (with from_version and
          to_version)
For a collection node, property_name must be one of
COLLECTION_NODE_PROPERTIES. For a collection, property_name must be
one of COLLECTION_PROPERTIES.
"""
if 'cmd' not in change_dict:
raise Exception('Invalid change_dict: %s' % change_dict)
self.cmd = change_dict['cmd']
if self.cmd == CMD_ADD_COLLECTION_NODE:
self.exploration_id = change_dict['exploration_id']
elif self.cmd == CMD_DELETE_COLLECTION_NODE:
self.exploration_id = change_dict['exploration_id']
elif self.cmd == CMD_EDIT_COLLECTION_NODE_PROPERTY:
if (change_dict['property_name'] not in
self.COLLECTION_NODE_PROPERTIES):
raise Exception('Invalid change_dict: %s' % change_dict)
self.exploration_id = change_dict['exploration_id']
self.property_name = change_dict['property_name']
self.new_value = change_dict['new_value']
self.old_value = change_dict.get('old_value')
elif self.cmd == CMD_EDIT_COLLECTION_PROPERTY:
if (change_dict['property_name'] not in
self.COLLECTION_PROPERTIES):
raise Exception('Invalid change_dict: %s' % change_dict)
self.property_name = change_dict['property_name']
self.new_value = change_dict['new_value']
self.old_value = change_dict.get('old_value')
elif self.cmd == CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION:
self.from_version = change_dict['from_version']
self.to_version = change_dict['to_version']
else:
raise Exception('Invalid change_dict: %s' % change_dict)
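    # Illustrative change dicts accepted by __init__ above (the values are
    # made up for the example):
    #
    #     CollectionChange({'cmd': 'add_collection_node',
    #                       'exploration_id': 'exp_1'})
    #     CollectionChange({'cmd': 'edit_collection_property',
    #                       'property_name': 'title',
    #                       'new_value': 'Fractions',
    #                       'old_value': 'Untitled'})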
class CollectionCommitLogEntry(object):
"""Value object representing a commit to an collection."""
def __init__(
self, created_on, last_updated, user_id, username, collection_id,
commit_type, commit_message, commit_cmds, version,
post_commit_status, post_commit_community_owned,
post_commit_is_private):
self.created_on = created_on
self.last_updated = last_updated
self.user_id = user_id
self.username = username
self.collection_id = collection_id
self.commit_type = commit_type
self.commit_message = commit_message
self.commit_cmds = commit_cmds
self.version = version
self.post_commit_status = post_commit_status
self.post_commit_community_owned = post_commit_community_owned
self.post_commit_is_private = post_commit_is_private
def to_dict(self):
"""This omits created_on, user_id and (for now) commit_cmds."""
return {
'last_updated': utils.get_time_in_millisecs(self.last_updated),
'username': self.username,
'collection_id': self.collection_id,
'commit_type': self.commit_type,
'commit_message': self.commit_message,
'version': self.version,
'post_commit_status': self.post_commit_status,
'post_commit_community_owned': self.post_commit_community_owned,
'post_commit_is_private': self.post_commit_is_private,
}
class CollectionNode(object):
"""Domain object describing a node in the exploration graph of a
collection. The node contains various information, including a reference to
an exploration (its ID), prerequisite skills in order to be qualified to
play the exploration, and acquired skills attained once the exploration is
completed.
"""
"""Constructs a new CollectionNode object.
Args:
exploration_id: A valid ID of an exploration referenced by this node.
prerequisite_skills: A list of skills (strings).
acquired_skills: A list of skills (strings).
"""
def __init__(self, exploration_id, prerequisite_skills, acquired_skills):
self.exploration_id = exploration_id
self.prerequisite_skills = prerequisite_skills
self.acquired_skills = acquired_skills
def to_dict(self):
return {
'exploration_id': self.exploration_id,
'prerequisite_skills': self.prerequisite_skills,
'acquired_skills': self.acquired_skills
}
@classmethod
def from_dict(cls, node_dict):
return cls(
copy.deepcopy(node_dict['exploration_id']),
copy.deepcopy(node_dict['prerequisite_skills']),
copy.deepcopy(node_dict['acquired_skills']))
@property
def skills(self):
"""Returns a set of skills where each prerequisite and acquired skill
in this collection node is represented at most once.
"""
return set(self.prerequisite_skills) | set(self.acquired_skills)
def update_prerequisite_skills(self, prerequisite_skills):
self.prerequisite_skills = copy.deepcopy(prerequisite_skills)
    def update_acquired_skills(self, acquired_skills):
        self.acquired_skills = copy.deepcopy(acquired_skills)
|
PaballoDitshego/grassroot-platform
|
docs/tests/vote_requests.py
|
Python
|
bsd-3-clause
| 2,148
| 0.021881
|
__author__ = 'aakilomar'
import requests, json, time
requests.packages.urllib3.disable_warnings()
host = "https://localhost:8443"
#from rest_requests import add_user
def add_user(phone):
post_url = host + "/api/user/add/" + str(phone)
return requests.post(post_url,None, verify=False).json()
def add_group(userid,phonenumbers):
post_url = host + "/api/group/add/" + str(userid) + "/" + phonenumbers
return requests.post(post_url,None, verify=False).json()
#/add/{userId}/{groupId}/{issue}
def add_vote(userid,groupid,issue):
post_url = host + "/api/vote/add/" + str(userid) + "/" + str(groupid) + "/" + issue
return requests.post(post_url,None, verify=False).json()
def vote_list():
list_url = host + "/api/vote/listallfuture"
    r = requests.get(list_url, verify=False)
    print r.json()
    print r.text
def set_event_time(eventid,time):
post_url = host + "/api/event/settime/" + str(eventid) + "/" + time
return requests.post(post_url,None, verify=False).json()
def rsvp(eventid,userid,message):
post_url = host + "/api/event/rsvp/" + str(eventid) + "/" + str(userid) + "/" + str(message)
return requests.post(post_url,None, verify=False).json()
def add_user_to_group(userid,groupid):
post_url = host + "/api/group/add/usertogroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).json()
def manualreminder(eventid,message):
post_url = host + "/api/event/manualreminder/" + str(eventid) + "/" + str(message)
return requests.post(post_url,None, verify=False).json()
user = add_user("0826607134")
group = add_group(user['id'],"0821111111")
user2 = add_user("0821111112")
group = add_user_to_group(user2['id'],group['id'])
print user
print group
issue = add_vote(user['id'], group['id'],"test vote")
print issue
#future_votes = vote_list()
#print future_votes
issue = set_event_time(issue['id'],"30th 7pm")
r = rsvp(issue['id'],user['id'],"yes")
r2 = rsvp(issue['id'],user2['id'],"no")
r = rsvp(issue['id'],user['id'],"yes")
ok = manualreminder(issue['id'],"|") # should use reminder mesage
ok = manualreminder(issue['id'],"my manual messsage")
|
lucc/alot
|
alot/db/envelope.py
|
Python
|
gpl-3.0
| 13,285
| 0
|
# Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
import glob
import logging
import os
import re
import email
import email.policy
from email.encoders import encode_7or8bit
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
import email.charset as charset
import gpg
from .attachment import Attachment
from .. import __version__
from .. import helper
from .. import crypto
from ..settings.const import settings
from ..errors import GPGProblem, GPGCode
charset.add_charset('utf-8', charset.QP, charset.QP, 'utf-8')
class Envelope(object):
"""a message that is not yet sent and still editable.
It holds references to unencoded! body text and mail headers among other
things. Envelope implements the python container API for easy access of
    header values. So `e['To']`, `e['To'] = 'foo@bar.baz'` and
    `e.get_all('To')` would work for an envelope `e`.
"""
headers = None
"""
dict containing the mail headers (a list of strings for each header key)
"""
body = None
"""mail body as unicode string"""
tmpfile = None
"""template text for initial content"""
attachments = None
"""list of :class:`Attachments <alot.db.attachment.Attachment>`"""
tags = []
"""tags to add after successful sendout"""
def __init__(
self, template=None, bodytext=None, headers=None, attachments=None,
sign=False, sign_key=None, encrypt=False, tags=None, replied=None,
passed=None):
"""
:param template: if not None, the envelope will be initialised by
:meth:`parsing <parse_template>` this string before
setting any other values given to this constructor.
:type template: str
:param bodytext: text used as body part
:type bodytext: str
:param headers: unencoded header values
:type headers: dict (str -> [unicode])
:param attachments: file attachments to include
:type attachments: list of :class:`~alot.db.attachment.Attachment`
:param tags: tags to add after successful sendout and saving this msg
:type tags: list of str
:param replied: message being replied to
:type replied: :class:`~alot.db.message.Message`
:param passed: message being passed on
:type replied: :class:`~alot.db.message.Message`
"""
logging.debug('TEMPLATE: %s', template)
if template:
self.parse_template(template)
logging.debug('PARSED TEMPLATE: %s', template)
logging.debug('BODY: %s', self.body)
self.body = bodytext or u''
# TODO: if this was as collections.defaultdict a number of methods
# could be simplified.
self.headers = headers or {}
self.attachments = list(attachments) if attachments is not None else []
self.sign = sign
self.sign_key = sign_key
self.encrypt = encrypt
self.encrypt_keys = {}
self.tags = tags or [] # tags to add after successful sendout
self.replied = replied # message being replied to
self.passed = passed # message being passed on
self.sent_time = None
self.modified_since_sent = False
self.sending = False # semaphore to avoid accidental double sendout
def __str__(self):
return "Envelope (%s)\n%s" % (self.headers, self.body)
def __setitem__(self, name, val):
"""setter for header values. This allows adding header like so:
envelope['Subject'] = u'sm\xf8rebr\xf8d'
"""
if name not in self.headers:
self.headers[name] = []
self.headers[name].append(val)
if self.sent_time:
self.modified_since_sent = True
def __getitem__(self, name):
"""getter for header values.
:raises: KeyError if undefined
"""
return self.headers[name][0]
def __delitem__(self, name):
del self.headers[name]
if self.sent_time:
self.modified_since_sent = True
def __contains__(self, name):
return name in self.headers
def get(self, key, fallback=None):
"""secure getter for header values that allows specifying a `fallback`
return string (defaults to None). This returns the first matching value
and doesn't raise KeyErrors"""
if key in self.headers:
value = self.headers[key][0]
else:
value = fallback
return value
def get_all(self, key, fallback=None):
"""returns all header values for given key"""
if key in self.headers:
value = self.headers[key]
else:
value = fallback or []
return value
def add(self, key, value):
"""add header value"""
if key not in self.headers:
self.headers[key] = []
self.headers[key].append(value)
if self.sent_time:
self.modified_since_sent = True
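    # Container-API sketch for the header accessors above (addresses are
    # placeholders):
    #
    #     e = Envelope()
    #     e['To'] = u'foo@bar.baz'      # __setitem__ appends to the list
    #     e.add('To', u'baz@qux.org')   # a second value for the same key
    #     e.get_all('To')               # [u'foo@bar.baz', u'baz@qux.org']
    #     e.get('Cc', fallback=u'')     # u'' instead of a KeyError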
def attach(self, attachment, filename=None, ctype=None):
"""
attach a file
:param attachment: File to attach, given as
:class:`~alot.db.attachment.Attachment` object or path to a file.
:type attachment: :class:`~alot.db.attachment.Attachment` or str
:param filename: filename to use in content-disposition.
Will be ignored if `path` matches multiple files
:param ctype: force content-type to be used for this attachment
:type ctype: str
"""
if isinstance(attachment, Attachment):
self.attachments.append(attachment)
elif isinstance(attachment, str):
path = os.path.expanduser(attachment)
part = helper.mimewrap(path, filename, ctype)
self.attachments.append(Attachment(part))
else:
raise TypeError('attach accepts an Attachment or str')
if self.sent_time:
self.modified_since_sent = True
def construct_mail(self):
"""
compiles the information contained in this envelope into a
:class:`email.Message`.
"""
# Build body text part. To properly sign/encrypt messages later on, we
# convert the text to its canonical format (as per RFC 2015).
canonical_format = self.body.encode('utf-8')
textpart = MIMEText(canonical_format, 'plain', 'utf-8')
# wrap it in a multipart container if necessary
if self.attachments:
inner_msg = MIMEMultipart()
inner_msg.attach(textpart)
# add attachments
for a in self.attachments:
inner_msg.attach(a.get_mime_representation())
else:
inner_msg = textpart
if self.sign:
plaintext = inner_msg.as_bytes(policy=email.policy.SMTP)
logging.debug('signing plaintext: %s', plaintext)
try:
signatures, signature_str = crypto.detached_signature_for(
plaintext, [self.sign_key])
if len(signatures) != 1:
raise GPGProblem("Could not sign message (GPGME "
"did not return a signature)",
code=GPGCode.KEY_CANNOT_SIGN)
except gpg.errors.GPGMEError as e:
if e.getcode() == gpg.errors.BAD_PASSPHRASE:
# If GPG_AGENT_INFO is unset or empty, the user just does
# not have gpg-agent running (properly).
if os.environ.get('GPG_AGENT_INFO', '').strip() == '':
msg = "Got invalid passphrase and GPG_AGENT_INFO\
not set. Please set up gpg-agent."
raise GPGProblem(msg, code=GPGCode.BAD_PASSPHRASE)
else:
raise GPGProblem("Bad passphrase. Is gpg-agent "
"running?",
|
quantrocket-llc/quantrocket-client
|
quantrocket/ibg.py
|
Python
|
apache-2.0
| 6,653
| 0.002856
|
# Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import getpass
from quantrocket.houston import houston
from quantrocket.cli.utils.output import json_to_cli
def get_credentials(gateway):
"""
Returns username and trading mode (paper/live) for IB Gateway.
Parameters
----------
gateway : str, required
name of IB Gateway service to get credentials for (for example, 'ibg1')
Returns
-------
dict
credentials
"""
statuses = list_gateway_statuses(gateways=[gateway])
if not statuses:
raise ValueError("no such IB Gateway: {0}".format(gateway))
response = houston.get("/{0}/credentials".format(gateway))
houston.raise_for_status_with_json(response)
# It's possible to get a 204 empty response
if not response.content:
return {}
return response.json()
def set_credentials(gateway, username=None, password=None, trading_mode=None):
"""
Set username/password and trading mode (paper/live) for IB Gateway.
Can be used to set new credentials or switch between paper and live trading
(must have previously entered live credentials). Setting new credentials will
restart IB Gateway and takes a moment to complete.
Credentials are encrypted at rest and never leave your deployment.
Parameters
----------
gateway : str, required
name of IB Gateway service to set credentials for (for example, 'ibg1')
username : str, optional
IBKR username (optional if only modifying trading environment)
password : str, optional
IBKR password (if omitted and username is provided, will be prompted
for password)
trading_mode : str, optional
the trading mode to use ('paper' or 'live')
Returns
-------
dict
status message
"""
statuses = list_gateway_statuses(gateways=[gateway])
if not statuses:
raise ValueError("no such IB Gateway: {0}".format(gateway))
if username and not password:
password = getpass.getpass(prompt="Enter IBKR Password: ")
data = {}
if username:
data["username"] = username
if password:
data["password"] = password
if trading_mode:
data["trading_mode"] = trading_mode
response = houston.put("/{0}/credentials".format(gateway), data=data, timeout=180)
houston.raise_for_status_with_json(response)
return response.json()
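# Usage sketch (gateway name and credentials are placeholders, not real
# accounts):
#
#     set_credentials("ibg1", username="myuser",
#                     trading_mode="paper")   # prompts for the password
#     get_credentials("ibg1")                 # e.g. {"trading_mode": "paper", ...}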
def _cli_get_or_set_credentials(*args, **kwargs):
if kwargs.get("username", None) or kwargs.get("password", None) or kwargs.get("trading_mode", None):
return json_to_cli(set_credentials, *args, **kwargs)
else:
return json_to_cli(get_credentials, gateway=kwargs.get("gateway", None))
def list_gateway_statuses(status=None, gateways=None):
"""
Query statuses of IB Gateways.
Parameters
----------
status : str, optional
limit to IB Gateways in this status. Possible choices: running, stopped, error
gateways : list of str, optional
limit to these IB Gateways
Returns
-------
dict of gateway:status (if status arg not provided), or list of gateways (if status arg provided)
"""
params = {}
if gateways:
params["gateways"] = gateways
if status:
params["status"] = status
response = houston.get("/ibgrouter/gateways", params=params)
houston.raise_for_status_with_json(response)
    return response.json()
def _cli_list_gateway_statuses(*args, **kwargs):
return json_to_cli(list_gateway_statuses, *args, **kwargs)
def start_gateways(gateways=None, wait=False):
"""
Start one or more IB Gateways.
Parameters
----------
gateways : list of str, optional
limit to these IB Gateways
wait: bool
wait for the IB Gateway to start before returning (default is to start
the gateways asynchronously)
Returns
-------
dict
status message
"""
params = {"wait": wait}
if gateways:
params["gateways"] = gateways
response = houston.post("/ibgrouter/gateways", params=params, timeout=120)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_start_gateways(*args, **kwargs):
return json_to_cli(start_gateways, *args, **kwargs)
def stop_gateways(gateways=None, wait=False):
"""
Stop one or more IB Gateways.
Parameters
----------
gateways : list of str, optional
limit to these IB Gateways
wait: bool
wait for the IB Gateway to stop before returning (default is to stop
the gateways asynchronously)
Returns
-------
dict
status message
"""
params = {"wait": wait}
if gateways:
params["gateways"] = gateways
response = houston.delete("/ibgrouter/gateways", params=params, timeout=60)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_stop_gateways(*args, **kwargs):
return json_to_cli(stop_gateways, *args, **kwargs)
def load_ibg_config(filename):
"""
Upload a new IB Gateway permissions config.
Permission configs are only necessary when running multiple IB Gateways with
differing market data permissions.
Parameters
----------
filename : str, required
the config file to upload
Returns
-------
dict
status message
"""
with open(filename) as file:
response = houston.put("/ibgrouter/config", data=file.read())
houston.raise_for_status_with_json(response)
return response.json()
def get_ibg_config():
"""
Returns the current IB Gateway permissions config.
Returns
-------
dict
the config as a dict
"""
response = houston.get("/ibgrouter/config")
houston.raise_for_status_with_json(response)
# It's possible to get a 204 empty response
if not response.content:
return {}
return response.json()
def _cli_load_or_show_config(filename=None):
if filename:
return json_to_cli(load_ibg_config, filename)
else:
return json_to_cli(get_ibg_config)
|
b0ttl3z/SickRage
|
sickbeard/metadata/helpers.py
|
Python
|
gpl-3.0
| 1,452
| 0.001377
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
from sickbeard import helpers, logger
meta_session = helpers.make_session()
def getShowImage(url, imgNum=None):
if url is None:
return None
# if they provided a fanart number try to use it instead
if imgNum is not None:
tempURL = url.split('-')[0] + "-" + str(imgNum) + ".jpg"
else:
tempURL = url
logger.log("Fetching image from " + tempURL, logger.DEBUG)
image_data = helpers.getURL(tempURL, session=meta_session, returns='content')
if image_data is None:
logger.log("There was an error trying to retrieve the image, aborting", logger.WARNING)
return
return image_data
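# Worked example of the fanart-number rewrite above (URL is hypothetical):
# with url = "http://example.com/fanart-1.jpg" and imgNum = 3, the split on
# '-' keeps "http://example.com/fanart" and tempURL becomes
# "http://example.com/fanart-3.jpg" before fetching.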
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-azure-mgmt-deploymentmanager/package.py
|
Python
|
lgpl-2.1
| 877
| 0.002281
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *


class PyAzureMgmtDeploymentmanager(PythonPackage):
"""Microsoft Azure Deployment Manager Client Library for Python."""
homepage = "https://github.com/Azure/a
|
zure-sdk-for-python"
pypi = "azure-mgmt-deploymentmanager/azure-mgmt-deploymentmanager-0.2.0.zip"
version('0.2.0', sha256='46e342227993fc9acab1dda42f2eb566b522a8c945ab9d0eea56276b46f6d730')
depends_on('py-setuptools', type='build')
depends_on('py-msrest@0.5.0:', type=('build', 'run'))
depends_on('py-msrestazure@0.4.32:1', type=('build', 'run'))
depends_on('py-azure-common@1.1:1', type=('build', 'run'))
depends_on('py-azure-mgmt-nspkg', when='^python@:2', type=('build', 'run'))
|
Mkaysi/weechat
|
doc/docgen.py
|
Python
|
gpl-3.0
| 30,574
| 0.000065
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2015 Sébastien Helleu <flashcode@flashtux.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Documentation generator for WeeChat: build include files with commands,
options, infos, infolists, hdata and completions for WeeChat core and
plugins.
Instructions to build config files yourself in WeeChat directories (replace
all paths with your path to WeeChat):
1. run WeeChat and load this script, with following command:
/python load ~/src/weechat/doc/docgen.py
2. change path to build in your doc/ directory:
/set plugins.var.python.docgen.path "~/src/weechat/doc"
3. run docgen command:
/docgen
Note: it is recommended to load only this script when building doc.
Files should be in ~/src/weechat/doc/xx/autogen/ (where xx is language).
"""
from __future__ import print_function
SCRIPT_NAME = 'docgen'
SCRIPT_AUTHOR = 'Sébastien Helleu <flashcode@flashtux.org>'
SCRIPT_VERSION = '0.1'
SCRIPT_LICENSE = 'GPL3'
SCRIPT_DESC = 'Documentation generator for WeeChat'
SCRIPT_COMMAND = 'docgen'
IMPORT_OK = True
# pylint: disable=wrong-import-position
try:
import gettext
import hashlib
import os
import re
from collections import defaultdict
from operator import itemgetter
except ImportError as message:
print('Missing package(s) for {0}: {1}'.format(SCRIPT_NAME, message))
IMPORT_OK = False
try:
import weechat # pylint: disable=import-error
except ImportError:
print('This script must be run under WeeChat.')
print('Get WeeChat now at: https://weechat.org/')
IMPORT_OK = False
# default path where doc files will be written (should be doc/ in sources
# package tree)
# path must have subdirectories with languages and autogen directory:
# path
# |-- en
# | |-- autogen
# |-- fr
# | |-- autogen
# ...
DEFAULT_PATH = '~/src/weechat/doc'
# list of locales for which we want to build doc files to include
LOCALE_LIST = ('en_US', 'fr_FR', 'it_IT', 'de_DE', 'ja_JP', 'pl_PL')
# all commands/options/.. of following plugins will produce a file
# non-listed plugins will be ignored
# value: "c" = plugin may have many commands
# "o" = write config options for plugin
# if plugin is listed without "c", that means plugin has only one command
# /name (where "name" is name of plugin)
# Note: we consider core is a plugin called "weechat"
PLUGIN_LIST = {
'sec': 'o',
'weechat': 'co',
'alias': '',
'aspell': 'o',
'charset': 'o',
'exec': 'o',
'fifo': 'o',
'irc': 'co',
'logger': 'o',
'relay': 'o',
'script': 'o',
'perl': '',
'python': '',
'javascript': '',
'ruby': '',
'lua': '',
'tcl': '',
'guile': '',
'trigger': 'o',
'xfer': 'co',
}
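# Example reading of the flags above: 'irc': 'co' means the irc plugin has
# many commands to document and its config options are written out, while
# 'alias': '' means only the single /alias command is documented.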
# options to ignore
IGNORE_OPTIONS = (
r'aspell\.dict\..*',
r'aspell\.option\..*',
r'charset\.decode\..*',
r'charset\.encode\..*',
r'irc\.msgbuffer\..*',
r'irc\.ctcp\..*',
r'irc\.ignore\..*',
r'irc\.server\..*',
r'jabber\.server\..*',
r'logger\.level\..*',
r'logger\.mask\..*',
r'relay\.port\..*',
r'trigger\.trigger\..*',
r'weechat\.palette\..*',
r'weechat\.proxy\..*',
r'weechat\.bar\..*',
r'weechat\.debug\..*',
r'weechat\.notify\..*',
)
# completions to ignore
IGNORE_COMPLETIONS_ITEMS = (
'docgen.*',
'jabber.*',
'weeget.*',
)
class AutogenDoc(object):
"""A class to write auto-generated doc files."""
def __init__(self, directory, doc, name):
"""Initialize auto-generated doc file."""
self.filename = os.path.join(directory, doc, name + '.asciidoc')
self.filename_tmp = self.filename + '.tmp'
self._file = open(self.filename_tmp, 'w')
self.write('//\n')
self.write('// This file is auto-generated by script docgen.py.\n')
self.write('// DO NOT EDIT BY HAND!\n')
self.write('//\n')
def write(self, string):
"""Write a line in auto-generated doc file."""
self._file.write(string)
def update(self, obj_name, num_files, num_files_updated):
"""Update doc file if needed (if content has changed)."""
# close temp file
self._file.close()
# compute checksum on old file
try:
with open(self.filename, 'r') as _file:
shaold = hashlib.sha256(_file.read()).hexdigest()
except IOError:
shaold = ''
# compute checksum on new (temp) file
try:
with open(self.filename_tmp, 'r') as _file:
shanew = hashlib.sha256(_file.read()).hexdigest()
except IOError:
shanew = ''
# compare checksums
if shaold != shanew:
# update doc file
if os.path.exists(self.filename):
os.unlink(self.filename)
os.rename(self.filename_tmp, self.filename)
num_files_updated['total1'] += 1
num_files_updated['total2'] += 1
num_files_updated[obj_name] += 1
else:
os.unlink(self.filename_tmp)
# update counters
num_files['total1'] += 1
num_files['total2'] += 1
num_files[obj_name] += 1
def get_commands():
"""
Get list of WeeChat/plugins commands as dictionary with 3 indexes: plugin,
command, xxx.
"""
commands = defaultdict(lambda: defaultdict(defaultdict))
infolist = weechat.infolist_get('hook', '', 'command')
while weechat.infolist_next(infolist):
plugin = weechat.infolist_string(infolist, 'plugin_name') or 'weechat'
if plugin in PLUGIN_LIST:
command = weechat.infolist_string(infolist, 'command')
if command == plugin or 'c' in PLUGIN_LIST[plugin]:
for key in ('description', 'args', 'args_description',
'completion'):
commands[plugin][command][key] = \
weechat.infolist_string(infolist, key)
weechat.infolist_free(infolist)
return commands
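# Shape of the mapping returned by get_commands() (illustrative):
#
#     commands['irc']['msg']['description']   # help text for /msg
#
# i.e. plugin -> command -> key, with keys from the tuple iterated above.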
def get_options():
"""
Get list of WeeChat/plugins config options as dictionary with 4 indexes:
config, section, option, xxx.
"""
options = \
defaultdict(lambda: defaultdict(lambda: defaultdict(defaultdict)))
infolist = weechat.infolist_get('option', '', '')
while weechat.infolist_next(infolist):
full_name = weechat.infolist_string(infolist, 'full_name')
if not re.search('|'.join(IGNORE_OPTIONS), full_name):
config = weechat.infolist_string(infolist, 'config_name')
if config in PLUGIN_LIST and 'o' in PLUGIN_LIST[config]:
section = weechat.infolist_string(infolist, 'section_name')
option = weechat.infolist_string(infolist, 'option_name')
for key in ('type', 'string_values', 'default_value',
'description'):
options[config][section][option][key] = \
weechat.infolist_string(infolist, key)
for key in ('min', 'max', 'null_value_allowed'):
options[config][section][option][key] = \
weechat.infolist_integer(infolist, key)
    weechat.infolist_free(infolist)
return options
def get_infos():
"""
Get list of WeeChat/plugins infos as dictionary with 3 indexes: plugin,
name, xxx.
"""
infos = defaultdict(lambda: defaultdict(defaultdict))
infolist = weechat.infolist_get('hook', '', 'info')
    while weechat.infolist_next(infolist):
|
sbrandtb/flop
|
flop/dashboard/views.py
|
Python
|
mit
| 320
| 0
|
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
from flop.cooking.forms import MealForm, MealContributionFormSet
from flop.decorators import view_decorator
@view_decorator(login_required)
class IndexView(TemplateView):
    template_name = 'dashboard/index.html'
|
containers-tools/base
|
base/file.py
|
Python
|
mit
| 3,057
| 0.001636
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
import os
import shutil
import grp
import pwd
from cct.module import Module
from cct.lib.file_utils import create_dir
class File(Module):
def copy(self, source, destination):
"""
Copies file.
Args:
source: path to file
destination: path where file should be copied
"""
create_dir(destination)
shutil.copy(source, destination)
def link(self, source, destination):
"""
        Creates a symbolic link.
        Args:
            source: path the symbolic link will point to (the link target)
            destination: path of the symbolic link to create
"""
create_dir(destination)
os.symlink(source, destination)
def move(self, source, destination):
"""
Moves file.
Args:
source: path to file
destination: path where file should be moved
"""
create_dir(destination)
shutil.move(source, destination)
def remove(self, path):
"""
Removes file.
Args:
            path: path to file to be removed
"""
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.unlink(path)
def chown(self, owner, group, path, recursive=False):
"""
Change the ownership of a path.
Args:
owner: the owner (numeric or name) to change ownership to
group: the group (numeric or name) to change groupship to
path: the path to operate on
recursive: if path is a directory, recursively change ownership for all
paths within
"""
# supplied owner/group might be symbolic (e.g. 'wheel') or numeric.
# Try interpreting symbolically first
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
gid = int(group,0)
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
uid = int(owner,0)
        # Beware: os.chown takes (path, uid, gid), the reverse of this
        # method's (owner, group, path) argument order
os.chown(path, uid, gid)
if recursive and os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
for f in (dirnames + filenames):
os.chown(os.path.join(dirpath, f), uid, gid)
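    # Usage sketch: symbolic and numeric IDs both work (paths and names are
    # placeholders; File is a cct Module, so construction is elided here).
    #
    #     f.chown('root', 'wheel', '/data')           # resolved via pwd/grp
    #     f.chown('0', '0', '/data', recursive=True)  # falls back to int(x, 0)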
def chmod(self, mode, path, recursive=False):
"""
Change the permissions of a path.
Args:
path: the path to operate on
mode: the numeric mode to set
recursive: whether to change mode recursively
"""
mode = int(mode,0)
        # Beware: os.chmod takes (path, mode), the reverse of this
        # method's (mode, path) argument order
os.chmod(path, mode)
if recursive and os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
for f in (dirnames + filenames):
os.chmod(os.path.join(dirpath, f), mode)
|
jhawthorn/plugin.video.gomtv.net
|
gomtv.py
|
Python
|
gpl-3.0
| 9,910
| 0.005045
|
import urllib, urllib2, re, cookielib, os, tempfile, json, md5, time
from BeautifulSoup import BeautifulSoup
import proxy
from gomutil import *
class NotLoggedInException(Exception):
pass
def request(url, params=None, headers={}, opener=None):
data = params and urllib.urlencode(params)
req = urllib2.Request(url, data, headers)
if opener:
response = opener.open(req)
else:
response = urllib2.urlopen(req)
r = response.read()
response.close()
return r
class VodSet(object):
def __init__(self, params):
self.params = params
self._fix_params()
self.xml = request('http://gox.gomtv.net/cgi-bin/gox_vod_sfile.cgi', self.params)
def _fix_params(self):
if 'uip' not in self.params:
self.params["uip"] = request('http://www.gomtv.net/webPlayer/getIP.gom')
self.params["adstate"] = "0"
self.params["goxkey"] = "qoaEl"
keys = ["leagueid", "conid", "goxkey", "level", "uno", "uip", "adstate", "vjoinid", "nid"]
hashstr = "".join([self.params[key] for key in keys])
self.params['goxkey'] = md5.new(hashstr).hexdigest()
def get_error(self):
if re.search('purchase_btn', self.xml):
return "Available for ticket holders only."
else:
return "Unknown error"
def _get_href(self):
match = re.search('<REF\s+href="(.+)"\s+reftype="vod"', self.xml)
if match:
            href = match.group(1).replace('&amp;', '&').replace(' ', '%20')
remote_ip = re.search("//([0-9.]+)/", href).group(1)
payload = gom_key_payload(remote_ip, self.params)
return (href, remote_ip, payload)
else:
return (None, None, None)
def get_url(self):
href, remote_ip, payload = self._get_href()
return href and "%s&key=%s" % (href, gom_stream_key(remote_ip, payload))
def get_proxy_url(self):
href, remote_ip, payload = self._get_href()
return href and proxy.url(href, payload)
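# Hedged usage sketch for VodSet: the params dict must supply the keys that
# _fix_params hashes into 'goxkey' (leagueid, conid, level, uno, vjoinid,
# nid); the values below are placeholders, not real IDs.
#
#     vs = VodSet({'leagueid': '1', 'conid': '2', 'level': '65',
#                  'uno': '0', 'vjoinid': '0', 'nid': '0'})
#     url = vs.get_url() or vs.get_error()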
class GOMtv(object):
VODLIST_ORDER_MOST_RECENT = 1
VODLIST_ORDER_MOST_VIEWED = 2
VODLIST_ORDER_MOST_COMMENTED = 3
VODLIST_TYPE_ALL = 0
VODLIST_TYPE_CODE_S = 32
VODLIST_TYPE_CODE_A = 16
VODLIST_TYPE_UP_DOWN = 64
AUTH_GOMTV = 1
AUTH_TWITTER = 2
AUTH_FACEBOOK = 3
LEVEL = {
'EHQ': 65,
'HQ': 60,
'SQ': 6
}
OLDLEVEL = {
'EHQ': 50,
'HQ': 50,
'SQ': 5
}
def __init__(self, cookie_path=None):
self.vod_sets = {}
if cookie_path is None:
cookie_path = "%s%scookies_gomtv.txt" % (tempfile.gettempdir(), os.path.sep)
self.cookie_jar = cookielib.LWPCookieJar(cookie_path)
if not os.path.exists(os.path.dirname(cookie_path)):
os.makedirs(os.path.dirname(cookie_path))
if (os.path.isfile(cookie_path) and os.path.getsize(cookie_path) > 0):
self.cookie_jar.load(cookie_path,True)
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie_jar))
def _request(self, url, data=None, headers={}):
r = request(url, data, headers, opener=self.opener)
# Ugly hack required to fix cookie names.
# Guessing there's some javascript somewhere on that mess of a website
# that uppercases the cookies..?
for cookie in self.cookie_jar:
if cookie.name.startswith("SES_"):
cookie.name = cookie.name.upper()
self.cookie_jar.save(None,True)
return r
def set_cookie(self, name, value):
exp = time.time() + 24 * 60 * 60
cookie = cookielib.Cookie(version=0, name=name, value=value, port=None, port_specified=False,
domain='.gomtv.net', domain_specified=True, domain_initial_dot=True,
path='/', path_specified=True, secure=False, expires=exp,
discard=False, comment=None, comment_url=None, rest={})
self.cookie_jar.set_cookie(cookie)
def login(self, username, password, auth_type=AUTH_GOMTV):
self.cookie_jar.clear()
if auth_type == self.AUTH_GOMTV:
form = {
"mb_username": username,
"mb_password": password,
"cmd": "login",
"rememberme": "1"
}
ret = self._request("https://ssl.gomtv.net/userinfo/loginProcess.gom", form, {'Referer': 'http://www.gomtv.net/'})
cookies = [cookie.name for cookie in self.cookie_jar if cookie.domain == '.gomtv.net']
return 'SES_MEMBERNO' in cookies
elif auth_type == self.AUTH_TWITTER:
data = self._request("http://www.gomtv.net/twitter/redirect.gom?burl=/index.gom")
location = re.search("document.location.replace\(\"(.*)\"\)", data).group(1)
oauth_token = re.search("setCookie\('oauth_token', \"(.*)\"", data).group(1)
            oauth_token_secret = re.search("setCookie\('oauth_token_secret', \"(.*)\"", data).group(1)
self.set_cookie("
|
oauth_token", oauth_token)
self.set_cookie("oauth_token_secret", oauth_token_secret)
data = self._request(location)
soup = BeautifulSoup(data)
oauth_token = soup.find("input", {"id": "oauth_token"})["value"]
auth_token = soup.find("input", {"name": "authenticity_token"})["value"]
url = soup.find("form")["action"]
data = self._request(url, {"oauth_token": oauth_token,
"session[username_or_email]": username,
"session[password]": password,
"submit": "Sign in",
"authenticity_token": auth_token})
refresh = re.search('<meta http-equiv="refresh" content="0;url=(.*)">', data)
if refresh is None:
return False
else:
location = refresh.group(1)
data = self._request(location)
return True
elif auth_type == self.AUTH_FACEBOOK:
data = self._request("http://www.gomtv.net/facebook/index.gom?burl=/index.gom")
soup = BeautifulSoup(data)
# already logged in
if data.startswith("<script>"):
return False
url = soup.find("form")["action"]
payload = {}
for field in soup.findAll("input"):
if not field["name"] == "charset_test":
payload[field["name"]] = field["value"]
payload["email"] = username
payload["pass"] = password
data = self._request(url, payload)
if re.search("<title>Logga in", data) is None:
return True
else:
return False
def get_league_list(self):
soup = BeautifulSoup(self._request("http://www.gomtv.net/view/channelDetails.gom?gameid=0"))
leagues = soup.findAll("dl", "league_list")
result = []
for league in leagues:
result.append({"id": league.find("a")["href"].replace("/", ""),
"logo": league.find("img")["src"],
"name": league.find("strong").find(text=True)})
return result
def get_most_recent_list(self, page=1):
return self.get_vod_list(league=None, page=page)
def get_vod_list(self, order=1, page=1, league=None, type=VODLIST_TYPE_ALL):
if league is None:
url = "http://www.gomtv.net/videos/index.gom?page=%d" % (page)
else:
url = "http://www.gomtv.net/%s/vod/?page=%d&order=%d<ype=%d" % (league, page, order, type)
soup = BeautifulSoup(self._request(url))
thumb_links = soup.findAll("td", {"class": ["vod_info", "listOff"]})
nums = soup.findAll("a", "num", href=re.compile("page=[0-9]+"))
if len(nums) > 0:
last = int(re.search("page=([0-9]+)",
nums[-1]["h
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/jockey/xorg_driver.py
|
Python
|
gpl-3.0
| 48
| 0.020833
|
../../../../share/pyshared/jockey/xorg_driver.py
|
SNoiraud/gramps
|
gramps/gen/filters/rules/person/_hasrelationship.py
|
Python
|
gpl-2.0
| 3,035
| 0.005601
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .. import Rule
from ....lib.familyreltype import FamilyRelType
#-------------------------------------------------------------------------
#
# HasRelationship
#
#-------------------------------------------------------------------------
class HasRelationship(Rule):
"""Rule that checks for a person who has a particular relationship"""
labels = [ _('Number of relationships:'),
_('Relationship type:'),
_('Number of children:') ]
name = _('People with the <relationships>')
description = _("Matches people with a particular relationship")
category = _('Family filters')
def apply(self,db,person):
rel_type = 0
cnt = 0
num_rel = len(person.get_family_handle_list())
if self.list[1]:
specified_type = FamilyRelType()
specified_type.set_from_xml_str(self.list[1])
# count children and look for a relationship type match
for f_id in person.get_family_handle_list():
f = db.get_family_from_handle(f_id)
if f:
                cnt = cnt + len(f.get_child_ref_list())
if self.list[1] and specified_type == f.get_relationship():
rel_type = 1
# if number of relations specified
if self.list[0]:
try:
v = int(self.list[0])
except:
return False
if v != num_rel:
return False
        # number of children
if self.list[2]:
try:
v = int(self.list[2])
except:
return False
if v != cnt:
return False
# relation
if self.list[1]:
return rel_type == 1
else:
return True
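# Worked reading of apply() above (parameter values are illustrative): with
# self.list == ['2', '<marriage as XML string>', ''] the rule matches a
# person with exactly two families, at least one of relationship type
# marriage, and no constraint on the number of children.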
|
bstrebel/OxAPI
|
test/_attachment.py
|
Python
|
gpl-2.0
| 3,939
| 0.010916
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,sys, re, json, requests
from oxapi import *
def get_a_task(ox):
folder = ox.get_standard_folder('tasks')
task = list(ox.get_tasks(folder.id))[0]
return task
def upload(bean, args=[{'content':None,'file':None, 'mimetype':'text/plain','name':'attachment.txt'}]):
from requests.packages.urllib3.fields import RequestField
from requests.packages.urllib3.filepost import encode_multipart_formdata
ox = bean._ox
url = ox._url('attachment', 'attach')
params = ox._params()
meta = {'module': bean.module_type,
#'attached': bean.id,
'folder': bean.folder_id}
    counter = 0
    fields = []
for data in args:
# json metadata
rf = RequestField(name='json_' + str(counter) ,data=json.dumps(meta))
rf.make_multipart(content_disposition='form-data')
fields.append(rf)
# content: data or file to read
filename = 'attachment.txt'
mimetype = 'text/plain'
content = None
if 'content' in data:
content = data['content']
else:
if 'file' in data:
filename = data['file']
if os.path.isfile(filename):
with open(filename, 'rb') as fh:
content = fh.read()
if content is None:
#TODO: process error
return None
if 'name' in data:
filename = data['name']
mimetype = 'text/plain'
if 'mimetype' in data:
mimetype = data['mimetype']
rf = RequestField(name='file_' + str(counter), data=content, filename=filename)
rf.make_multipart(content_disposition='form-data',content_type=mimetype)
fields.append(rf)
post_body, content_type = encode_multipart_formdata(fields)
content_type = ''.join(('multipart/mixed',) + content_type.partition(';')[1:])
headers = {'Content-Type': content_type}
response = requests.post(url, cookies=ox._cookies, params=params, headers=headers, data=post_body)
if response and response.status_code == 200:
regex='\((\{.*\})\)'
match = re.search(regex, response.content)
if match:
return json.loads(match.group(1))
return None
def create_attachment(ox, task):
from requests.packages.urllib3.fields import RequestField
from requests.packages.urllib3.filepost import encode_multipart_formdata
url = ox._url('attachment', 'attach')
params = ox._params()
json_0 = {'module': task.module_type,
'attached': task.id,
'folder': task.folder_id}
fields = []
rf = RequestField(name='json_0',data=json.dumps(json_0))
rf.make_multipart(content_disposition='form-data')
fields.append(rf)
rf = RequestField(name='file_0', data="TEXT", filename='attachment.txt')
rf.make_multipart(content_disposition='form-data',content_type='text/plain')
fields.append(rf)
post_body, content_type = encode_multipart_formdata(fields)
content_type = ''.join(('multipart/mixed',) + content_type.partition(';')[1:])
headers = {'Content-Type': content_type}
response = requests.post(url, cookies=ox._cookies, params=params, headers=headers, data=post_body)
if response and response.status_code == 200:
regex='\((\{.*\})\)'
match = re.search(regex, response.content)
if match:
return json.loads(match.group(1))
return None
if __name__ == '__main__':
with OxHttpAPI.get_session() as ox:
task = get_a_task(ox)
# args = [{ 'file':'attachments_module.py' }]
# upload(task, args)
#create_attachment(ox,task)
#attachments = list(ox.get_attachments(task))
attachments = ox.get_attachments(task)
pass
|
cbrepo/django-reversion
|
src/reversion/tests_deprecated.py
|
Python
|
bsd-3-clause
| 25,219
| 0.005512
|
"""Tests for the deprecated version of the django-reversion API."""
from __future__ import with_statement
import datetime
from django.db import models, transaction
from django.test import TestCase
from django.core.management import call_command
import reversion
from reversion.models import Version, Revision, VERSION_ADD, VERSION_CHANGE, VERSION_DELETE
from reversion.revisions import RegistrationError
from reversion.tests import UTC
class ReversionTestModel(models.Model):
"""A test model for reversion."""
name = models.CharField(max_length=100)
class Meta:
app_label = "auth" # Hack: Cannot use an app_label that is under South control, due to http://south.aeracode.org/ticket/520
str_pk_gen = 0
def get_str_pk():
global str_pk_gen
    str_pk_gen += 1
return str(str_pk_gen)
class ReversionTestModelStrPrimary(models.Model):
"""A test model for reversion."""
id = models.CharField(
primary_key = True,
max_length = 100,
default = get_str_pk
)
name = models.CharField(max_length=100)
class Meta:
app_label = "auth" # Hack: Cannot use an app_label that is under South control, due to http://south.aeracode.org/ticket/520
class ReversionRegistrationTest(TestCase):
"""Tests the django-reversion registration functionality."""
def setUp(self):
"""Sets up the ReversionTestModel."""
reversion.register(ReversionTestModel)
def testCanRegisterModel(self):
"""Tests that a model can be registered."""
self.assertTrue(reversion.is_registered(ReversionTestModel))
# Check that duplicate registration is disallowed.
self.assertRaises(RegistrationError, lambda: reversion.register(ReversionTestModel))
def testCanUnregisterModel(self):
"""Tests that a model can be unregistered."""
reversion.unregister(ReversionTestModel)
try:
self.assertFalse(reversion.is_registered(ReversionTestModel))
# Check that duplicate unregistration is disallowed.
self.assertRaises(RegistrationError, lambda: reversion.unregister(ReversionTestModel))
finally:
# Re-register the model.
reversion.register(ReversionTestModel)
def tearDown(self):
"""Tears down the tests."""
reversion.unregister(ReversionTestModel)
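# A hedged aside, not part of the original test module: the deprecated
# register() call also accepted per-model options, e.g. restricting the
# tracked fields (exact kwargs depend on the reversion release):
#
#     reversion.register(ReversionTestModel, fields=("name",))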
class ReversionCreateTest(TestCase):
"""Tests the django-reversion revision creation functionality."""
model = ReversionTestModel
def setUp(self):
"""Sets up the ReversionTestModel."""
# Clear the database.
Revision.objects.all().delete()
self.model.objects.all().delete()
# Register the model.
reversion.register(self.model)
def testCanSaveWithNoRevision(self):
"""Tests that without an active revision, no model is saved."""
test = self.model.objects.create(name="test1.0")
self.assertEqual(Version.objects.get_for_object(test).count(), 0)
def testRevisionContextManager(self):
"""Tests that the revision context manager works."""
with reversion.revision:
test = self.model.objects.create(name="test1.0")
self.assertEqual(Version.objects.get_for_object(test).count(), 1)
def testRevisionDecorator(self):
"""Tests that the revision function decorator works."""
@reversion.revision.create_on_success
def create_revision():
return self.model.objects.create(name="test1.0")
self.assertEqual(Version.objects.get_for_object(create_revision()).count(), 1)
def testRevisionAbandonedOnError(self):
"""Tests that the revision is abandoned on error."""
# Create the first revision.
with reversion.revision:
test = self.model.objects.create(name="test1.0")
# Create the second revision.
try:
with reversion.revision:
test.name = "test1.1"
test.save()
raise Exception()
except:
transaction.rollback()
# Check that there is still only one revision.
self.assertEqual(Version.objects.get_for_object(test).count(), 1)
# Assert the revision is not invalid.
self.assertFalse(reversion.revision._revision_context_manager.is_invalid())
def tearDown(self):
"""Tears down the tests."""
# Unregister the model.
reversion.unregister(self.model)
# Clear the database.
Revision.objects.all().delete()
self.model.objects.all().delete()
class ReversionCreateStrPrimaryTest(ReversionCreateTest):
model = ReversionTestModelStrPrimary
class ReversionQueryTest(TestCase):
    """Tests that django-reversion can retrieve revisions using the api."""
    model = ReversionTestModel
def setUp(self):
"""Sets up the ReversionTestModel."""
# Clear the database.
Revision.objects.all().delete()
self.model.objects.all().delete()
# Register the model.
reversion.register(self.model)
# Create some initial revisions.
with reversion.revision:
self.test = self.model.objects.create(name="test1.0")
with reversion.revision:
self.test.name = "test1.1"
self.test.save()
with reversion.revision:
self.test.name = "test1.2"
self.test.save()
def testCanGetVersions(self):
"""Tests that the versions for an obj can be retrieved."""
versions = Version.objects.get_for_object(self.test)
self.assertEqual(versions[0].field_dict["name"], "test1.0")
self.assertEqual(versions[1].field_dict["name"], "test1.1")
self.assertEqual(versions[2].field_dict["name"], "test1.2")
def testCanGetUniqueVersions(self):
"""Tests that the unique versions for an objext can be retrieved."""
with reversion.revision:
self.test.save()
versions = Version.objects.get_unique_for_object(self.test)
# Check correct version data.
self.assertEqual(versions[0].field_dict["name"], "test1.0")
self.assertEqual(versions[1].field_dict["name"], "test1.1")
self.assertEqual(versions[2].field_dict["name"], "test1.2")
# Check correct number of versions.
self.assertEqual(len(versions), 3)
def testCanGetForDate(self):
"""Tests that the latest version for a particular date can be loaded."""
with self.settings(USE_TZ=True):
self.assertEqual(Version.objects.get_for_date(self.test, datetime.datetime.now(UTC())).field_dict["name"], "test1.2")
def testCanRevert(self):
"""Tests that an object can be reverted to a previous revision."""
oldest = Version.objects.get_for_object(self.test)[0]
self.assertEqual(oldest.field_dict["name"], "test1.0")
oldest.revert()
self.assertEqual(self.model.objects.get().name, "test1.0")
def testCanGetDeleted(self):
"""Tests that deleted objects can be retrieved."""
self.assertEqual(len(Version.objects.get_deleted(self.model)), 0)
# Create and delete another model.
with reversion.revision:
test2 = self.model.objects.create(name="test2.0")
test2.delete()
# Delete the test model.
self.test.delete()
# Ensure that there are now two deleted models.
deleted = Version.objects.get_deleted(self.model)
self.assertEqual(len(deleted), 2)
self.assertEqual(deleted[0].field_dict["name"], "test1.2")
self.assertEqual(deleted[1].field_dict["name"], "test2.0")
def testCanRecoverDeleted(self):
"""Tests that a deleted object can be recovered."""
self.test.delete()
# Ensure deleted.
self.assertEqual(self.model.objects.count(), 0)
# Recover.
|
3dfxsoftware/cbss-addons
|
account_analytic_btree/account_analytic_btree.py
|
Python
|
gpl-2.0
| 1,689
| 0
|
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
############################################################################
# Coded by: Juan Carlos Hernandez Funes (info@vauxoo.com)
# Planned by: Moises Augusto Lopez Calderon (info@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class account_analytic_account(osv.Model):
_inhe
|
rit = 'account.analytic.account'
_order = "parent_left"
_parent_order = "code"
_parent_store = True
_columns = {
        'parent_right': fields.integer('Parent Right', select=1),
'parent_left': fields.integer('Parent Left', select=1),
}
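# A self-contained illustration added for clarity (not part of the original
# module): _parent_store maintains parent_left/parent_right as nested
# intervals, so subtree membership becomes two comparisons instead of a
# recursive parent walk.
def _in_subtree_sketch(node, root):
    """node and root are dicts carrying 'parent_left'/'parent_right'."""
    return (root['parent_left'] <= node['parent_left']
            and node['parent_right'] <= root['parent_right'])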
|
akshmakov/Dolfin-Fijee-Fork
|
test/unit/book/python/chapter_1_files/stationary/poisson/dnr_p2D.py
|
Python
|
lgpl-3.0
| 3,020
| 0.004305
|
"""
FEniCS tutorial demo program: Poisson equation with Dirichlet,
Neumann and Robin conditions.
The solution is checked to coincide with the exact solution at all nodes.
The file is a modification of dn2_p2D.py. Note that the boundary is now also
split into two distinct parts (separate objects and integrations)
and we have a Robin condition instead of a Neumann condition at y=0.
"""
from dolfin import *
import numpy
#-------------- Preprocessing step -----------------
# Create mesh and define function space
mesh = UnitSquareMesh(3, 2)
V = FunctionSpace(mesh, 'Lagrange', 1)
# Define boundary segments for Neumann, Robin and Dirichlet conditions
# Create mesh function over cell facets
boundary_parts = MeshFunction("size_t", mesh, mesh.topology().dim()-1)
# Mark lower boundary facets as subdomain 0
class LowerRobinBoundary(SubDomain):
def inside(self, x, on_boundary):
tol = 1E-14 # tolerance for coordinate comparisons
return on_boundary and abs(x[1]) < tol
Gamma_R = LowerRobinBoundary()
Gamma_R.mark(boundary_parts, 0)
q = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]')
p = Constant(100) # arbitrary function can go here
# Mark upper boundary facets as subdomain 1
class UpperNeumannBoundary(SubDomain):
def inside(self, x, on_boundary):
tol = 1E-14 # tolerance for coordinate comparisons
return on_boundary and abs(x[1] - 1) < tol
Gamma_N = UpperNeumannBoundary()
Gamma_N.mark(boundary_parts, 1)
g = Expression('-4*x[1]')
# Mark left boundary as subdomain 2
class LeftBoundary(SubDomain):
def inside(self, x, on_boundary):
tol = 1E-14 # tolerance for coordinate comparisons
return on_boundary and abs(x[0]) < tol
Gamma_0 = LeftBoundary()
Gamma_0.mark(boundary_parts, 2)
# Mark right boundary as subdomain 3
class RightBoundary(SubDomain):
    def inside(self, x, on_boundary):
        tol = 1E-14 # tolerance for coordinate comparisons
return on_boundary and abs(x[0] - 1) < tol
Gamma_1 = RightBoundary()
Gamma_1.mark(boundary_parts, 3)
#-------------- Solution and problem definition step -----------------
# given mesh and boundary_parts
u_L = Expression('1 + 2*x[1]*x[1]')
u_R = Expression('2 + 2*x[1]*x[1]')
bcs = [DirichletBC(V, u_L, boundary_parts, 2),
DirichletBC(V, u_R, boundary_parts, 3)]
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(-6.0)
a = inner(nabla_grad(u), nabla_grad(v))*dx + p*u*v*ds(0)
L = f*v*dx - g*v*ds(1) + p*q*v*ds(0)
# Compute solution
A = assemble(a, exterior_facet_domains=boundary_parts)
b = assemble(L, exterior_facet_domains=boundary_parts)
for condition in bcs: condition.apply(A, b)
# Alternative is not yet supported
#A, b = assemble_system(a, L, bc, exterior_facet_domains=boundary_parts)
u = Function(V)
solve(A, u.vector(), b, 'lu')
print mesh
# Verification
u_exact = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]')
u_e = interpolate(u_exact, V)
print 'Max error:', abs(u_e.vector().array() - u.vector().array()).max()
#interactive()
|
thuck/proc
|
proc/consoles.py
|
Python
|
lgpl-3.0
| 1,126
| 0.000888
|
from .basic import ProcFile
from collections import namedtuple
class Consoles(ProcFile):
filename = '/proc/consoles'
Console = namedtuple('Console', ['operations', 'flags', 'major', 'minor'])
def names(self):
return [line.split()[0] for line in self._readfile()]
    def get(self, name, default=None):
        for line in self._readfile():
            console_info = line.replace('(', '').replace(')', '').split()
            if name == console_info[0]:
                major, minor = console_info[-1].split(':')
                return [console_info[1],
                        ''.join(console_info[2:-1]), major, minor]
        # no console with the given name was found
        return default
def __getattr__(self, name):
        if name in self.names():
return self.Console(*tuple(self.get(name)))
else:
raise AttributeError
if __name__ == '__main__':
CONSOLES = Consoles()
print(CONSOLES.names())
print(CONSOLES.get('tty0'))
print(CONSOLES.tty0.operations)
print(CONSOLES.tty0.flags)
print(CONSOLES.tty0.major)
print(CONSOLES.tty0.minor)
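    # a sample /proc/consoles line for reference (layout inferred from the
    # parsing above; exact spacing varies by kernel):
    #   tty0                 -WU (EC p  )    4:1
    # -> name='tty0', operations='-WU', flags='ECp', major='4', minor='1'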
|
JeroenBosmans/nabu
|
nabu/neuralnetworks/trainers/cost_features_rec.py
|
Python
|
mit
| 1,705
| 0.002346
|
'''@file cost_features_rec.py
contains the CostFeaturesRec trainer for reconstruction of the audio samples'''
import tensorflow as tf
import trainer
from nabu.neuralnetworks import ops
class CostFeaturesRec(trainer.Trainer):
    '''A trainer that minimises the mean squared error loss; the output sequences
must be of the same length as the input sequences'''
def compute_loss(self, targets, logits, logit_seq_length,
target_seq_length):
'''
Compute the loss
        Creates the operation to compute the mean squared error loss for every input
frame (if you want to have a different loss function, overwrite this
method)
Args:
            targets: a tuple of targets, the first one being a
[batch_size x max_target_length] tensor containing the real
targets, the second one being a [batch_size x max_audioseq_length x dim]
tensor containing the audio samples or other extra information.
logits: a tuple of [batch_size, max_logit_length, dim] tensors
containing the logits for the text and the audio samples
            logit_seq_length: the length of all the logit sequences as a tuple of
[batch_size] vectors
target_seq_length: the length of all the target sequences as a
                tuple of two [batch_size] vectors, both for one of the elements
                in the targets tuple
Returns:
a scalar value containing the loss
'''
        with tf.name_scope('mse_loss'):
            total_loss = ops.mse(targets[1], logits[1], target_seq_length[1])
        return total_loss
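# A minimal sketch (an assumption for illustration; ops.mse itself is not
# shown in this excerpt) of a sequence-masked MSE like the call above:
def _masked_mse_sketch(targets, logits, seq_length):
    '''mean squared error counting only the valid (unpadded) frames'''
    # [batch_size, max_length] mask: 1.0 for real frames, 0.0 for padding
    mask = tf.sequence_mask(seq_length, maxlen=tf.shape(targets)[1],
                            dtype=tf.float32)
    # broadcast the mask over the feature dimension
    mask = tf.expand_dims(mask, -1)
    squared_error = tf.square(targets - logits)*mask
    # normalise by the number of unmasked elements
    num_elements = tf.reduce_sum(mask)*tf.to_float(tf.shape(targets)[2])
    return tf.reduce_sum(squared_error)/num_elements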
|
abn/python-bugzilla
|
examples/query.py
|
Python
|
gpl-2.0
| 3,441
| 0.000872
|
#!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version. See http://www.gnu.org/copyleft/gpl.html for
# the full text of the license.
# query.py: Perform a few varieties of queries
from __future__ import print_function
import time
import bugzilla
# public test instance of bugzilla.redhat.com. It's okay to make changes
URL = "partner-bugzilla.redhat.com"
bzapi = bugzilla.Bugzilla(URL)
# build_query is a helper function that handles some bugzilla version
# incompatibility issues. All it does is return a properly formatted
# dict(), and provide friendly parameter names. The param names map
# to those accepted by XMLRPC Bug.search:
# https://bugzilla.readthedocs.io/en/latest/api/core/v1/bug.html#search-bugs
query = bzapi.build_query(
product="Fedora",
component="python-bugzilla")
# Since 'query' is just a dict, you could set your own parameters too, like
# if your bugzilla had a custom field. This will set 'status' for example,
# but for common opts it's better to use build_query
query["status"] = "CLOSED"
# query() is what actually performs the query. It's a wrapper around Bug.search
t1 = time.time()
bugs = bzapi.query(query)
t2 = time.time()
print("Found %d bugs with our query" % len(bugs))
print("Query processing time: %s" % (t2 - t1))
# Depending on the size of your query, you can massively speed things up
# by telling bugzilla to only return the fields you care about, since a
# large chunk of the return time is transmitting the extra bug data. You
# tweak this with include_fields:
# https://wiki.mozilla.org/Bugzilla:BzAPI#Field_Control
# Bugzilla will only return those fields listed in include_fields.
query = bzapi.build_query(
product="Fedora",
component="python-bugzilla",
include_fields=["id", "summary"])
t1 = time.time()
bugs = bzapi.query(query)
t2 = time.time()
print("Quicker query processing time: %s" % (t2 - t1))
# bugzilla.redhat.com, and bugzilla >= 5.0 support queries using the same
# format as is used for 'advanced' search URLs via the Web UI. For example,
# I go to partner-bugzilla.redhat.com -> Search -> Advanced Search, select
# Classification=Fedora
# Product=Fedora
# Component=python-bugzilla
# Unselect all bug statuses (so, all status values)
# Under Custom Search
# Creation date -- is less than or equal to -- 2010-01-01
#
# Run that, copy the URL and bring it here, pass it to url_to_query to
# convert it to a dict(), and query as usual
query = bzapi.url_to_query("https://partner-bugzilla.redhat.com/"
"buglist.cgi?classification=Fedora&component=python-bugzilla&"
"f1=creation_ts&o1=lessthaneq&order=Importance&product=Fedora&"
"query_format=advanced&v1=2010-01-01")
query["include_fields"] = ["id", "summary"]
bugs = bzapi.query(query)
print("The URL query returned 22 bugs... "
"I know that without even checking because it shouldn't change!... "
"(count is %d)" % len(bugs))
# One note about querying... you can get subtly different results if
# you are not logged in. Depending on your bugzilla setup it may not matter,
# but if you are dealing with private bugs, check bzapi.logged_in setting
# to ensure your cached credentials are up to date. See update.py for
# an example usage
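# A hedged sketch of that check (the credentials below are placeholders, not
# working values; adapt them to your instance before uncommenting the call):
if not bzapi.logged_in:
    print("Not authenticated; private bugs may be missing from results")
    # bzapi.login("user@example.com", "password")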
|
apertus-open-source-cinema/elmyra
|
src/python/lib/media.py
|
Python
|
gpl-3.0
| 974
| 0
|
"""Methods to set media related (resolution, length) scene properties"""
import bpy
def setup(animated, width, height, length):
"""
Sets up the type, resolution and length of the currently open scene
The render resolution of the scene is set, and additionally ...
... for stills, sets the length of the scene to exactly 1 frame.
... for animations, enables animated seed for cycles, sets the fps to
24 and the fps_base to 1.
"""
    bpy.context.scene.render.resolution_percentage = 100
bpy.context.scene.render.resolution_x = int(width)
bpy.context.scene.render.resolution_y = int(height)
if not animated:
bpy.context.scene.frame_end = 1
else:
bpy.context.scene.cycles.use_animated_seed = True
bpy.context.scene.frame_end = length * 24
bpy.context.scene.render.fps = 24
        # fps_base exists to express fractional frame rates (e.g. 23.976);
        # keeping it at 1 gives an exact 24 fps
        bpy.context.scene.render.fps_base = 1
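if __name__ == '__main__':
    # hypothetical usage sketch (values are illustrative): a 10 second,
    # 1080p animation ends up with frame_end = 240 at 24 fps
    setup(animated=True, width=1920, height=1080, length=10)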
|