repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
hlin/django-auth-krb | setup.py | Python | mit | 1,133 | 0 | #!/usr/bin/env python
from setuptools import setup, find_packages
from django_auth_krb import get_version
setup(
name="django_auth_krb",
version=get_version(),
description="Django kerberos authentication backend",
long_description=open('README.rst').read(),
url="https://github.com/hlin/django-auth-krb/",
author="Hypo Lin",
author_email="hlin.pub@me.com",
license="MIT",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT Lice | nse",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords=["django", "kerberos", "krb5", "authentication", "auth"],
packages | =find_packages(exclude='tests'),
include_package_data=True,
install_requires=[
'Django>=1.10.1',
],
zip_safe=False,
)
|
ThunderGemios10/The-Super-Duper-Script-Editor | ui_fontgenerator.py | Python | gpl-3.0 | 13,740 | 0.003712 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qt\ui\fontgen.ui'
#
# Created: Mon Jun 03 01:17:17 2013
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_FontGenerator(object):
def setupUi(self, FontGenerator):
FontGenerator.setObjectName(_fromUtf8("FontGenerator"))
FontGenerator.resize(570, 493)
FontGenerator.setWindowTitle(QtGui.QApplication.translate("FontGenerator", "Font Generator - untitled[*]", None, QtGui.QApplication.UnicodeUTF8))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/monokuma-green.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
FontGenerator.setWindowIcon(icon)
self.verticalLayout = QtGui.QVBoxLayout(FontGenerator)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.btnNew = QtGui.QPushButton(FontGenerator)
self.btnNew.setText(QtGui.QApplication.translate("FontGenerator", "&New", None, QtGui.QApplication.UnicodeUTF8))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/report.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnNew.setIcon(icon1)
self.btnNew.setShortcut(QtGui.QApplication.translate("FontGenerator", "Ctrl+N", None, | QtGui.QApplication.UnicodeUTF8))
self.btnNew.setAutoDefault(False)
self.btnNew.setObjectName(_fromUtf8("btnNew"))
self.horizontalLayout_2.addWidget(self.btnNew)
self.btnSave = QtGui.QPushButton(FontGenerator)
self.btnSave.setText(QtGui.QApplication.translate("FontGenerator", "&Save", None, QtGui.QApplication.UnicodeUTF8))
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPix | map(_fromUtf8(":/disk.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnSave.setIcon(icon2)
self.btnSave.setShortcut(QtGui.QApplication.translate("FontGenerator", "Ctrl+S", None, QtGui.QApplication.UnicodeUTF8))
self.btnSave.setAutoDefault(False)
self.btnSave.setObjectName(_fromUtf8("btnSave"))
self.horizontalLayout_2.addWidget(self.btnSave)
self.btnSaveAs = QtGui.QPushButton(FontGenerator)
self.btnSaveAs.setText(QtGui.QApplication.translate("FontGenerator", "Save As...", None, QtGui.QApplication.UnicodeUTF8))
self.btnSaveAs.setShortcut(QtGui.QApplication.translate("FontGenerator", "Ctrl+Shift+S", None, QtGui.QApplication.UnicodeUTF8))
self.btnSaveAs.setAutoDefault(False)
self.btnSaveAs.setObjectName(_fromUtf8("btnSaveAs"))
self.horizontalLayout_2.addWidget(self.btnSaveAs)
self.btnLoad = QtGui.QPushButton(FontGenerator)
self.btnLoad.setText(QtGui.QApplication.translate("FontGenerator", "&Open", None, QtGui.QApplication.UnicodeUTF8))
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/folder.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnLoad.setIcon(icon3)
self.btnLoad.setShortcut(QtGui.QApplication.translate("FontGenerator", "Ctrl+O", None, QtGui.QApplication.UnicodeUTF8))
self.btnLoad.setAutoDefault(False)
self.btnLoad.setObjectName(_fromUtf8("btnLoad"))
self.horizontalLayout_2.addWidget(self.btnLoad)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.btnGenerateFont = QtGui.QPushButton(FontGenerator)
self.btnGenerateFont.setText(QtGui.QApplication.translate("FontGenerator", "&Generate", None, QtGui.QApplication.UnicodeUTF8))
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/cog.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnGenerateFont.setIcon(icon4)
self.btnGenerateFont.setShortcut(QtGui.QApplication.translate("FontGenerator", "Ctrl+G", None, QtGui.QApplication.UnicodeUTF8))
self.btnGenerateFont.setAutoDefault(False)
self.btnGenerateFont.setObjectName(_fromUtf8("btnGenerateFont"))
self.horizontalLayout_2.addWidget(self.btnGenerateFont)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.line = QtGui.QFrame(FontGenerator)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.verticalLayout.addWidget(self.line)
self.tabFonts = QtGui.QTabWidget(FontGenerator)
self.tabFonts.setTabsClosable(False)
self.tabFonts.setMovable(True)
self.tabFonts.setObjectName(_fromUtf8("tabFonts"))
self.verticalLayout.addWidget(self.tabFonts)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.btnNewTab = QtGui.QPushButton(FontGenerator)
self.btnNewTab.setText(QtGui.QApplication.translate("FontGenerator", "Add Tab", None, QtGui.QApplication.UnicodeUTF8))
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/add.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnNewTab.setIcon(icon5)
self.btnNewTab.setAutoDefault(False)
self.btnNewTab.setObjectName(_fromUtf8("btnNewTab"))
self.horizontalLayout.addWidget(self.btnNewTab)
self.btnRemoveTab = QtGui.QPushButton(FontGenerator)
self.btnRemoveTab.setText(QtGui.QApplication.translate("FontGenerator", "Remove", None, QtGui.QApplication.UnicodeUTF8))
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(_fromUtf8(":/delete.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnRemoveTab.setIcon(icon6)
self.btnRemoveTab.setAutoDefault(False)
self.btnRemoveTab.setObjectName(_fromUtf8("btnRemoveTab"))
self.horizontalLayout.addWidget(self.btnRemoveTab)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout)
self.groupBox = QtGui.QGroupBox(FontGenerator)
self.groupBox.setTitle(QtGui.QApplication.translate("FontGenerator", "Export", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.groupBox)
self.horizontalLayout_3.setContentsMargins(-1, 4, -1, 8)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.chkGenForGame = QtGui.QCheckBox(self.groupBox)
self.chkGenForGame.setText(QtGui.QApplication.translate("FontGenerator", "Export to umdimage2", None, QtGui.QApplication.UnicodeUTF8))
self.chkGenForGame.setChecked(True)
self.chkGenForGame.setObjectName(_fromUtf8("chkGenForGame"))
self.verticalLayout_2.addWidget(self.chkGenForGame)
self.chkGenForEditor = QtGui.QCheckBox(self.groupBox)
self.chkGenForEditor.setText(QtGui.QApplication.translate("FontGenerator", "Export to editor GFX dir", None, QtGui.QApplication.UnicodeUTF8))
self.chkGenForEditor.setChecked(True)
self.chkGenForEditor.setObjectName(_fromUtf8("chkGenForEditor"))
self.verticalLayout_2.addWidget(self.chkGenForEditor)
self.horizontalLayout_3.addLayout(self.verticalLayout_2)
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.rdoGenFont1 = QtGui.QRadioButton(self.groupBox)
self.rdoGenFont1.setText(QtGui.QApplication.translate("FontGenerator", "Font 01 (regular text)", None, QtGui.QApplication.UnicodeUTF8))
self.rdoGenFont1.setChecked(True)
self.rdoGenFont1.setObjectName(_fromUtf8("rdoGenFont1"))
|
TaskEvolution/Task-Coach-Evolution | taskcoach/tests/disttests/win32/sendinput/setup.py | Python | gpl-3.0 | 959 | 0 | '''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
| from distutils.core import setup, Extension
setup(name='sendinput',
ext_modules=[Extension('sendinput', ['sendinput.c'],
| define_macros=[('_WIN32_WINNT', '0x0502')])])
|
Designist/pybuilder | src/main/python/pybuilder/errors.py | Python | apache-2.0 | 3,860 | 0.002332 | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 20 | 11-2015 PyBuilder Team
#
# | Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The PyBuilder error module.
Defines all possible errors that can arise during the execution of PyBuilder.
"""
class PyBuilderException(Exception):
def __init__(self, message, *arguments):
super(PyBuilderException, self).__init__(message, *arguments)
self._message = message
self._arguments = arguments
@property
def message(self):
return self._message % self._arguments
def __str__(self):
return self.message
class InvalidNameException(PyBuilderException):
def __init__(self, name):
super(InvalidNameException, self).__init__("Invalid name: %s", name)
class NoSuchTaskException(PyBuilderException):
def __init__(self, name):
super(NoSuchTaskException, self).__init__("No such task %s", name)
class CircularTaskDependencyException(PyBuilderException):
def __init__(self, first, second=None, message=None):
if message:
super(CircularTaskDependencyException, self).__init__(message)
elif second:
super(CircularTaskDependencyException, self).__init__("Circular task dependency detected between %s and %s",
first, second)
self.first = first
self.second = second
class MissingPrerequisiteException(PyBuilderException):
def __init__(self, prerequisite, caller="n/a"):
super(
MissingPrerequisiteException, self).__init__("Missing prerequisite %s required by %s",
prerequisite, caller)
class MissingTaskDependencyException(PyBuilderException):
def __init__(self, source, dependency):
super(
MissingTaskDependencyException, self).__init__("Missing task '%s' required for task '%s'",
dependency, source)
class MissingActionDependencyException(PyBuilderException):
def __init__(self, source, dependency):
super(
MissingActionDependencyException, self).__init__("Missing task '%s' required for action '%s'",
dependency, source)
class MissingPluginException(PyBuilderException):
def __init__(self, plugin, message=""):
super(MissingPluginException, self).__init__(
"Missing plugin '%s': %s", plugin, message)
class BuildFailedException(PyBuilderException):
pass
class MissingPropertyException(PyBuilderException):
def __init__(self, property):
super(MissingPropertyException, self).__init__(
"No such property: %s", property)
class ProjectValidationFailedException(BuildFailedException):
def __init__(self, validation_messages):
BuildFailedException.__init__(
self, "Project validation failed: " + "\n-".join(validation_messages))
self.validation_messages = validation_messages
class InternalException(PyBuilderException):
pass
class DependenciesNotResolvedException(InternalException):
def __init__(self):
super(DependenciesNotResolvedException, self).__init__("Dependencies have not been resolved.")
|
OPM/ResInsight | ThirdParty/Ert/python/ecl/util/util/version.py | Python | gpl-3.0 | 3,470 | 0.011816 | from ecl import EclPrototype
def cmp_method(method):
def cmp_wrapper(self, other):
if not isinstance(other, Version):
other = Version(other[0], other[1], other[2])
return method(self, other)
return cmp_wrapper
class Version(object):
def __init__(self, major, minor, micro, git_commit = None, build_time = None):
self.major = major
self.minor = minor
self.micro = micro
try:
self.micro_int = int(micro)
self.is_devel = False
except ValueError:
self.micro_int = -1
self.is_devel = True
self.build_time = build_time
self.git_commit = git_commit
def isDevelVersion(self):
return self.is_devel
def versionString(self):
return "%d.%d.%s" % (self.major, self.minor, self.micro)
def versionTuple(self):
return self.major, self.minor, self.micro
def __cmpTuple(self):
return self.major, self.minor, self.micro_int
def __str__(self):
return self.versionString()
def __repr__(self):
status = 'production'
git_commit = self.getGitCommit( short = True )
if self.is_devel:
status = 'development'
fmt = 'Version(major=%d, minor=%d, micro="%s", commit="%s", status="%s")'
return fmt % (self.major, self.minor, self.micro, git_commit, status)
@cmp_method
def __eq__(self, other):
return self.versionTuple() == other.versionTuple()
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.versionTuple())
# All development versions are compared with micro version == -1;
# i.e. the two versions version(1,2,"Alpha") and
# ecl_version(1,2,"Beta") compare as equal in the >= and <= tests -
# but not in the == test.
@cmp_method
def __ge__(self, other):
return self.__cmpTuple() >= other.__cmpTuple()
@cmp_method
def __lt__(self, other):
return not (self >= other)
@cmp_method
def __le__(self, other):
return self.__cmpTuple() <= other.__cmpTuple()
@cmp_method
def __gt__(self, other):
return not (self <= other)
def getBuildTime(self):
if self.build_time is None:
return "?????"
else:
return self.build_time
def getGitCommit(self, short=False):
if self.git_commit is None:
return "???????"
else:
if short:
return self.git_commit[0:8]
else:
return | self.git_commit
class EclVersion(Version):
_build_time = EclPrototype("char* ecl_version_get_build_time()", bind = False)
_git_commit = EclPrototype("char* ecl_version_get_git_commit()", bind = False)
_major_version = EclPrototype("int ecl_version_get_major_version()", bind = False)
_minor_version = EclPrototype("int ecl_versi | on_get_minor_version()", bind = False)
_micro_version = EclPrototype("char* ecl_version_get_micro_version()", bind = False)
_is_devel = EclPrototype("bool ecl_version_is_devel_version()", bind = False)
def __init__(self):
major = self._major_version( )
minor = self._minor_version( )
micro = self._micro_version( )
git_commit = self._git_commit( )
build_time = self._build_time( )
super( EclVersion, self).__init__( major, minor , micro , git_commit, build_time)
|
kelvindk/Video-Stabilization | boost_1_42_0/tools/regression/src/regression.py | Python | gpl-3.0 | 36,064 | 0.027091 | #!/usr/bin/python
# Copyright MetaCommunications, Inc. 2003-2007
# Copyright Redshift Software, Inc. 2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import glob
import optparse
import os
import os.path
import platform
import sys
import time
#~ Place holder for xsl_reports/util module
utils = None
repo_root = {
'anon' : 'http://svn.boost.org/svn/boost/',
'user' : 'https://svn.boost.org/svn/boost/'
}
repo_path = {
'trunk' : 'trunk',
'release' : 'branches/release',
'build' : 'trunk/tools/build/v2',
'jam' : 'tags/tools/jam/Boost_Jam_3_1_17/src',
'regression' : 'trunk/tools/regression',
'boost-build.jam'
: 'trunk/boost-build.jam'
}
class runner:
def __init__(self,root):
commands = map(
lambda m: m[8:].replace('_','-'),
filter(
lambda m: m.startswith('command_'),
runner.__dict__.keys())
)
commands.sort()
commands = "commands: %s" % ', '.join(commands)
opt = optparse.OptionParser(
usage="%prog [options] [commands]",
description=commands)
#~ Base Options:
opt.add_option( '--runner',
help="runner ID (e.g. 'Metacomm')" )
opt.add_option( '--comment',
help="an HTML comment file to be inserted in the reports" )
opt.add_option( '--tag',
help="the tag for the results" )
opt.add_option( '--toolsets',
help="comma-separated list of toolsets to test with" )
opt.add_option( '--incremental',
help="do incremental run (do not remove previous binaries)",
action='store_true' )
opt.add_option( '--timeout',
help="specifies the timeout, in minutes, for a single test run/compilation",
type='int' )
opt.add_option( '--bjam-o | ptions',
help="options to pass to the regression test" )
opt.add_o | ption( '--bjam-toolset',
help="bootstrap toolset for 'bjam' executable" )
opt.add_option( '--pjl-toolset',
help="bootstrap toolset for 'process_jam_log' executable" )
opt.add_option( '--platform' )
#~ Source Options:
opt.add_option( '--user',
help="Boost SVN user ID" )
opt.add_option( '--local',
help="the name of the boost tarball" )
opt.add_option( '--force-update',
help="do an SVN update (if applicable) instead of a clean checkout, even when performing a full run",
action='store_true' )
opt.add_option( '--have-source',
help="do neither a tarball download nor an SVN update; used primarily for testing script changes",
action='store_true' )
#~ Connection Options:
opt.add_option( '--ftp',
help="FTP URL to upload results to." )
opt.add_option( '--proxy',
help="HTTP proxy server address and port (e.g.'http://www.someproxy.com:3128')" )
opt.add_option( '--ftp-proxy',
help="FTP proxy server (e.g. 'ftpproxy')" )
opt.add_option( '--dart-server',
help="the dart server to send results to" )
#~ Debug Options:
opt.add_option( '--debug-level',
help="debugging level; controls the amount of debugging output printed",
type='int' )
opt.add_option( '--send-bjam-log',
help="send full bjam log of the regression run",
action='store_true' )
opt.add_option( '--mail',
help="email address to send run notification to" )
opt.add_option( '--smtp-login',
help="STMP server address/login information, in the following form: <user>:<password>@<host>[:<port>]" )
opt.add_option( '--skip-tests',
help="do not run bjam; used for testing script changes",
action='store_true' )
#~ Defaults
self.runner = None
self.comment='comment.html'
self.tag='trunk'
self.toolsets=None
self.incremental=False
self.timeout=5
self.bjam_options=''
self.bjam_toolset=''
self.pjl_toolset=''
self.platform=self.platform_name()
self.user='anonymous'
self.local=None
self.force_update=False
self.have_source=False
self.ftp=None
self.proxy=None
self.ftp_proxy=None
self.dart_server=None
self.debug_level=0
self.send_bjam_log=False
self.mail=None
self.smtp_login=None
self.skip_tests=False
( _opt_, self.actions ) = opt.parse_args(None,self)
if not self.actions or self.actions == []:
self.actions = [ 'regression' ]
#~ Initialize option dependent values.
self.regression_root = root
self.boost_root = os.path.join( self.regression_root, 'boost' )
self.regression_results = os.path.join( self.regression_root, 'results' )
if self.pjl_toolset != 'python':
self.regression_log = os.path.join( self.regression_results, 'bjam.log' )
else:
self.regression_log = os.path.join( self.regression_results, 'bjam.xml' )
self.tools_bb_root = os.path.join( self.regression_root,'tools_bb' )
self.tools_bjam_root = os.path.join( self.regression_root,'tools_bjam' )
self.tools_regression_root = os.path.join( self.regression_root,'tools_regression' )
self.xsl_reports_dir = os.path.join( self.tools_regression_root, 'xsl_reports' )
self.timestamp_path = os.path.join( self.regression_root, 'timestamp' )
if sys.platform == 'win32':
self.patch_boost = 'patch_boost.bat'
self.bjam = { 'name' : 'bjam.exe' }
self.process_jam_log = { 'name' : 'process_jam_log.exe' }
elif sys.platform == 'cygwin':
self.patch_boost = 'patch_boost'
self.bjam = { 'name' : 'bjam.exe' }
self.process_jam_log = { 'name' : 'process_jam_log.exe' }
else:
self.patch_boost = 'patch_boost'
self.bjam = { 'name' : 'bjam' }
self.process_jam_log = { 'name' : 'process_jam_log' }
self.bjam = {
'name' : self.bjam['name'],
'build_cmd' : self.bjam_build_cmd,
'path' : os.path.join(self.regression_root,self.bjam['name']),
'source_dir' : self.tools_bjam_root,
'build_dir' : self.tools_bjam_root,
'build_args' : ''
}
self.process_jam_log = {
'name' : self.process_jam_log['name'],
'build_cmd' : self.bjam_cmd,
'path' : os.path.join(self.regression_root,self.process_jam_log['name']),
'source_dir' : os.path.join(self.tools_regression_root,'build'),
'build_dir' : os.path.join(self.tools_regression_root,'build'),
'build_args' : 'process_jam_log -d2'
}
if self.debug_level > 0:
self.log('Regression root = %s'%self.regression_root)
self.log('Boost root = %s'%self.boost_root)
self.log('Regression results = %s'%self.regression_results)
self.log('Regression log = %s'%self.regression_log)
self.log('BB root = %s'%self.tools_bb_root)
self.log('Bjam root = %s'%self.tools_bjam_root)
self.log('Tools root = %s'%self.tools_regression_root)
self.log('XSL reports dir = %s'%self.xsl_reports_dir)
self.log('Timestamp = %s'%self.timestamp_path)
self.log('Patch Boost script = %s'%self.patch_boost)
self.main()
#~ The various commands that make up the testing sequence...
def command_cleanup(self,*args):
if not args or args == None or args == []: args = [ 'source', 'bin' ]
if 'source' in args:
self.log( 'Clean |
stefanbraun-private/pyVisiToolkit | src/trend/datasource/trendExpression.py | Python | gpl-3.0 | 10,667 | 0.023156 | #!/usr/bin/env python
# encoding: utf-8
"""
trend.datasource.trendExpression.py
Evaluate trenddata expressions
Copyright (C) 2017 Stefan Braun
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a cop | y of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
DEBUGGING = True
import datetime
import misc.timezone as timezone
from trend.datasource.trendInterpolation import Interpolation
from operator import itemgetter
import collections
import sys
class Variable(object):
TYPE_BOOLEAN = 1
TYPE_INTEGER = 2
TYPE_FLOAT = 3
def __init__(self, projectpath_str, dms_dp_str, var_name_str, interpolation_type | _int, value_type_int):
self._interpolation = Interpolation(projectpath_str, dms_dp_str, interpolation_type_int)
self._var_name_str = var_name_str
self._value_type_int = value_type_int
def get_interpolation(self):
return self._interpolation
def get_var_name(self):
return self._var_name_str
def get_value(self, timestamp_datetime):
if self._value_type_int == Variable.TYPE_BOOLEAN:
return self._interpolation.get_value_as_boolean(timestamp_datetime)
elif self._value_type_int == Variable.TYPE_INTEGER:
return self._interpolation.get_value_as_int(timestamp_datetime)
else:
# default: returning float
return self._interpolation.get_value_as_float(timestamp_datetime)
def get_age(self, timestamp_datetime):
# difference between given timestamp and last available trenddata timestamp in seconds
# =>this "freshness" shows holes in trenddata
return self._interpolation.get_age(timestamp_datetime)
class Expression(object):
_tz = timezone.Timezone().get_tz()
def __init__(self, variables_list):
self._variables_list = variables_list
def _get_timestamps_generator(self, start_datetime=None, stop_datetime=None):
"""
getting generators of all timestamp sources,
then always yield the oldest timestamp of all active timestamp sources
=>this allows comparison of values of all involved variables at all available timestamps
"""
# FIXME: some code sharing with MetaTrendfile.get_dbdata_timestamps_generator()... =>refactor if possible!
if not start_datetime:
start_datetime = datetime.datetime.fromtimestamp(0, tz=Expression._tz)
if not stop_datetime:
stop_datetime = datetime.datetime(year=3000, month=1, day=1).replace(tzinfo=Expression._tz)
class _TStamp_iter_source(object):
"""helper class for timestamp generators"""
def __init__(self, head_elem, iterator):
self.head_elem = head_elem
self.iterator = iterator
tstamp_generator_list = []
for var in self._variables_list:
try:
curr_iter = var.get_interpolation().get_dbdata_timestamps_generator(start_datetime, stop_datetime)
# this object always contains head element from iterator, and iterator itself
new_source = _TStamp_iter_source(curr_iter.next(), curr_iter)
tstamp_generator_list.append(new_source)
except StopIteration:
pass
# request items from all generators, always returning smaller value
while tstamp_generator_list:
# consuming timestamps, returning always oldest one, updating first element
# sorting list of tuples: http://stackoverflow.com/questions/10695139/sort-a-list-of-tuples-by-2nd-item-integer-value
# =>getting source list with oldest timestamp
key_func = lambda tstamp_iter_source: tstamp_iter_source.head_elem.tstamp_dt
tstamp_generator_list = sorted(tstamp_generator_list, key=key_func)
oldest_source_obj = tstamp_generator_list[0]
curr_tstamp_obj = oldest_source_obj.head_elem
yield curr_tstamp_obj
try:
# update head-element of current timestamp source
oldest_source_obj.head_elem = oldest_source_obj.iterator.next()
except StopIteration:
# iterator is empty... =>removing this timestamp-source
tstamp_generator_list = tstamp_generator_list[1:]
def get_evaluation_generator(self, binary_expr_str, start_datetime=None, stop_datetime=None):
"""
evaluate given expression at every available timestamp
"""
if not start_datetime:
start_datetime = datetime.datetime.fromtimestamp(0, tz=Expression._tz)
if not stop_datetime:
stop_datetime = datetime.datetime(year=3000, month=1, day=1).replace(tzinfo=Expression._tz)
# looping through all available timestamps
for tstamp_obj in self._get_timestamps_generator(start_datetime, stop_datetime):
# set test condition for eval(): building local variables dictionary with all values
# updating "age" as maximum of "age" of all variables (higher means less relevant)
curr_age = 0
mylocals = {}
for curr_var in self._variables_list:
var_name = curr_var.get_var_name()
curr_val = curr_var.get_value(tstamp_obj.tstamp_dt)
mylocals[var_name] = curr_val
curr_age = max(curr_age, curr_var.get_age(tstamp_obj.tstamp_dt))
tstamp_obj.age = curr_age
# evaluate given expression with current variable values
try:
# calling eval() mostly safe (according to http://lybniz2.sourceforge.net/safeeval.html )
tstamp_obj.value = eval(binary_expr_str, {}, mylocals)
except Exception as ex:
# current expression contains errors...
print('\tExpression.get_evaluation_generator() throwed exception during evaluation: "' + repr(ex) + '"')
tstamp_obj.value = None
yield tstamp_obj
def get_timespans_while_eval_true_generator(self, binary_expr_str, start_datetime=None, stop_datetime=None, duration_seconds=300, max_age_seconds=900):
"""
evaluate given expression at every available timestamp,
yields Timespan objects containing begin and end timestamp,
when this expression evaluates to True during specific amount of seconds as minimal duration
and all available values are "fresher" than given max_age_seconds
(=>caller has to iterate himself over these timespans;
we got "MemoryError"s when trying to collect lists of all available timestamps)
"""
assert duration_seconds >= 0, 'parameter "duration_seconds" has to be a positive integer'
assert max_age_seconds > 0, 'sane values of maximal age: a bit higher than maximum interval of all involved variables'
class _Timespan(object):
def __init__(self, start_datetime):
self.start_datetime = start_datetime
self.stop_datetime = None
self.nof_tstamps = 0
curr_timespan = None
for tstamp_obj in self.get_evaluation_generator(binary_expr_str, start_datetime, stop_datetime):
if tstamp_obj.value and tstamp_obj.age <= max_age_seconds:
# expression evaluates to True and is "fresh"
if not curr_timespan:
# =>begin of new list
curr_timespan = _Timespan(tstamp_obj.tstamp_dt)
curr_timespan.nof_tstamps += 1
else:
# expression evaluates to False =>return last Timespan object, reset everything
if curr_timespan:
curr_duration = abs((curr_timespan.start_datetime - tstamp_obj.tstamp_dt).total_seconds())
if curr_duration >= duration_seconds:
# found timespan where expression evaluates long enough to True
# =>save it for caller
curr_timespan.stop_datetime = tstamp_obj.tstamp_dt
yield curr_timespan
curr_timespan = None
def get_value_of_variable(self, var_name_str, timestamp_datetime):
"""retrieving interpolated value of given variable at given timestamp"""
# searching variable
for var in self._variables_list:
if var._var_name_str == var_name_str:
return var.get_value(timestamp_datetime)
raise AttributeError('variable "' + var_name_str + '" is unknown to current expression object!')
def main(argv=None):
curr_tz = timezone.Timezone().get_tz()
# evaluate expression over trenddata
my_vars_list = []
my_vars_list.append(Variable(projectpath_str='C:\Promos15\proj\Foo',
dms_dp_str='NS_MSR01a |
anhstudios/swganh | data/scripts/templates/object/weapon/melee/2h_sword/crafted_saber/shared_sword_lightsaber_two_handed_s6_gen2.py | Python | mit | 501 | 0.043912 | #### NOTICE: THIS FILE IS AUT | OGENERATED
#### MODIFICATIO | NS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Weapon()
result.template = "object/weapon/melee/2h_sword/crafted_saber/shared_sword_lightsaber_two_handed_s6_gen2.iff"
result.attribute_template_id = 10
result.stfName("weapon_name","sword_lightsaber_2h_type6")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
tcp813/tolo | settings/constants.py | Python | mit | 18 | 0.055556 | ZOOM_NORMAL = 1.0 | ||
mruddy/bitcoin | test/functional/mempool_accept.py | Python | mit | 16,115 | 0.004034 | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool acceptance of raw transactions."""
from decimal import Decimal
import math
from test_framework.test_framework import BitcoinTestFramework
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
COutPoint,
CTxIn,
CTxOut,
MAX_BLOCK_BASE_SIZE,
MAX_MONEY,
tx_from_hex,
)
from test_framework.script import (
hash160,
CScript,
OP_0,
OP_2,
OP_3,
OP_CHECKMULTISIG,
OP_EQUAL,
OP_HASH160,
OP_RETURN,
)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class MempoolAcceptanceTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'-txindex','-permitbaremultisig=0',
]] * self.num_nodes
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def check_mempool_result(self, result_expected, *args, **kwargs):
"""Wrapper to check result of testmempoolaccept on node_0's mempool"""
result_test = self.nodes[0].testmempoolaccept(*args, **kwargs)
for r in result_test:
r.pop('wtxid') # Skip check for now
assert_equal(result_expected, result_test)
assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size) # Must not change mempool state
def run_test(self):
node = self.nodes[0]
self.log.info('Start with empty mempool, and 200 blocks')
self.mempool_size = 0
assert_equal(node.getblockcount(), 200)
assert_equal(node.getmempoolinfo()['size'], self.mempool_size)
coins = node.listunspent()
self.log.info('Should not accept garbage to testmempoolaccept')
assert_raises_rpc_error(-3, 'Expected type array, got string', lambda: node.testmempoolaccept(rawtxs='ff00baar'))
assert_raises_rpc_error(-8, 'Array must contain between 1 and 25 transactions.', lambda: node.testmempoolaccept(rawtxs=['ff22']*26))
assert_raises_rpc_error(-8, 'Array must contain between 1 and 25 transactions.', lambda: node.testmempoolaccept(rawtxs=[]))
assert_raises_rpc_error(-22, 'TX decode failed', lambda: node.testmempoolaccept(rawtxs=['ff00baar']))
self.log.info('A transaction already in the blockchain')
coin = coins.pop() # Pick a random coin(base) to spend
raw_tx_in_block = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout']}],
| outputs=[{node.getnewaddress(): 0.3}, {node.getnewaddress(): 49}],
))['hex']
txid_in_block = node.sendrawtransaction(hexstring=raw_tx_in_block, maxfeerate=0)
node.generate(1)
self.mempool_size = | 0
self.check_mempool_result(
result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': 'txn-already-known'}],
rawtxs=[raw_tx_in_block],
)
self.log.info('A transaction not in the mempool')
fee = Decimal('0.000007')
raw_tx_0 = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{"txid": txid_in_block, "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}], # RBF is used later
outputs=[{node.getnewaddress(): Decimal('0.3') - fee}],
))['hex']
tx = tx_from_hex(raw_tx_0)
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee}}],
rawtxs=[raw_tx_0],
)
self.log.info('A final transaction not in the mempool')
coin = coins.pop() # Pick a random coin(base) to spend
output_amount = Decimal('0.025')
raw_tx_final = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout'], "sequence": 0xffffffff}], # SEQUENCE_FINAL
outputs=[{node.getnewaddress(): output_amount}],
locktime=node.getblockcount() + 2000, # Can be anything
))['hex']
tx = tx_from_hex(raw_tx_final)
fee_expected = coin['amount'] - output_amount
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee_expected}}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
node.sendrawtransaction(hexstring=raw_tx_final, maxfeerate=0)
self.mempool_size += 1
self.log.info('A transaction in the mempool')
node.sendrawtransaction(hexstring=raw_tx_0)
self.mempool_size += 1
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'txn-already-in-mempool'}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that replaces a mempool transaction')
tx = tx_from_hex(raw_tx_0)
tx.vout[0].nValue -= int(fee * COIN) # Double the fee
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER + 1 # Now, opt out of RBF
raw_tx_0 = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
tx = tx_from_hex(raw_tx_0)
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': (2 * fee)}}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that conflicts with an unconfirmed tx')
# Send the transaction that replaces the mempool transaction and opts out of replaceability
node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0)
# take original raw_tx_0
tx = tx_from_hex(raw_tx_0)
tx.vout[0].nValue -= int(4 * fee * COIN) # Set more fee
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'txn-mempool-conflict'}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
self.log.info('A transaction with missing inputs, that never existed')
tx = tx_from_hex(raw_tx_0)
tx.vin[0].prevout = COutPoint(hash=int('ff' * 32, 16), n=14)
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with missing inputs, that existed once in the past')
tx = tx_from_hex(raw_tx_0)
tx.vin[0].prevout.n = 1 # Set vout to 1, to spend the other outpoint (49 coins) of the in-chain-tx we want to double spend
raw_tx_1 = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
txid_1 = node.sendrawtransaction(hexstring=raw_tx_1, maxfeerate=0)
# Now spend both to "clearly hide" the outputs, ie. remove the coins from the utxo set by spending them
raw_tx_spend_both = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[
{'txid': txid_0, 'vout': 0},
{'txid': txid_1, 'vout': 0},
],
outputs=[{node.getnewaddress(): 0.1}]
))['hex']
txid_spend_both = node.sendrawtransaction(hexstring=raw_tx_spend_both, maxfeerate=0)
node.generate(1)
self.mempool_size = 0
# Now see if we can add the coins back to the utxo set by sending the exact txs again
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_0],
)
self.check_mempool_result(
result_expected=[{'txid': txid_1, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_1],
)
|
abhikeshav/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_controller_optics_oper.py | Python | apache-2.0 | 95,722 | 0.016203 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'OpticsFormFactorEnum' : _MetaInfoEnum('OpticsFormFactorEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'not-set':'NOT_SET',
'invalid':'INVALID',
'cpak':'CPAK',
'cxp':'CXP',
'sfp-plus':'SFP_PLUS',
'qsfp':'QSFP',
'qsfp-plus':'QSFP_PLUS',
'qsfp28':'QSFP28',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsControllerStateEnum' : _MetaInfoEnum('OpticsControllerStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'optics-state-up':'OPTICS_STATE_UP',
'optics-state-down':'OPTICS_STATE_DOWN',
'optics-state-admin-down':'OPTICS_STATE_ADMIN_DOWN',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsLaserStateEnum' : _MetaInfoEnum('OpticsLaserStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'on':'ON',
'off':'OFF',
'unknown':'UNKNOWN',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsPhyEnum' : _MetaInfoEnum('OpticsPhyEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'not-set':'NOT_SET',
'invalid':'INVALID',
'long-reach-four-lanes':'LONG_REACH_FOUR_LANES',
'short-reach-ten-lanes':'SHORT_REACH_TEN_LANES',
'short-reach-one-lane':'SHORT_REACH_ONE_LANE',
'long-reach-one-lane':'LONG_REACH_ONE_LANE',
'short-reach-four-lanes':'SHORT_REACH_FOUR_LANES',
'copper-four-lanes':'COPPER_FOUR_LANES',
'active-optical-cable':'ACTIVE_OPTICAL_CABLE',
'fourty-gig-e-long-reach-four-lanes':'FOURTY_GIG_E_LONG_REACH_FOUR_LANES',
'fourty-gig-e-short-reach-four-lanes':'FOURTY_GIG_E_SHORT_REACH_FOUR_LANES',
'cwdm-four-lanes':'CWDM_FOUR_LANES',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsTasEnum' : _MetaInfoEnum('OpticsTasEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'tas-ui-oos':'TAS_UI_OOS',
'tas-ui-main':'TAS_UI_MAIN',
'tas-ui-is':'TAS_UI_IS',
'tas-ui-ains':'TAS_UI_AINS',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsEnum' : _MetaInfoEnum('OpticsEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'optics-unknown':'OPTICS_UNKNOWN',
'optics-grey':'OPTICS_GREY',
'optics-dwdm':'OPTICS_DWDM',
'optics-cwdm':'OPTICS_CWDM',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsLedStateEnum' : _MetaInfoEnum('OpticsLedStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'off':'OFF',
'green-on':'GREEN_ON',
'green-flashing':'GREEN_FLASHING',
'yellow-on':'YELLOW_ON',
'yellow-flashing':'YELLOW_FLASHING',
'red-on':'RED_ON',
'red-flashing':'RED_FLASHING',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap.DwdmCarrierMapInfo' : {
'meta_info' : _MetaInfoClass('O | pticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap.DwdmCarrierMapInfo',
False,
[
_MetaInfoClassMember('frequency', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Frequency
''',
'frequency',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('g694-chan-num', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 214748 | 3647)], [],
''' G694 channel number
''',
'g694_chan_num',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('itu-chan-num', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ITU channel number
''',
'itu_chan_num',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('wavelength', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Wavelength
''',
'wavelength',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'dwdm-carrier-map-info',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap',
False,
[
_MetaInfoClassMember('dwdm-carrier-band', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' DWDM carrier band
''',
'dwdm_carrier_band',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('dwdm-carrier-map-info', REFERENCE_LIST, 'DwdmCarrierMapInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap.DwdmCarrierMapInfo',
[], [],
''' DWDM carrier mapping info
''',
'dwdm_carrier_map_info',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('dwdm-carrier-max', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Highest DWDM carrier supported
''',
'dwdm_carrier_max',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('dwdm-carrier-min', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Lowest DWDM carrier supported
''',
'dwdm_carrier_min',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'optics-dwdm-carrrier-channel-map',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.NetworkSrlgInfo' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.NetworkSrlgInfo',
False,
[
_MetaInfoClassMember('network-srlg', REFERENCE_LEAFLIST, 'int' , None, None,
[(0, 4294967295)], [],
''' Network Srlg
''',
'network_srlg',
'Cisco-IOS-XR-controller-optics-oper', False, max_elements=102),
],
'Cisco-IOS-XR-controller-optics-oper',
'network-srlg-info',
_yang_ns._names |
Eric89GXL/scipy | scipy/optimize/minpack.py | Python | bsd-3-clause | 33,497 | 0.000269 | from __future__ import division, print_function, absolute_import
import threading
import warnings
from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
transpose, zeros, product, greater, array,
all, where, isscalar, asarray, inf, abs,
finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError
from scipy._lib._util import _asarray_validated, _lazywhere
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds
error = _minpack.error
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
output_shape=None):
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, '__name__', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
msg += 'Shape should be %s but it is %s.' % (output_shape, shape(res))
raise TypeError(msg)
if issubdtype(res.dtype, inexact):
dt = res.dtype
else:
dt = dtype(float)
return shape(res), dt
| def fsolve(func, x0, args=(), fprime=None, full_output=0,
col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
epsfcn=None, factor=100, diag=None):
"""
Find the roots of a function.
|
Return the roots of the (non-linear) equations defined by
``func(x) = 0`` given a starting estimate.
Parameters
----------
func : callable ``f(x, *args)``
A function that takes at least one (possibly vector) argument,
and returns a value of the same length.
x0 : ndarray
The starting estimate for the roots of ``func(x) = 0``.
args : tuple, optional
Any extra arguments to `func`.
fprime : callable ``f(x, *args)``, optional
A function to compute the Jacobian of `func` with derivatives
across the rows. By default, the Jacobian will be estimated.
full_output : bool, optional
If True, return optional outputs.
col_deriv : bool, optional
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float, optional
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int, optional
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple, optional
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
epsfcn : float, optional
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`epsfcn` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence, optional
N positive entries that serve as a scale factors for the
variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for
an unsuccessful call).
infodict : dict
A dictionary of optional outputs with the keys:
``nfev``
number of function calls
``njev``
number of Jacobian calls
``fvec``
function evaluated at the output
``fjac``
the orthogonal matrix, q, produced by the QR
factorization of the final approximate Jacobian
matrix, stored column wise
``r``
upper triangular matrix produced by QR factorization
of the same matrix
``qtf``
the vector ``(transpose(q) * fvec)``
ier : int
An integer flag. Set to 1 if a solution was found, otherwise refer
to `mesg` for more information.
mesg : str
If no solution is found, `mesg` details the cause of failure.
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See the ``method=='hybr'`` in particular.
Notes
-----
``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
"""
options = {'col_deriv': col_deriv,
'xtol': xtol,
'maxfev': maxfev,
'band': band,
'eps': epsfcn,
'factor': factor,
'diag': diag}
res = _root_hybr(func, x0, args, jac=fprime, **options)
if full_output:
x = res['x']
info = dict((k, res.get(k))
for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
info['fvec'] = res['fun']
return x, info, res['status'], res['message']
else:
status = res['status']
msg = res['message']
if status == 0:
raise TypeError(msg)
elif status == 1:
pass
elif status in [2, 3, 4, 5]:
warnings.warn(msg, RuntimeWarning)
else:
raise TypeError(msg)
return res['x']
def _root_hybr(func, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
factor=100, diag=None, **unknown_options):
"""
Find the roots of a multivariate function using MINPACK's hybrd and
hybrj routines (modified Powell method).
Options
-------
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
eps : float
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`eps` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence
N positive entries that serve as a scale factors for the
variables.
"""
_check_unknown_options(unknown_options)
epsfcn = eps
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
if epsfcn is None:
epsfcn = finfo(dtype).eps
Dfun = jac
if Dfun is None:
if band is None:
ml, mu = -10, -10
else:
ml, mu = band[:2]
if maxfev == 0:
maxfev = 200 * (n + 1)
retval = _minpack._hybr |
london-python-project-nights/romaine | test_data/steps/some_steps.py | Python | mit | 444 | 0 | from romaine.steps import Give | n, When, Then, Step, And
@Given('step_1')
def step_1():
pass
@When('step_2')
def step_2():
pass
@Then('step_3')
def step_3():
pass
@And('step_4')
def step_4():
pass
@Step('step_5')
def step_5():
pass
@Given('Given step_6')
def step_6():
pass
@When('When step_7')
def step_7():
pass
@Then('Then st | ep_8')
def step_8():
pass
@And('And step_9')
def step_9():
pass
|
jjcamp/mdgt | mdgt/launcher.py | Python | mit | 3,287 | 0.000304 | import argparse
import configparser
import sys
from io import StringIO
from pathlib import Path
from .mdgt import consolePrint, jsonPrint, listProvs
from .provider import Provider
from .webserve import serve as webserve
def setup_parser():
'''Initialize the argument parser.
Returns:
ArgumentParser instance.
'''
parser = argparse.ArgumentParser()
# Required arguments
parser.add_argument(
'provider',
nargs='?',
help="Which provider to use (or, the type of object to query).")
parser.add_argument(
'query',
nargs='?',
help="The query for the provider to consume.")
# Other options
parser.add_argument('-p', '--providers', action='store_true',
help="List avai | lable providers and exit.")
# These arguments affect the output and are exclusive
outputGroup = parser.add_mutually_exclusive_group()
outputGroup.add_argument('-c', '--console', action='store_true',
help="Output console-formatted text (default).")
outputGroup.add_argument('-f', '--config', nargs='?', const=None,
help="Path to configuration file to use.")
outputGroup.add_argument('-j' | , '--json', action='store_true',
help="Output json.")
outputGroup.add_argument('-pd', '--provider-dir', nargs='?', const=None,
help="Directory that contains provider files.")
outputGroup.add_argument('-w', '--webserver', nargs='?', const=8181,
help="Start as a web server daemon on the \
specified port (default 8181).")
return parser
def check_config(args):
'''Check config file and override arguments if needed.
Args:
args: Parsed arguments.
'''
if args.config:
conf_file = Path(args.config)
if not conf_file.exists():
# Provided file does not exist
sys.exit("Configuration file not found")
else:
conf_file = Path('./mdgt.conf')
if conf_file.exists():
# Override args
conf_parser = configparser.ConfigParser()
# Add dummy section for reading
conf_parser.read_string(
StringIO("[mdgt]\n%s" % conf_file.open().read()).read()
)
section = conf_parser['mdgt']
args.console = args.console or section.getboolean('console', True)
args.json = args.json or section.getboolean('json', False)
args.provider_dir = args.provider_dir or section.get('provider-dir')
def main():
'''Application entry point.'''
# Parser
parser = setup_parser()
args = parser.parse_args()
# Config file
check_config(args)
if args.providers:
listProvs()
elif args.webserver:
# TODO: Add error checking once the config file is implemented.
webserve(int(args.webserver))
elif (not args.provider) and (not args.query):
print("Provider and query required. See --help")
elif args.json:
prov = Provider(args.provider, args.provider_dir)
jsonPrint(prov.scrape(args.query))
else:
prov = Provider(args.provider, args.provider_dir)
consolePrint(prov.scrape(args.query))
|
valeros/platformio | scripts/mbed_to_package.py | Python | apache-2.0 | 4,191 | 0.000477 | # Copyright 2014-2016 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apach | e License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of | the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import zipfile
from os import getcwd, listdir, makedirs, mkdir, rename
from os.path import isdir, isfile, join
from shutil import move, rmtree
from sys import exit as sys_exit
from sys import path
path.append("..")
from platformio.util import exec_command, get_home_dir
def _unzip_generated_file(mbed_dir, output_dir, mcu):
filename = join(
mbed_dir, "build", "export", "MBED_A1_emblocks_%s.zip" % mcu)
variant_dir = join(output_dir, "variant", mcu)
if isfile(filename):
with zipfile.ZipFile(filename) as zfile:
mkdir(variant_dir)
zfile.extractall(variant_dir)
for f in listdir(join(variant_dir, "MBED_A1")):
if not f.lower().startswith("mbed"):
continue
move(join(variant_dir, "MBED_A1", f), variant_dir)
rename(join(variant_dir, "MBED_A1.eix"),
join(variant_dir, "%s.eix" % mcu))
rmtree(join(variant_dir, "MBED_A1"))
else:
print "Warning! Skipped board: %s" % mcu
def buildlib(mbed_dir, mcu, lib="mbed"):
build_command = [
"python",
join(mbed_dir, "workspace_tools", "build.py"),
"--mcu", mcu,
"-t", "GCC_ARM"
]
if lib is not "mbed":
build_command.append(lib)
build_result = exec_command(build_command, cwd=getcwd())
if build_result['returncode'] != 0:
print "* %s doesn't support %s library!" % (mcu, lib)
def copylibs(mbed_dir, output_dir):
libs = ["dsp", "fat", "net", "rtos", "usb", "usb_host"]
libs_dir = join(output_dir, "libs")
makedirs(libs_dir)
print "Moving generated libraries to framework dir..."
for lib in libs:
if lib == "net":
move(join(mbed_dir, "build", lib, "eth"), libs_dir)
continue
move(join(mbed_dir, "build", lib), libs_dir)
def main(mbed_dir, output_dir):
print "Starting..."
path.append(mbed_dir)
from workspace_tools.export import gccarm
if isdir(output_dir):
print "Deleting previous framework dir..."
rmtree(output_dir)
settings_file = join(mbed_dir, "workspace_tools", "private_settings.py")
if not isfile(settings_file):
with open(settings_file, "w") as f:
f.write("GCC_ARM_PATH = '%s'" %
join(get_home_dir(), "packages", "toolchain-gccarmnoneeabi",
"bin"))
makedirs(join(output_dir, "variant"))
mbed_libs = ["--rtos", "--dsp", "--fat", "--eth", "--usb", "--usb_host"]
for mcu in set(gccarm.GccArm.TARGETS):
print "Processing board: %s" % mcu
buildlib(mbed_dir, mcu)
for lib in mbed_libs:
buildlib(mbed_dir, mcu, lib)
result = exec_command(
["python", join(mbed_dir, "workspace_tools", "project.py"),
"--mcu", mcu, "-i", "emblocks", "-p", "0", "-b"], cwd=getcwd()
)
if result['returncode'] != 0:
print "Unable to build the project for %s" % mcu
continue
_unzip_generated_file(mbed_dir, output_dir, mcu)
copylibs(mbed_dir, output_dir)
with open(join(output_dir, "boards.txt"), "w") as fp:
fp.write("\n".join(sorted(listdir(join(output_dir, "variant")))))
print "Complete!"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--mbed', help="The path to mbed framework")
parser.add_argument('--output', help="The path to output directory")
args = vars(parser.parse_args())
sys_exit(main(args["mbed"], args["output"]))
|
EnTeQuAk/dotfiles | sublime-text-3/Packages/HTML-CSS-JS Prettify/src/py/utils/env_utils.py | Python | unlicense | 2,798 | 0.000357 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Various utility functions used by this plugin"""
import subprocess
from os import environ, devnull
from os.path import expanduser
from .constants import PLATFORM
from .window_utils import get_pref
class NodeNotFoundError(OSError):
def __init__(self, original_exception, node_path):
msg = "Node.js was not found in the default path"
OSError.__init__(self, msg + (": %s" % original_exception))
self.node_path = node_path
class NodeRuntimeError(RuntimeError):
def __init__(self, stdout, stderr):
msg = "Node.js encountered a runtime error"
RuntimeError.__init__(self, msg + (": %s\n%s" % (stderr, stdout)))
self.stdout = stdout
self.stderr = stderr
class NodeSyntaxError(RuntimeError):
def __init__(self, stdout, stderr):
msg = "Node.js encountered a runtime syntax error"
RuntimeError.__init__(self, msg + (": %s\n%s" % (stderr, stdout)))
self.stdout = stdout
self.stderr = stderr
def get_node_path():
"""Gets the node.js path specified in this plugin's settings file"" | "
node = get_pref("node_path").get(PLATFORM)
return expanduser(node)
def run_command(args):
"""Runs a command in a shell and returns the output"""
popen_args = {
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
"env": environ,
}
if PLATFORM == "win | dows":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_args["startupinfo"] = startupinfo
popen_args["stdin"] = open(devnull, 'wb')
stdout, stderr = subprocess.Popen(args, **popen_args).communicate()
if stderr:
if b"ExperimentalWarning" in stderr:
# Don't treat node experimental warnings as actual errors.
return stdout
elif b"SyntaxError" in stderr:
raise NodeSyntaxError(
stdout.decode('utf-8'), stderr.decode('utf-8'))
else:
raise NodeRuntimeError(
stdout.decode('utf-8'), stderr.decode('utf-8'))
return stdout
def run_node_command(args):
"""Runs a node command in a shell and returns the output"""
node_path = get_node_path()
try:
stdout = run_command([node_path] + args)
except OSError as err:
if node_path in err.strerror or \
"No such file or directory" in err.strerror or \
"The system cannot find the file specified" in err.strerror:
raise NodeNotFoundError(err, node_path)
else:
raise err
return stdout
|
kennknowles/python-rightarrow | tests/samples/literal_int.py | Python | apache-2.0 | 8 | 0 | 3 | 48593 | 0
|
BlogForever/crawler | bibcrawl/model/objectitem.py | Python | mit | 888 | 0.006757 | """Super class of comment and post item"""
from scrapy.item import Item
class ObjectItem(Item):
"""Extends Scrapy Item interface to be usable with the standard object
attribute notation.
>>> from scrapy.item import Field
>>> cl | ass I(ObjectItem):
... myField = Field()
>>> i = I()
>>> i.myField = 1
>>> i.myField
1
>>> try:
... i.notAField
... except KeyError:
... pass
... else:
... fail
"""
def __setattr__(self, key, value):
"""Sets a fild of the item using object attribute notation."""
if key in self.fields:
super(ObjectItem, self).__setitem__(key, value)
else:
super(ObjectItem, self).__setattr__(key, | value)
def __getattr__(self, key):
"""Gets a fild of the item using object attribute notation."""
# This is enough because __getattr__ is a fallback...
return self[key]
|
OCA/hr | hr_employee_ppe/models/hr_personal_equipment_request.py | Python | agpl-3.0 | 786 | 0 | # Copyright 2021 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class HrPersonalEquipmentRequest(models.Model):
_inherit = "hr.personal.equipment.request"
contains_ppe = fields.Boolean(compute="_compute_contains_ppe")
def _compute_contains_ppe(self):
for rec in self:
for line in rec.line_ids:
if line.is_ppe:
rec.contains_ppe = True
return
else:
rec.contains_ppe = False
de | f action_view_ppe_report(self):
report = self.env["ir.actions.report"]._get_report_from_name(
"hr_employee_ppe.hr_employee_ppe_report_template"
)
return report.report_action(self) | |
memnonila/art | art/asgi.py | Python | mit | 146 | 0 | impo | rt os
import channels.asgi
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'art.settings')
channel_layer = channels.asgi.get_channel_lay | er()
|
207leftovers/cs207project | pype/driver.py | Python | mit | 130 | 0.007692 | #!/usr/bin/env python3
#import pype
impo | rt sys
from pype. | pipeline import *
for fname in sys.argv[1:]:
Pipeline(source=fname)
|
mbakker7/ttim | ttim/aquifer.py | Python | mit | 8,387 | 0.007512 | import numpy as np
import matplotlib.pyplot as plt
import inspect # Used for storing the input
class AquiferData:
def __init__(self, model, kaq, z, Haq, Hll, c, Saq, Sll, poraq, porll,
ltype, topboundary, phreatictop, kzoverkh=None, model3d=False):
'''kzoverkh and model3d only need to be specified when model
is model3d'''
self.model = model
self.kaq = np.atleast_1d(kaq).astype('d')
self.z = np.atleast_1d(z).astype('d')
self.naq = len(self.kaq)
self.nlayers = len(self.z) - 1
self.Haq = np.atleast_1d(Haq).astype('d')
self.Hll = np.atleast_1d(Hll).astype('d')
self.T = self.kaq * self.Haq
self.Tcol = self.T.reshape(self.naq, 1)
self.c = np.atleast_1d(c).astype('d')
self.c[self.c > 1e100] = 1e100
self.Saq = np.atleast_1d(Saq).astype('d')
self.Sll = np.atleast_1d(Sll).astype('d')
self.Sll[self.Sll < 1e-20] = 1e-20 # Cannot be zero
self.poraq = np.atleast_1d(poraq).astype('d')
self.porll = np.atleast_1d(porll).astype('d')
self.ltype = np.atleast_1d(ltype)
self.zaqtop = self.z[:-1][self.ltype == 'a']
self.zaqbot = self.z[1:][self.ltype == 'a']
self.layernumber = np.zeros(self.nlayers, dtype='int')
self.layernumber[self.ltype == 'a'] = np.arange(self.naq)
self.layernumber[self.ltype == 'l'] = np.arange(self.nlayers - self.naq)
if self.ltype[0] == 'a':
self.layernumber[self.ltype == 'l'] += 1 # first leaky layer below first aquifer layer
self.topboundary = topboundary[:3]
self.phreatictop = phreatictop
self.kzoverkh = kzoverkh
if self.kzoverkh is not None:
self.kzoverkh = np.atleast_1d(self.kzoverkh).astype('d')
if len(self.kzoverkh) == 1:
self.kzoverkh = self.kzoverkh * np.ones(self.naq)
self.model3d = model3d
if self.model3d:
assert self.kzoverkh is not None, \
"model3d specified without kzoverkh"
#self.D = self.T / self.Saq
self.area = 1e200 # Smaller than default of ml.aq so that inhom is found
def __repr__(self):
return 'Inhom T: ' + str(self.T)
def initialize(self):
'''
eigval[naq, npval]: Array with eigenvalues
lab[naq, npval]: Array with lambda values
lab2[naq, nint, npint]: Array with lambda values reorganized per
interval
eigvec[naq, naq, npval]: Array with eigenvector matrices
coef[naq ,naq, npval]: Array with coefficients;
coef[ilayers, :, np] are the coefficients if the element is in
ilayers belonging to Laplace parameter number np
'''
# Recompute T for when kaq is changed
self.T = self.kaq * self.Haq
self.Tcol = self.T.reshape(self.naq, 1)
# Compute Saq and Sll
self.Scoefaq = self.Saq * self.Haq
self.Scoefll = self.Sll * self.Hll
if (self.topboundary == 'con') and self.phreatictop:
self.Scoefaq[0] = self.Scoefaq[0] / self.Haq[0]
elif (self.topboundary == 'lea') and self.phreatictop:
self.Scoefll[0] = self.Scoefll[0] / self.Hll[0]
self.D = self.T / self.Scoefaq
# Compute c if model3d for when kaq is changed
if self.model3d:
self.c[1:] = \
0.5 * self.Haq[:-1] / (self.kzoverkh[:-1] * self.kaq[:-1]) + \
0.5 * self.Haq[1:] / (self.kzoverkh[1:] * self.kaq[1:])
#
self.eigval = np.zeros((self.naq, self.model.npval), 'D')
self.lab = np.zeros((self.naq, self.model.npval), 'D')
self.eigvec = np.zeros((self.naq, self.naq, self.model.npval), 'D')
self.coef = np.zeros((self.naq, self.naq, self.model.npval), 'D')
b = np.diag(np.ones(self.naq))
for i in range(self.model.npval):
w, v = self.compute_lab_eigvec(self.model.p[i])
# Eigenvectors are columns of v
self.eigval[:, i] = w; self.eigvec[:, :, i] = v
self.coef[:, :, i] = np.linalg.solve(v, b).T
self.lab = 1.0 / np.sqrt(self.eigval)
self.lab2 = self.lab.copy()
self.lab2.shape = (self.naq, self.model.nint, self.model.npint)
self.lababs = np.abs(self.lab2[:, :, 0]) # used to check distances
self.eigvec2 = self.eigvec.copy()
self.eigvec2.shape = (self.naq, self.naq,
self.model.nint, self.model.npint)
def compute_lab_eigvec(self, p, returnA = False, B = None):
sqrtpSc = np.sqrt( p * self.Scoefll * self.c )
a, b = np.zeros_like(sqrtpSc), np.zeros_like(sqrtpSc)
small = np.abs(sqrtpSc) < 200
a[small] = sqrtpSc[small] / np.tanh(sqrtpSc[small])
b[small] = sqrtpSc[small] / np.sinh(sqrtpSc[small])
a[~small] = sqrtpSc[~small] / ((1.0 - np.exp(-2.0*sqrtpSc[~small])) /
(1.0 + np.exp(-2.0*sqrtpSc[~small])))
b[~small] = sqrtpSc[~small] * 2.0 * np.exp(-sqrtpSc[~small]) / \
(1.0 - np.exp(-2.0*sqrtpSc[~small]))
if (self.topboundary[:3] == 'sem') or (self.topboundary[:3] == 'lea'):
dzero = sqrtpSc[0] * np.tanh(sqrtpSc[0])
d0 = p / self.D
if B is not None:
d0 = d0 * B # B is vector of load efficiency paramters
d0[:-1] += a[1:] / (self.c[1:] * self.T[:-1])
d0[1:] += a[1:] / (self.c[1:] * self.T[1:])
if self.topboundary[:3] == 'lea':
d0[0] += dzero / ( self.c[0] * self.T[0] )
elif self.topboundary[:3] == 'sem':
d0[0] += a[0] / ( self.c[0] * self.T[0] )
dm1 = -b[1:] / (self.c[1:] * self.T[:-1])
dp1 = -b[1:] / (self.c[1:] * self.T[1:])
A = np.diag(dm1,-1) + np.diag(d0,0) + np.diag(dp1,1)
if returnA: return A
w, v = np.linalg.eig(A)
# sorting moved here
index = np.argsort(abs(w))[::-1]
w = w[index]
v = v[:, index]
return w, v
def head_to_potential(self, h, layers):
return h * self.Tcol[layers]
def potential_to_head(self, pot, layers):
return pot / self.Tcol[layers]
def isInside(self,x,y):
print('Must overload AquiferData.isInside method')
return True
def inWhichLayer(self, z):
'''Returns -9999 if above top of system,
+9999 if below bottom of system,
negative for in leaky layer.
leaky layer -n is on top of aquifer n'''
if z > self.zt[0]:
return -9999
for i in range(self.naq-1):
if z >= self.zb[i]:
return i
if z > self.zt[i+1]:
return -i-1
if z >= self.zb[self.naq-1]:
return self.naq - 1
return +9999
def findlayer(self, z):
'''
Returns layer-number, layer-type and model-layer-number'''
if z > self.z[0]:
modellayer = -1
ltype = 'above'
layernumber = None
elif z < self.z[-1]:
modellayer = len(self.layernumber)
ltype = 'below'
layernumber = None
else:
modellayer = np.argwhere((z <= self.z[:-1]) &
(z >= self.z[1: | ]))[0, 0]
layernumber = self.layernumber[modellayer]
ltype = self.ltype[modellayer]
return layernumber, ltype, modellayer
class Aquifer(AquiferData):
def __init__(self, model, kaq, z, Haq, Hll, c, Saq, Sll, poraq, porll,
ltype, topboundary, phreatictop, kzoverkh=None, model3d=False):
AquiferData.__init__(self, model, kaq, z, Haq, Hll, c, Saq, Sll,
| poraq, porll, ltype, topboundary, phreatictop, kzoverkh, model3d)
self.inhomlist = []
self.area = 1e300 # Needed to find smallest inhomogeneity
def __repr__(self):
return 'Background Aquifer T: ' + str(self.T)
def initialize(self):
AquiferData.initialize(self)
for inhom in self.inhomlist:
inhom.initialize()
|
isandlaTech/cohorte-demos | spellchecker/python/repo/spellchecker/spell_dictionary_EN.py | Python | apache-2.0 | 1,902 | 0.003155 | #!/usr/bin/python
#-- Content-Encoding: UTF-8 --
"""
This bundle provides a component that is a simple implementation of the
Dictionary service. It contains some English words.
:authors: Shadi Abras, Thomas Calmant
:copyright: Copyright 2013, isandlaTech
:license: Apache Software License 2.0
"""
# iPOPO decorators
from pelix.ipopo.decorators import ComponentFactory, Property, Provides, \
Validate, Invalidate
import logging
_logger = logging.getLogger("spellchecker.spell_dictionary_EN")
# Name the iPOPO component factory
@ComponentFactory("spell_dictionary_en_factory")
# This component provides a dictionary service
@Provides("spell_dictionary_service")
# It is the English dictionary
@Property("_language","language","EN")
class SpellDictionary(object):
    """
    Implementation of a spell dictionary for the English language.

    Registered with iPOPO as ``spell_dictionary_en_factory``; provides the
    ``spell_dictionary_service`` service with its ``language`` property set
    to ``"EN"``.
    """

    def __init__(self):
        """
        Declares members, to respect PEP-8.
        """
        # Set of known lower-case words; populated on validate(),
        # cleared on invalidate().
        self.dictionary = None

    @Validate
    def validate(self, context):
        """
        The component is validated. This method is called right before the
        provided service is registered to the framework.

        :param context: the bundle context
        """
        _logger.info("SpellDictionary EN validated")
        self.dictionary = {"hello" , "world", "welcome", "to", "the", "ipopo", "tutorial"}

    @Invalidate
    def invalidate(self, context):
        """
        The component has been invalidated. This method is called right after
        the provided service has been removed from the framework.

        :param context: the bundle context
        """
        self.dictionary = None

    def check_word(self, word):
        """
        Determines if the given word is contained in the dictionary.

        :param word: the word to be checked.
        :return: True if the word is in the dictionary (an empty/whitespace
                 word also counts as correct), False otherwise.
        """
        word = word.lower().strip()
        return not word or word in self.dictionary
|
mtzirkel/leakyskiff | quiz/multichoice/migrations/0001_initial.py | Python | mit | 1,525 | 0.002623 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-02 05:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrati | ons.CreateModel(
name='MCAnswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_na | me='ID')),
('correct', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='MCChoice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=100)),
('correct', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='MCQuestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('point_value', models.IntegerField(default=1)),
],
),
migrations.AddField(
model_name='mcanswer',
name='question_answer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='multichoice.MCChoice'),
),
]
|
openstack/vitrage | vitrage/tests/unit/datasources/test_transformer_base.py | Python | apache-2.0 | 1,151 | 0 | # Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vitrage.common.constants import VertexProperties as VProps
from vitrage.tests import base
# noinspection PyProtectedMember
class BaseTransformerT | est(base.BaseTest):
def _validate_base_vertex_props(self,
vertex,
| expected_name,
expected_datasource_name):
self.assertFalse(vertex[VProps.VITRAGE_IS_PLACEHOLDER])
self.assertEqual(expected_datasource_name, vertex[VProps.VITRAGE_TYPE])
self.assertEqual(expected_name, vertex[VProps.NAME])
|
mirror/vbox | src/VBox/ValidationKit/tests/benchmarks/tdBenchmark1.py | Python | gpl-2.0 | 3,405 | 0.015565 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id$
"""
VirtualBox Validation Kit - Test that runs various benchmarks.
"""
__copyright__ = \
"""
Copyright (C) 2010-2014 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful | , but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision$ | "
# Standard Python imports.
import os;
import sys;
# Only the main script needs to modify the path.
try: __file__
except: __file__ = sys.argv[0];
g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
sys.path.append(g_ksValidationKitDir);
# Validation Kit imports.
from testdriver import reporter;
from testdriver import vbox;
from testdriver import vboxtestvms;
class tdBenchmark1(vbox.TestDriver):
    """
    Benchmark #1.

    Boots the 'bootsector2-test1' boot-sector test VM and collects the IPRT
    test XML it produces into the test report.
    """

    def __init__(self):
        vbox.TestDriver.__init__(self);
        # Register the boot-sector benchmark VM with the common test VM set.
        oTestVm = vboxtestvms.BootSectorTestVm(self.oTestVmSet, 'tst-bs-test1',
                                               os.path.join(self.sVBoxBootSectors, 'bootsector2-test1.img'));
        self.oTestVmSet.aoTestVms.append(oTestVm);

    #
    # Overridden methods.
    #

    def actionConfig(self):
        """Configure the test VMs; requires the validation kit to be detectable."""
        self._detectValidationKit();
        return self.oTestVmSet.actionConfig(self);

    def actionExecute(self):
        """Run testOneCfg for every configured test VM."""
        return self.oTestVmSet.actionExecute(self, self.testOneCfg);

    #
    # Test execution helpers.
    #

    def testOneCfg(self, oVM, oTestVm):
        """
        Runs the specified VM thru the tests.

        Returns a success indicator on the general test execution. This is not
        the actual test result.
        """
        fRc = False;
        # The guest writes its IPRT test results to this XML file, whose path
        # is handed in through the environment.
        sXmlFile = self.prepareResultFile();
        asEnv = [ 'IPRT_TEST_FILE=' + sXmlFile];
        self.logVmInfo(oVM);
        oSession = self.startVm(oVM, sName = oTestVm.sVmName, asEnv = asEnv);
        if oSession is not None:
            self.addTask(oSession);
            # Default timeout 15 minutes; much longer on remote testboxes.
            cMsTimeout = 15*60*1000;
            if not reporter.isLocal(): ## @todo need to figure a better way of handling timeouts on the testboxes ...
                cMsTimeout = self.adjustTimeoutMs(180 * 60000);
            oRc = self.waitForTasks(cMsTimeout);
            self.removeTask(oSession);
            if oRc == oSession:
                # The session completed (VM powered off by itself).
                fRc = oSession.assertPoweredOff();
            else:
                reporter.error('oRc=%s, expected %s' % (oRc, oSession));
            reporter.addSubXmlFile(sXmlFile);
            self.terminateVmBySession(oSession);
        return fRc;
# Script entry point: run the test driver and propagate its exit code.
if __name__ == '__main__':
    sys.exit(tdBenchmark1().main(sys.argv));
|
hovel/pybbm | pybb/signals.py | Python | bsd-2-clause | 3,815 | 0.003145 | # coding=utf-8
from __future__ import unicode_literals
from django.contrib.au | th.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import post_save, post_delete, pre_save
from pybb.models import Post, Category, Topic, Forum, create_or_check_slug
from pybb.subscription import notify_topic_subscribers, notify_forum_subscribers
from pybb import util, def | aults, compat
from pybb.permissions import perms
def topic_saved(instance, **kwargs):
    """Topic ``post_save`` handler: notify forum subscribers on creation only."""
    if not kwargs['created']:
        return
    notify_forum_subscribers(instance)
def post_saved(instance, **kwargs):
    """
    Post ``post_save`` signal handler.

    Notifies topic subscribers about the post, optionally auto-subscribes
    the author to the topic, and keeps the author's profile post counter
    up to date on creation.
    """
    # signal triggered by loaddata command, ignore
    if kwargs.get('raw', False):
        return
    if getattr(instance, '_post_saved_done', False):
        #Do not spam users when post is saved more than once in a same request.
        #For eg, when we parse attachments.
        return
    instance._post_saved_done = True
    if not defaults.PYBB_DISABLE_NOTIFICATIONS:
        notify_topic_subscribers(instance)
    # Auto-subscribe the author when the profile requests it and
    # permissions allow subscribing to this topic.
    if util.get_pybb_profile(instance.user).autosubscribe and \
            perms.may_subscribe_topic(instance.user, instance.topic):
        instance.topic.subscribers.add(instance.user)
    if kwargs['created']:
        # Recount instead of incrementing so the counter stays correct
        # even after out-of-band deletions.
        profile = util.get_pybb_profile(instance.user)
        profile.post_count = instance.user.posts.count()
        profile.save()
def post_deleted(instance, **kwargs):
    """Post ``post_delete`` handler: refresh the author's post counter."""
    profile_model = util.get_pybb_profile_model()
    user_model = compat.get_user_model()
    try:
        profile = util.get_pybb_profile(instance.user)
    except (profile_model.DoesNotExist, user_model.DoesNotExist):
        # When a user is cascade-deleted, the profile and posts are deleted
        # with it, so there is nothing left to update.
        return
    profile.post_count = instance.user.posts.count()
    profile.save()
def user_saved(instance, created, **kwargs):
    """
    User ``post_save`` signal handler.

    Grants the pybb "add post" / "add topic" permissions to every newly
    created user and, when profiles are enabled, attaches a fresh profile.
    """
    # signal triggered by loaddata command, ignore
    if kwargs.get('raw', False):
        return
    if not created:
        return
    try:
        add_post_permission = Permission.objects.get_by_natural_key('add_post', 'pybb', 'post')
        add_topic_permission = Permission.objects.get_by_natural_key('add_topic', 'pybb', 'topic')
    except (Permission.DoesNotExist, ContentType.DoesNotExist):
        # Permissions are not installed yet (e.g. during initial migrations);
        # silently skip in that case.
        return
    instance.user_permissions.add(add_post_permission, add_topic_permission)
    instance.save()
    if defaults.PYBB_PROFILE_RELATED_NAME:
        ModelProfile = util.get_pybb_profile_model()
        profile = ModelProfile()
        setattr(instance, defaults.PYBB_PROFILE_RELATED_NAME, profile)
        profile.save()
def get_save_slug(extra_field=None):
    """
    Return a ``pre_save`` handler that adds or makes an instance's slug unique.

    :param extra_field: field needed in case of a unique_together.
    """
    def save_slug(**kwargs):
        instance = kwargs['instance']
        extra_filters = {}
        if extra_field:
            extra_filters[extra_field] = getattr(instance, extra_field)
        instance.slug = create_or_check_slug(instance, kwargs['sender'],
                                             **extra_filters)
    return save_slug
# Ready-made slug handlers, one per model that carries a slug.
pre_save_category_slug = get_save_slug()
pre_save_forum_slug = get_save_slug('category')
pre_save_topic_slug = get_save_slug('forum')
def setup():
    """Connect all pybb signal handlers to the Django model signals."""
    # Slugs must be in place before the row is written, hence pre_save.
    pre_save.connect(pre_save_category_slug, sender=Category)
    pre_save.connect(pre_save_forum_slug, sender=Forum)
    pre_save.connect(pre_save_topic_slug, sender=Topic)
    post_save.connect(topic_saved, sender=Topic)
    post_save.connect(post_saved, sender=Post)
    post_delete.connect(post_deleted, sender=Post)
    if defaults.PYBB_AUTO_USER_PERMISSIONS:
        post_save.connect(user_saved, sender=compat.get_user_model())
|
nish10z/CONCUSS | lib/pattern_counting/double_count/color_count.py | Python | bsd-3-clause | 2,244 | 0 | #
# This file is part of CONCUSS, https://github.com/theoryinpractice/concuss/,
# and is Copyright (C) North Carolina State University, 2015. It is licensed
# under the three-clause BSD license; see LICENSE.
#
from collections import Counter
from count_combiner import CountCombiner
class ColorCount(CountCombiner):
    """
    Perform dynamic programming with a special table which keeps track
    of colors.

    By keeping track of colors, ColorCount allows us to not have to
    look at smaller sets of colors. This gets passed all the way back
    to the DecompGenerator, so the decompositions with fewer than p
    colors are never even created. After processing all decompositions
    with one set of colors, we fill the counts found into a large table
    called totals. If an entry of totals is already full, we don't
    change it; if it's 0, we can put our new count in. When returning
    the final count, we simply add all the entries in totals.
    """

    def __init__(self, p, coloring, table_hints, td, execdata_file=None):
        """Create tables for keeping track of the separate counts"""
        super(ColorCount, self).__init__(p, coloring, table_hints, td)
        # Merged counts across all color sets processed so far.
        self.totals = Counter()
        # Counts accumulated for the color set currently being processed.
        self.raw_count = Counter()
        self.tree_depth = self.min_p
        # Size of the current color set; set in before_color_set().
        self.n_colors = None
        # NOTE(review): deferred import — presumably avoids a circular
        # import with lib.pattern_counting.dp; confirm before moving it.
        from lib.pattern_counting.dp import ColorDPTable
        self.table_type = ColorDPTable

    def table(self, G):
        """Make an appropriate DPTable, given the hints specified"""
        return self.table_type(G, reuse=self.table_hints['reuse'])

    def before_color_set(self, colors):
        """Clear the raw_count for the new color set"""
        self.n_colors = len(colors)
        self.raw_count.clear()

    def combine_count(self, count):
        """Add the count returned from dynamic programming on one TDD"""
        # Only color sets whose size lies in [tree_depth, min_p] contribute.
        if self.tree_depth <= self.n_colors <= self.min_p:
            self.raw_count += count

    def after_color_set(self, colors):
        """Combine the count for this color set into the total count"""
        # Counter |= keeps the per-key maximum, so an entry already filled
        # in totals is never lowered by a later color set.
        self.totals |= self.raw_count

    def get_count(self):
        """Return the total number of occurrences of the pattern seen"""
        return sum(self.totals.itervalues())
|
google-research/federated | compressed_communication/aggregators/stdev_weights.py | Python | apache-2.0 | 3,289 | 0.005473 | # Copyright 2022, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable | law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tff.aggregator for tracking the standard deviation of client model weights."""
import tensorflow as tf
import tensorflow_federated as tff
class StdevWeightsFactory(tff.aggregators.UnweightedAggregationFactory):
  # Fix: the class statement was corrupted in the source
  # ("tf | f.aggregators..."); restored to the tff base class the
  # docstring and module imports clearly intend.
  r"""Aggregator reporting the standard deviation of client weights as a metric.

  The created tff.templates.AggregationProcess sums values placed at CLIENTS,
  and outputs the sum placed at SERVER.

  The process has empty state and returns the standard deviation over client
  values in measurements. For computing the result summation over client values,
  implementation delegates to the tff.federated_sum operator.

  The standard deviation returned in measurements is either a single value if
  value_type is a single tensor of weights, or a list of values - one for each
  layer - if the client value_type is a struct of weight tensors. Standard
  deviation is computed by taking the second moment of weights on each client,
  ie mean_{v \in values}(v**2), then taking a federated mean of these second
  moments on the server and federated square root.
  """

  def create(self, value_type):
    """Builds the AggregationProcess for the given client `value_type`.

    Args:
      value_type: A float32 tensor type or a structure of float tensor types.

    Returns:
      A `tff.templates.AggregationProcess` whose `next` sums client values
      and reports the (per-layer) standard deviation in measurements.

    Raises:
      ValueError: If `value_type` is not a float tensor or a structure of
        float tensors.
    """
    if not (tff.types.is_structure_of_floats(value_type) or
            (value_type.is_tensor() and value_type.dtype == tf.float32)):
      raise ValueError("Expect value_type to be float tensor or structure of "
                       f"float tensors, found {value_type}.")

    @tff.federated_computation()
    def init_fn():
      # The process is stateless: empty server-placed state.
      return tff.federated_value((), tff.SERVER)

    @tff.tf_computation(value_type)
    def compute_client_mean_second_moment(value):
      # mean(v ** 2) per tensor, computed locally on each client.
      second_moment = tf.nest.map_structure(tf.math.square, value)
      client_mean_second_moment = tf.nest.map_structure(tf.math.reduce_mean,
                                                        second_moment)
      return client_mean_second_moment

    @tff.tf_computation(compute_client_mean_second_moment.type_signature.result)
    def compute_sqrt(mean_client_second_moments):
      return tf.math.sqrt(mean_client_second_moments)

    @tff.federated_computation(init_fn.type_signature.result,
                               tff.type_at_clients(value_type))
    def next_fn(state, value):
      summed_value = tff.federated_sum(value)
      client_second_moments = tff.federated_map(
          compute_client_mean_second_moment, value)
      mean_client_second_moments = tff.federated_mean(client_second_moments)
      server_stdev = tff.federated_map(compute_sqrt,
                                       mean_client_second_moments)
      return tff.templates.MeasuredProcessOutput(
          state=state, result=summed_value, measurements=server_stdev)

    return tff.templates.AggregationProcess(init_fn, next_fn)
|
rodrigocnascimento/django-teste | product/migrations/0010_auto_20170324_1304.py | Python | mit | 1,504 | 0.001995 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-24 16:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('product', '0009_auto_20170323_1823'),
]
operations = [
migrations.CreateModel(
name='Sale',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sale_date', models.DateTimeField()),
],
),
migrations.AddField(
model_name='product',
name='product_price',
field=models.FloatField(default=0.0),
preserve_default=False,
),
migrations.AddField(
model_name='product',
name='product_qtd',
field=models.IntegerField(default=1),
preserve_ | default=False,
),
migrations.AddField(
model_name='sale',
name='product',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCAD | E, to='product.Product'),
),
migrations.AddField(
model_name='sale',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
sdrogers/ms2ldaviz | ms2ldaviz/add_topic_dict_beer3.py | Python | mit | 579 | 0.032815 | import pickle
import numpy as np
import sys
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ms2ldaviz.settings")
import django
django.setup()
import jsonpickle
from basi | cviz.models import Experiment,Mass2Motif
if __name__ == '__main__':
with open('/Users/simon/git/lda/notebooks/beer3.dict','r') as f:
lda = pickle.load(f)
experiment = Experiment.objects.get(name='beer3')
for m2m in lda['topic_metadata']:
motif = Mass2Motif.objects.get(name = m2m,experiment=exp | eriment)
motif.metadata = jsonpickle.encode(lda['topic_metadata'][m2m])
motif.save()
|
LBenzahia/cltk | cltk/corpus/old_norse/runes.py | Python | mit | 8,337 | 0.003377 | """
Sources:
- Viking Language 1 by Jessie L. Byock 2013
Unicode: 16A0–16FF
"""
from enum import Enum, auto
__author__ = ["Clément Besnier <clemsciences@aol.com>", ]
POINT = "᛫"
SEMI_COLUMN = "\u16EC"
class AutoName(Enum):
    """Enum base whose ``auto()`` members take their own name as value."""

    def _generate_next_value_(name, start, count, last_values):
        # Hook invoked by auto(); parameters renamed to the conventional
        # (name, start, count, last_values) documented in the enum module.
        # Returning the member name makes value == name.
        return name
class RunicAlphabetName(AutoName):
    """Identifiers of the supported runic alphabets (value equals the name)."""
    elder_futhark = auto()
    younger_futhark = auto()
    short_twig_younger_futhark = auto()
class Rune:
    u"""
    A single rune: its alphabet, written form, sound, transcription and name.

    >>> Rune(RunicAlphabetName.elder_futhark, "\u16BA", "h", "h", "haglaz")
    ᚺ
    >>> Rune.display_runes(ELDER_FUTHARK)
    ['ᚠ', 'ᚢ', 'ᚦ', 'ᚨ', 'ᚱ', 'ᚲ', 'ᚷ', 'ᚹ', 'ᚺ', 'ᚾ', 'ᛁ', 'ᛃ', 'ᛇ', 'ᛈ', 'ᛉ', 'ᛊ', 'ᛏ', 'ᛒ', 'ᛖ', 'ᛗ', 'ᛚ', 'ᛜ', 'ᛟ', 'ᛞ']
    """

    def __init__(self, runic_alphabet: RunicAlphabetName, form: str, sound: str, transcription: str, name: str):
        """
        :param runic_alphabet: RunicAlphabetName the rune belongs to
        :param form: str, the glyph
        :param sound: str, the phonetic value
        :param transcription: str, conventional Latin transcription
        :param name: str, traditional name of the rune
        """
        self.name = name
        self.transcription = transcription
        self.sound = sound
        self.form = form
        self.runic_alphabet = runic_alphabet

    @staticmethod
    def display_runes(runic_alphabet: list):
        """
        Displays the given runic alphabet.

        :param runic_alphabet: list of Rune
        :return: list of the runes' written forms
        """
        return [rune.form for rune in runic_alphabet]

    @staticmethod
    def from_form_to_transcription(form: str, runic_alphabet: list):
        """
        :param form: glyph to look up
        :param runic_alphabet: list of Rune
        :return: conventional transcription of the rune
        """
        transcription_by_form = {rune.form: rune.transcription
                                 for rune in runic_alphabet}
        return transcription_by_form[form]

    def __repr__(self):
        return self.form

    def __str__(self):
        return self.form

    def __eq__(self, other):
        # A rune compares equal to anything equal to its written form, so a
        # plain character can be tested against a list of runes.
        return self.form == other
class Transcriber:
    """
    Turns a runic inscription into its conventional Latin-letter transcription.

    >>> little_jelling_stone = "᛬ᚴᚢᚱᛘᛦ᛬ᚴᚢᚾᚢᚴᛦ᛬ᚴ(ᛅᚱ)ᚦᛁ᛬ᚴᚢᛒᛚ᛬ᚦᚢᛋᛁ᛬ᛅ(ᚠᛏ)᛬ᚦᚢᚱᚢᛁ᛬ᚴᚢᚾᚢ᛬ᛋᛁᚾᛅ᛬ᛏᛅᚾᛘᛅᚱᚴᛅᛦ᛬ᛒᚢᛏ᛬"
    >>> Transcriber.transcribe(little_jelling_stone, YOUNGER_FUTHARK)
    '᛫kurmR᛫kunukR᛫k(ar)þi᛫kubl᛫þusi᛫a(ft)᛫þurui᛫kunu᛫sina᛫tanmarkaR᛫but᛫'
    """

    def __init__(self):
        pass

    @staticmethod
    def from_form_to_transcription(runic_alphabet: list):
        """
        Make a dictionary whose keys are forms of runes and values their transcriptions.
        Used by transcribe method.

        :param runic_alphabet: list of Rune
        :return: dict mapping glyph -> transcription
        """
        return {rune.form: rune.transcription for rune in runic_alphabet}

    @staticmethod
    def transcribe(rune_sentence: str, runic_alphabet: list):
        """
        From a runic inscription, the transcribe method gives a conventional transcription.

        :param rune_sentence: str, elements of this are from runic_alphabet or are punctuations
        :param runic_alphabet: list of Rune
        :return: str, the transcription
        """
        res = []
        d_form_transcription = Transcriber.from_form_to_transcription(runic_alphabet)
        for c in rune_sentence:
            if c in runic_alphabet:
                # Rune.__eq__ compares a rune's form with the character, so
                # this membership test matches glyphs against the alphabet.
                res.append(d_form_transcription[c])
            elif c in "()":
                # Parentheses pass through unchanged (NOTE(review): presumably
                # they mark uncertain readings — confirm).
                res.append(c)
            else:
                # Any other character (e.g. word separators) becomes POINT.
                res.append(POINT)
        return "".join(res)
# ᚠ ᚢ ᚦ ᚨ ᚱ ᚲ ᚷ ᚹ ᚺ ᚾ ᛁ ᛃ ᛇ ᛈ ᛉ ᛊ ᛏ ᛒ ᛖ ᛗ ᛚ ᛜ ᛟ ᛞ
ELDER_FUTHARK = [
Rune(RunicAlphabetName.elder_futhark, "\u16A0", "f", "f", "fehu"),
Rune(RunicAlphabetName.elder_futhark, "\u16A2", "u", "u", "uruz"),
Rune(RunicAlphabetName.elder_futhark, "\u16A6", "θ", "þ", "þuriaz"),
Rune(RunicAlphabetName.elder_futhark, "\u16A8", "a", "a", "ansuz"),
Rune(RunicAlphabetName.elder_futhark, "\u16B1", "r", "r", "raido"),
Rune(RunicAlphabetName.elder_futhark, "\u16B2", "k", "k", "kaunan"),
Rune(RunicAlphabetName.elder_futhark, "\u16B7", "g", "g", "gyfu"),
Rune(RunicAlphabetName.elder_futhark, "\u16B9", "w", "w", "wynn"),
Rune(RunicAlphabetName.elder_futhark, "\u16BA", "h", "h", "haglaz"),
Rune(RunicAlphabetName.elder_futhark, "\u16BE", "n", "n", "naudiz"),
Rune(RunicAlphabetName.elder_futhark, "\u16C1", "i", "i", "isaz"),
Rune(RunicAlphabetName.elder_futhark, "\u16C3", "j", "j", "jeran"),
Rune(RunicAlphabetName.elder_futhark, "\u16C7", "æ", "E", "eiwaz"),
Rune(RunicAlphabetName.elder_futhark, "\u16C8", "p", "p", "peorð"),
Rune(RunicAlphabetName.elder_futhark, "\u16C9", "ʀ", "r", "algiz"),
Rune(RunicAlphabetName.elder_futhark, "\u16CA", "s", "s", "sowilo"),
Rune(RunicAlphabetName.elder_futhark, "\u16CF", "t", "t", "tiwaz"),
Rune(RunicAlphabetName.elder_futhark, "\u16D2", "b", "b", "berkanan"),
Rune(RunicAlphabetName.elder_futhark, "\u16D6", "e", "e", "ehwaz"),
Rune(RunicAlphabetName.elder_futhark, "\u16D7", "m", "m", "mannaz"),
Rune(RunicAlphabetName.elder_futhark, "\u16DA", "l", "l", "laguz"),
Rune(RunicAlphabetName.elder_futhark, "\u16DC", "ŋ", "ng", "ingwaz"),
Rune(RunicAlphabetName.elder_futhark, "\u16DF", "ø", "œ", "odal"),
Rune(RunicAlphabetName.elder_futhark, "\u16DE", "d", "d", "dagaz"),
]
# ᚠ ᚢ ᚦ ᚭ ᚱ ᚴ ᚼ ᚾ ᛁ ᛅ ᛋ ᛏ ᛒ ᛖ ᛘ ᛚ ᛦ
YOUNGER_FUTHARK = [
Rune(RunicAlphabetName.younger_futhark, "\u16A0", "f", "f", "fehu"),
Rune(RunicAlphabetName.younger_futhark, "\u16A2", "u", "u", "uruz"),
Rune(RunicAlphabetName.younger_futhark, "\u16A6", "θ", "þ", "þuriaz"),
Rune(RunicAlphabetName.younger_futhark, "\u16AD", "a", "a", "ansuz"),
Rune(RunicAlphabetName.younger_futhark, "\u16B1", "r", "r", "raido"),
Rune(RunicAlphabetName.younger_futhark, "\u16B4", "k", "k", "kaunan"),
Rune(RunicAlphabetName.younger_futhark, "\u16BC", "h", "h", "haglaz"),
Rune(RunicAlphabetName.younger_futhark, "\u16BE", "n", "n", "naudiz"),
Rune(RunicAlphabetName.younger_futhark, "\u16C1", "i", "i", "isaz"),
Rune(RunicAlphabetName.younger_futhark, "\u16C5", "a", "a", "jeran"),
Rune(RunicAlphabetName.younger_futhark, "\u16CB", "s", "s", "sowilo"),
Rune(RunicAlphabetName.younger_futhark, "\u16CF", "t", "t", "tiwaz"),
Rune(RunicAlphabetName.younger_futhark, "\u16D2", "b", "b", "berkanan"),
Rune(RunicAlphabetName.younger_futhark, "\u16D6", "e", "e", "ehwaz"),
Rune(RunicAlphabetName.younger_futhark, "\u16D8", "m", "m", "mannaz"), # also \u16D9
Rune(RunicAlphabetName.younger_futhark, "\u16DA", "l", "l", "laguz"),
Rune(RunicAlphabetName.younger_futhark, "\u16E6", "r", "R", "algiz")
]
# ᚠ ᚢ ᚦ ᚭ ᚱ ᚴ ᚽ ᚿ ᛁ ᛅ ᛌ ᛐ ᛓ ᛖ ᛙ ᛚ ᛧ
SHORT_TWIG_Y | OUNGER_FUTHARK = [
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16A0", "f", "f", "fehu"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16A2", "u", "u", "uruz"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16A6", "θ", "þ", "þuriaz"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16AD", "a", "a", "ansuz"),
Rune(RunicAlphabetName.short_t | wig_younger_futhark, "\u16B1", "r", "r", "raido"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16B4", "k", "k", "kaunan"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16BD", "h", "h", "haglaz"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16BF", "n", "n", "naudiz"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16C1", "i", "i", "isaz"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16C5", "a", "a", "jeran"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16CC", "s", "s", "sowilo"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16D0", "t", "t", "tiwaz"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16D3", "b", "b", "berkanan"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16D6", "e", "e", "ehwaz"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16D9", "m", "m", "mannaz"), # also \u16D9
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16DA", "l", "l", "laguz"),
Rune(RunicAlphabetName.short_twig_younger_futhark, "\u16E7", "r", "R", "algiz"),
]
|
PereBal/advanced-algorithms | nqueens/controller.py | Python | mit | 2,927 | 0.000342 | import os
import json
from base import BaseController
from nqueens.models import Piece, Panel, Meta
class NQueensController(BaseController):
    """
    Controller for the N-queens puzzle: places N identical pieces on an
    N x N panel so that none attacks another, using backtracking.

    Fixes: two statements were corrupted by extraction artifacts in the
    source (`self.view.notify(...)` and `res = self.run(...)` in start())
    and have been reconstructed; the unused loop variable in start() is
    now `_`.
    """

    def __init__(self, view):
        # NOTE(review): super(...) is created but .__init__() is never
        # called on it, so BaseController.__init__ does not run — confirm
        # whether that is intentional before changing it.
        super(NQueensController, self)
        self._piece_data = None
        self._piece_cache = None
        self.view = view

    @classmethod
    def get_instance(cls, view):
        """Factory: build a controller bound to the given view."""
        return cls(view)

    def pre_switch(self):
        """Hook called before switching away from this controller; no-op."""
        pass

    def start(self):
        """Build the board, solve the puzzle and push the result to the view."""
        dim = self.view.get_dimension()
        # Cached factory, only 1 file read per list
        pieces = [Piece.from_file(self._piece_data) for _ in range(dim)]
        panel = Panel(dim)
        # Clear the board on screen before solving.
        self.view.notify({
            'func': 'update_panel',
            'data': {
                'pieces': {},
            }
        })
        res = self.run(panel, pieces, idx=0, ci=0)
        if res:
            self.view.notify({
                'func': 'update_panel',
                'data': {
                    'pieces': panel.pieces,
                }
            })
        else:
            self.view.notify({
                'func': 'display_error',
                'data': {
                    'message': 'No solution found :(',
                }
            })

    def run(self, panel, pieces, idx, ci):
        """
        Backtracking placement of pieces[idx:] on the panel.

        :param panel: board being filled
        :param pieces: list of identical pieces to place
        :param idx: index of the next piece to place
        :param ci: first row to try (earlier rows already hold a piece)
        :return: True when all pieces could be placed, False otherwise
        """
        dim = panel.dimension
        # Base case: every piece has been placed.
        if idx == len(pieces):
            return True
        else:
            # Ultra-fast because:
            # 1. All the pieces are the same (far fewer combinations)
            # 2. We start from the previous index, this makes the panel
            #    smaller each time
            # 3. Instead of keeping track of the killing positions we do a
            #    check each time a piece is added in order to avoid a kill
            #    (which is faster)
            # 4. Python dict operations are astonishingly fast
            for i in range(ci, dim):
                for j in range(dim):
                    if panel.add_piece(pieces[idx], (i, j)):
                        if self.run(panel, pieces, idx + 1, i):
                            return True
                        else:
                            panel.remove_piece(pieces[idx])
            return False

    def piece_selected(self, piece_name):
        """Load (and cache) the definition of the selected piece type,
        then tell the view whether the solver may be run."""
        if not self._piece_cache:
            self._piece_cache = Meta.get_piece_definitions()
        self._piece_data = self._piece_cache.get(piece_name)
        if self._piece_data:
            self._piece_data = self._piece_data[1]
        self.view.notify({
            'func': 'enable_run',
            'data': {
                'enable': bool(self._piece_data),
            }
        })

    @staticmethod
    def get_pieces_attr(attr):
        """Collect ``attr`` from every piece definition, or [] if any
        definition lacks it."""
        candidates = Meta.get_piece_definitions()
        if all(attr in candidate[0].keys() for candidate in candidates.values()):
            return [candidate[0][attr] for candidate in candidates.values()]
        else:
            return []
|
mrknow/filmkodi | plugin.video.fanfilm/resources/lib/resolvers/sawlive.py | Python | apache-2.0 | 2,023 | 0.009392 | # -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
| GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def | resolve(url):
try:
page = re.compile('//.+?/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(url)[0]
page = 'http://sawlive.tv/embed/%s' % page
try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
except: referer = page
result = client.request(page, referer=referer)
unpacked = ''
packed = result.split('\n')
for i in packed:
try: unpacked += jsunpack.unpack(i)
except: pass
result += unpacked
result = urllib.unquote_plus(result)
result = re.compile('<iframe(.+?)</iframe>').findall(result)[-1]
url = re.compile('src\s*=\s*[\'|\"](.+?)[\'|\"].+?[\'|\"](.+?)[\'|\"]').findall(result)[0]
url = '/'.join(url)
result = client.request(url, referer=referer)
strm = re.compile("'streamer'.+?'(.+?)'").findall(result)[0]
file = re.compile("'file'.+?'(.+?)'").findall(result)[0]
swf = re.compile("SWFObject\('(.+?)'").findall(result)[0]
url = '%s playpath=%s swfUrl=%s pageUrl=%s live=1 timeout=30' % (strm, file, swf, url)
return url
except:
return
|
habibmasuro/django-wiki | wiki/migrations/0009_auto__add_field_imagerevision_width__add_field_imagerevision_height.py | Python | gpl-3.0 | 18,186 | 0.008083 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ImageRevision.width'
db.add_column('wiki_imagerevision', 'width',
self.gf('django.db.models.fields.SmallIntegerField')(default=0),
keep_default=False)
# Adding field 'ImageRevision.height'
db.add_column('wiki_imagerevision', 'height',
self.gf('django.db.models.fields.SmallIntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ImageRevision.width'
db.delete_column('wiki_imagerevision', 'width')
# Deleting field 'ImageRevision.height'
db.delete_column('wiki_imagerevision', 'height')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.mod | els.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
| 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'django_notify.notificationtype': {
'Meta': {'object_name': 'NotificationType', 'db_table': "'notify_notificationtype'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'django_notify.settings': {
'Meta': {'object_name': 'Settings', 'db_table': "'notify_settings'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interval': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label})
},
'django_notify.subscription': {
'Meta': {'object_name': 'Subscription', 'db_table': "'notify_subscription'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notification_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_notify.NotificationType']"}),
'object_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'send_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'settings': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_notify.Settings']"})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'wiki.article': {
'Meta': {'object_name': 'Article'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['wiki.ArticleRevision']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'other_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'other_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_articles'", 'null': 'True', 'to': "orm['%s']" % user_orm_label})
},
'wiki.articleforobject': {
'Meta': {'unique_together': "(('content_type', 'object_id'),)", 'object_name': 'ArticleForObject'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_articleforobject'", 'to': "orm['contenttypes.ContentType']"}),
|
aglitke/vdsm | vdsm/vm.py | Python | gpl-2.0 | 198,643 | 0.000252 | #
# Copyright 2008-2013 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
# stdlib imports
from contextlib import contextmanager
from copy import deepcopy
from xml.dom import Node
from xml.dom.minidom import parseString as _domParseStr
import logging
import os
import pickle
import tempfile
import threading
import time
import xml.dom.minidom
# 3rd party libs imports
import libvirt
# vdsm imports
from vdsm import constants
from vdsm import libvirtconnection
from vdsm import netinfo
from vdsm import qemuImg
from vdsm import utils
from vdsm import vdscli
from vdsm.config import config
from vdsm.define import ERROR, NORMAL, doneCode, errCode
from vdsm.netinfo import DUMMY_BRIDGE
from storage import outOfProcess as oop
from storage import sd
from storage import fileUtils
# local imports
from logUtils import SimpleLogAdapter
import caps
import guestIF
import hooks
import kaxmlrpclib
import sampling
import supervdsm
_VMCHANNEL_DEVICE_NAME = 'com.redhat.rhevm.vdsm'
# This device name is used as default both in the qemu-guest-agent
# service/daemon and in libvirtd (to be used with the quiesce flag).
_QEMU_GA_DEVICE_NAME = 'org.qemu.guest_agent.0'
_AGENT_CHANNEL_DEVICES = (_VMCHANNEL_DEVICE_NAME, _QEMU_GA_DEVICE_NAME)
DEFAULT_BRIDGE = config.get("vars", "default_bridge")
DISK_DEVICES = 'disk'
NIC_DEVICES = 'interface'
VIDEO_DEVICES | = 'video'
SOUND_DEVICES = 'sound'
CONTROLLER_DEVICES = 'controller'
GENERAL_DEVICES = 'general'
BALLOON_DEVICES = 'balloon'
REDIR_DEVICES = 'redir'
WATCHDOG_DEVICES = 'watchdog'
CONSOLE_DEVICES = 'console'
SMARTCARD_DEVICES = 'smartcard'
def isVdsmImage(drive):
return all(k in drive.keys() for k in ('volumeID', 'domainID', 'imageID',
'poolID'))
de | f _filterSnappableDiskDevices(diskDeviceXmlElements):
return filter(lambda(x): not(x.getAttribute('device')) or
x.getAttribute('device') in ['disk', 'lun'],
diskDeviceXmlElements)
class _MigrationError(RuntimeError):
pass
class MigrationSourceThread(threading.Thread):
"""
A thread that takes care of migration on the source vdsm.
"""
_ongoingMigrations = threading.BoundedSemaphore(1)
@classmethod
def setMaxOutgoingMigrations(cls, n):
"""Set the initial value of the _ongoingMigrations semaphore.
must not be called after any vm has been run."""
cls._ongoingMigrations = threading.BoundedSemaphore(n)
def __init__(self, vm, dst='', dstparams='',
mode='remote', method='online',
tunneled=False, dstqemu='', abortOnError=False, **kwargs):
self.log = vm.log
self._vm = vm
self._dst = dst
self._mode = mode
self._method = method
self._dstparams = dstparams
self._machineParams = {}
self._tunneled = utils.tobool(tunneled)
self._abortOnError = utils.tobool(abortOnError)
self._dstqemu = dstqemu
self._downtime = kwargs.get('downtime') or \
config.get('vars', 'migration_downtime')
self.status = {
'status': {
'code': 0,
'message': 'Migration in progress'},
'progress': 0}
threading.Thread.__init__(self)
self._preparingMigrationEvt = True
self._migrationCanceledEvt = False
self._monitorThread = None
def getStat(self):
"""
Get the status of the migration.
"""
if self._monitorThread is not None:
# fetch migration status from the monitor thread
self.status['progress'] = int(
float(self._monitorThread.data_progress +
self._monitorThread.mem_progress) / 2)
return self.status
def _setupVdsConnection(self):
if self._mode == 'file':
return
# FIXME: The port will depend on the binding being used.
# This assumes xmlrpc
hostPort = vdscli.cannonizeHostPort(
self._dst, self._vm.cif.bindings['xmlrpc'].serverPort)
self.remoteHost, self.remotePort = hostPort.rsplit(':', 1)
if config.getboolean('vars', 'ssl'):
self.destServer = vdscli.connect(
hostPort,
useSSL=True,
TransportClass=kaxmlrpclib.TcpkeepSafeTransport)
else:
self.destServer = kaxmlrpclib.Server('http://' + hostPort)
self.log.debug('Destination server is: ' + hostPort)
try:
self.log.debug('Initiating connection with destination')
status = self.destServer.getVmStats(self._vm.id)
if not status['status']['code']:
self.log.error("Machine already exists on the destination")
self.status = errCode['exist']
except:
self.log.error("Error initiating connection", exc_info=True)
self.status = errCode['noConPeer']
def _setupRemoteMachineParams(self):
self._machineParams.update(self._vm.status())
# patch VM config for targets < 3.1
self._patchConfigForLegacy()
self._machineParams['elapsedTimeOffset'] = \
time.time() - self._vm._startTime
vmStats = self._vm.getStats()
if 'username' in vmStats:
self._machineParams['username'] = vmStats['username']
if 'guestIPs' in vmStats:
self._machineParams['guestIPs'] = vmStats['guestIPs']
if 'guestFQDN' in vmStats:
self._machineParams['guestFQDN'] = vmStats['guestFQDN']
for k in ('_migrationParams', 'pid'):
if k in self._machineParams:
del self._machineParams[k]
if self._mode != 'file':
self._machineParams['migrationDest'] = 'libvirt'
self._machineParams['_srcDomXML'] = self._vm._dom.XMLDesc(0)
def _prepareGuest(self):
if self._mode == 'file':
self.log.debug("Save State begins")
if self._vm.guestAgent.isResponsive():
lockTimeout = 30
else:
lockTimeout = 0
self._vm.guestAgent.desktopLock()
#wait for lock or timeout
while lockTimeout:
if self._vm.getStats()['session'] in ["Locked", "LoggedOff"]:
break
time.sleep(1)
lockTimeout -= 1
if lockTimeout == 0:
self.log.warning('Agent ' + self._vm.id +
' unresponsive. Hiberanting without '
'desktopLock.')
break
self._vm.pause('Saving State')
else:
self.log.debug("Migration started")
self._vm.lastStatus = 'Migration Source'
def _recover(self, message):
if not self.status['status']['code']:
self.status = errCode['migrateErr']
self.log.error(message)
if self._mode != 'file':
try:
self.destServer.destroy(self._vm.id)
except:
self.log.error("Failed to destroy remote VM", exc_info=True)
# if the guest was stopped before migration, we need to cont it
if self._mode == 'file' or self._method != 'online':
self._vm.cont()
# either way, migration has finished
self._vm.lastStatus = 'Up'
def _finishSuccessfully(self):
|
jayme-github/CouchPotatoServer | couchpotato/core/_base/updater/main.py | Python | gpl-3.0 | 14,024 | 0.005134 | from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.request import jsonified
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from datetime import datetime
from dateutil.parser import parse
from git.repository import LocalRepository
import json
import os
import shutil
import tarfile
import time
import traceback
import version
log = CPLog(__name__)
class Updater(Plugin):
available_notified = False
def __init__(self):
if Env.get('desktop'):
self.updater = DesktopUpdater()
elif os.path.isdir(os.path.join(Env.get('app_dir'), '.git')):
self.updater = GitUpdater(self.conf('git_command', default = 'git'))
else:
self.updater = SourceUpdater()
fireEvent('schedule.interval', 'updater.check', self.autoUpdate, hours = 6)
addEvent('app.load', self.autoUpdate)
addEvent('updater.info', self.info)
addApiView('updater.info', self.getInfo, docs = {
'desc': 'Get updater information',
'return': {
'type': 'object',
'example': """{
'last_check': "last checked for update",
'update_version': "available update version or empty",
'version': current_cp_version
}"""}
})
addApiView('updater.update', self.doUpdateView)
addApiView('updater.check', self.checkView, docs = {
'desc': 'Check for available update',
'return': {'type': 'see updater.info'}
})
def autoUpdate(self):
if self.check() and self.conf('automatic') and not self.updater.update_failed:
if self.updater.doUpdate():
# Notify before restarting
try:
if self.conf('notification'):
info = self.updater.info()
version_date = datetime.fromtimestamp(info['update_version']['date'])
fireEvent('updater.updated', 'Updated to a new version with hash "%s", this version is from %s' % (info['update_version']['hash'], version_date), data = info)
except:
log.error('Failed notifying for update: %s', traceback.format_exc())
fireEventAsync('app.restart')
return True
return False
def check(self):
if self.isDisabled():
return
if self.updater.check():
if not self.available_notified and self.conf('notification') and not self.conf('automatic'):
fireEvent('updater.available', message = 'A new update is available', data = self.updater.info())
self.available_notified = True
return True
return False
def info(self):
return self.updater.info()
def getInfo(self):
return jsonified(self.updater.info())
def checkView(self):
return jsonified({
'update_available': self.check(),
'info': self.updater.info()
})
def doUpdateView(self):
self.check()
if not self.updater.update_version:
log.error('Trying to update when no update is available.')
success = False
else:
success = self.updater.doUpdate()
if success:
fireEventAsync('app.restart')
# Assume the updater handles things
if not success:
success = True
return jsonified({
'success': success
})
class BaseUpdater(Plugin):
repo_user = 'jayme-github'
repo_name = 'CouchPotatoServer'
branch = version.BRANCH
version = None
update_failed = False
update_version = None
last_check = 0
def doUpdate(self):
pass
def getInfo(self):
return jsonified(self.info())
def info(self):
return {
'last_check': self.last_check,
'update_version': self.update_version,
'version': self.getVersion(),
'repo_name': '%s/%s' % (self.repo_user, self.repo_name),
'branch': self.branch,
}
def check(self):
pass
def deletePyc(self, only_excess = True):
for root, dirs, files in os.walk(ss(Env.get('app_dir'))):
pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
py_files = set(filter(lambda filename: filename.endswith('.py'), files))
excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
for excess_pyc_file in excess_pyc_files:
full_path = os.path.join(root, excess_pyc_file)
log.debug('Removing old PYC file: %s', full_path)
try:
os.remove(full_path)
except:
log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))
for dir_name in dirs:
full_path = os.path.join(root, dir_name)
if len(os.listdir(full_path)) == 0:
try:
os.rmdir(full_path)
except:
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
class GitUpdater(BaseUpdater):
def __init__(self, git_command):
self.repo = LocalRepository(Env.get('app_dir'), command = git_command)
def doUpdate(self):
try:
log.debug('Stashing local changes')
self.repo.saveStash()
log.info('Updating to latest version')
self.repo.pull()
# Delete leftover .pyc files
self.deletePyc()
return True
except:
log.error('Failed updating via GIT: %s', traceback.format_exc())
self.update_failed = True
return False
def getVersion(self):
if not self.version:
try:
output = self.repo.getHead() # Yes, please
log.debug('Git version output: %s', output.hash)
self.version = { |
'hash': output.hash[:8],
'date': output.getDate(),
'type': 'git',
| }
except Exception, e:
log.error('Failed using GIT updater, running from source, you need to have GIT installed. %s', e)
return 'No GIT'
return self.version
def check(self):
if self.update_version:
return True
log.info('Checking for new version on github for %s', self.repo_name)
if not Env.get('dev'):
self.repo.fetch()
current_branch = self.repo.getCurrentBranch().name
for branch in self.repo.getRemoteByName('origin').getBranches():
if current_branch == branch.name:
local = self.repo.getHead()
remote = branch.getHead()
log.info('Versions, local:%s, remote:%s', (local.hash[:8], remote.hash[:8]))
if local.getDate() < remote.getDate():
self.update_version = {
'hash': remote.hash[:8],
'date': remote.getDate(),
}
return True
self.last_check = time.time()
return False
class SourceUpdater(BaseUpdater):
def __init__(self):
# Create version file in cache
self.version_file = os.path.join(Env.get('cache_dir'), 'version')
if not os.path.isfile(self.version_file):
self.createFile(self.version_file, json.dumps(self.latestCommit()))
def doUpdate(self):
try:
url = 'https://github.com/%s/%s/tarball/%s' % (self.repo_user, self.repo_name, self.branch)
destination = os.path.join(Env.get('cache_dir'), self.update_version.get('hash') + '.tar.gz')
extracted_path = os.path.join(Env.get('cache_dir'), 'temp_ |
HazyTeam/platform_external_wpa_supplicant_8 | wpa_supplicant/examples/p2p-nfc.py | Python | gpl-2.0 | 20,200 | 0.002277 | #!/usr/bin/python
#
# Example nfcpy to wpa_supplicant wrapper for P2P NFC operations
# Copyright (c) 2012-2013, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import os
import sys
import time
import random
import threading
import argparse
import nfc
import nfc.ndef
import nfc.llcp
import nfc.handover
import logging
import wpaspy
wpas_ctrl = '/var/run/wpa_supplicant'
ifname = None
init_on_touch = False
in_raw_mode = False
prev_tcgetattr = 0
include_wps_req = True
include_p2p_req = True
no_input = False
srv = None
continue_loop = True
terminate_now = False
summary_file = None
success_file = None
def summary(txt):
print txt
if summary_file:
with open(summary_file, 'a') as f:
f.write(txt + "\n")
def success_report(txt):
summary(txt)
if success_file:
with open(success_file, 'a') as f:
f.write(txt + "\n")
def wpas_connect():
ifaces = []
if os.path.isdir(wpas_ctrl):
try:
ifaces = [os.path.join(wpas_ctrl, i) for i in os.listdir(wpas_ctrl)]
except OSError, error:
print "Could not find wpa_supplicant: ", error
return None
if len(ifaces) < 1:
print "No wpa_supplicant control interface found"
return None
for ctrl in ifaces:
if ifname:
if ifname not in ctrl:
continue
try:
print "Trying to use control interface " + ctrl
wpas = wpaspy.Ctrl(ctrl)
return wpas
except Exception, e:
pass
return None
def wpas_tag_read(message):
wpas = wpas_connect()
if (wpas == None):
return False
cmd = "WPS_NFC_TAG_READ " + str(message).encode("hex")
global force_freq
if force_freq:
cmd = cmd + " freq=" + force_freq
if "FAIL" in wpas.request(cmd):
return Fal | se
return True
def wpas_get_handover_req():
wpas = wpas_connect()
if (wpas == None):
return None
res = wpas.request("NFC_GET_HANDOVER_REQ NDEF P2P-CR").rstrip()
if "FAIL" in res:
return None
return res.decode("hex")
def wpas_get_handover_req_wps():
wpas = wpas_connect()
if (wpas == None):
return None
res = wpas.request("NFC_GET_HANDOVER_REQ NDEF WPS-CR").rstrip()
if "FAIL" in res:
return | None
return res.decode("hex")
def wpas_get_handover_sel(tag=False):
wpas = wpas_connect()
if (wpas == None):
return None
if tag:
res = wpas.request("NFC_GET_HANDOVER_SEL NDEF P2P-CR-TAG").rstrip()
else:
res = wpas.request("NFC_GET_HANDOVER_SEL NDEF P2P-CR").rstrip()
if "FAIL" in res:
return None
return res.decode("hex")
def wpas_get_handover_sel_wps():
wpas = wpas_connect()
if (wpas == None):
return None
res = wpas.request("NFC_GET_HANDOVER_SEL NDEF WPS-CR");
if "FAIL" in res:
return None
return res.rstrip().decode("hex")
def wpas_report_handover(req, sel, type):
wpas = wpas_connect()
if (wpas == None):
return None
cmd = "NFC_REPORT_HANDOVER " + type + " P2P " + str(req).encode("hex") + " " + str(sel).encode("hex")
global force_freq
if force_freq:
cmd = cmd + " freq=" + force_freq
return wpas.request(cmd)
def wpas_report_handover_wsc(req, sel, type):
wpas = wpas_connect()
if (wpas == None):
return None
cmd = "NFC_REPORT_HANDOVER " + type + " WPS " + str(req).encode("hex") + " " + str(sel).encode("hex")
if force_freq:
cmd = cmd + " freq=" + force_freq
return wpas.request(cmd)
def p2p_handover_client(llc):
message = nfc.ndef.HandoverRequestMessage(version="1.2")
message.nonce = random.randint(0, 0xffff)
global include_p2p_req
if include_p2p_req:
data = wpas_get_handover_req()
if (data == None):
summary("Could not get handover request carrier record from wpa_supplicant")
return
print "Handover request carrier record from wpa_supplicant: " + data.encode("hex")
datamsg = nfc.ndef.Message(data)
message.add_carrier(datamsg[0], "active", datamsg[1:])
global include_wps_req
if include_wps_req:
print "Handover request (pre-WPS):"
try:
print message.pretty()
except Exception, e:
print e
data = wpas_get_handover_req_wps()
if data:
print "Add WPS request in addition to P2P"
datamsg = nfc.ndef.Message(data)
message.add_carrier(datamsg[0], "active", datamsg[1:])
print "Handover request:"
try:
print message.pretty()
except Exception, e:
print e
print str(message).encode("hex")
client = nfc.handover.HandoverClient(llc)
try:
summary("Trying to initiate NFC connection handover")
client.connect()
summary("Connected for handover")
except nfc.llcp.ConnectRefused:
summary("Handover connection refused")
client.close()
return
except Exception, e:
summary("Other exception: " + str(e))
client.close()
return
summary("Sending handover request")
if not client.send(message):
summary("Failed to send handover request")
client.close()
return
summary("Receiving handover response")
message = client._recv()
if message is None:
summary("No response received")
client.close()
return
if message.type != "urn:nfc:wkt:Hs":
summary("Response was not Hs - received: " + message.type)
client.close()
return
print "Received message"
try:
print message.pretty()
except Exception, e:
print e
print str(message).encode("hex")
message = nfc.ndef.HandoverSelectMessage(message)
summary("Handover select received")
try:
print message.pretty()
except Exception, e:
print e
for carrier in message.carriers:
print "Remote carrier type: " + carrier.type
if carrier.type == "application/vnd.wfa.p2p":
print "P2P carrier type match - send to wpa_supplicant"
if "OK" in wpas_report_handover(data, carrier.record, "INIT"):
success_report("P2P handover reported successfully (initiator)")
else:
summary("P2P handover report rejected")
break
print "Remove peer"
client.close()
print "Done with handover"
global only_one
if only_one:
print "only_one -> stop loop"
global continue_loop
continue_loop = False
global no_wait
if no_wait:
print "Trying to exit.."
global terminate_now
terminate_now = True
class HandoverServer(nfc.handover.HandoverServer):
def __init__(self, llc):
super(HandoverServer, self).__init__(llc)
self.sent_carrier = None
self.ho_server_processing = False
self.success = False
# override to avoid parser error in request/response.pretty() in nfcpy
# due to new WSC handover format
def _process_request(self, request):
summary("received handover request {}".format(request.type))
response = nfc.ndef.Message("\xd1\x02\x01Hs\x12")
if not request.type == 'urn:nfc:wkt:Hr':
summary("not a handover request")
else:
try:
request = nfc.ndef.HandoverRequestMessage(request)
except nfc.ndef.DecodeError as e:
summary("error decoding 'Hr' message: {}".format(e))
else:
response = self.process_request(request)
summary("send handover response {}".format(response.type))
return response
def process_request(self, request):
self.ho_server_processing = True
clear_raw_mode()
print "HandoverServer - request received"
try:
print "Parsed handover request: " + request.pretty()
except Exception, e:
print e
sel = nfc.ndef.HandoverSelectMessage(version="1.2")
found = False
|
samuelmaudo/yepes | yepes/contrib/thumbnails/models.py | Python | bsd-3-clause | 455 | 0.010989 | # -*- codi | ng:utf-8 -*-
from yepes.apps import apps
AbstractConfiguration = apps.get_class('thumbnails.abstract_models', 'AbstractConfiguration')
AbstractSource = apps.get_class('thumbnails.abs | tract_models', 'AbstractSource')
AbstractThumbnail = apps.get_class('thumbnails.abstract_models', 'AbstractThumbnail')
class Configuration(AbstractConfiguration):
pass
class Source(AbstractSource):
pass
class Thumbnail(AbstractThumbnail):
pass
|
dpnova/pynerstat | minerstat/tests/test_minerstat.py | Python | apache-2.0 | 2,307 | 0 | from twisted.trial import unittest
from minerstat.service import MinerStatService
from minerstat.rig import Rig
from minerstat.remote import MinerStatRemoteProtocol, Command
from minerstat.utils import Config
from minerstat.miners.claymore import EthClaymoreMiner
from twisted.internet import task, defer
from mock import Mock, create_autospec
import treq
import os
class MinerStatServiceTest(unittest.TestCase):
def setUp(self):
self.clock = task.Clock()
self.clock.spawnProcess = Mock()
treq_mock = create_autospec(treq)
response_mock = Mock()
response_mock.text.return_value = defer.succeed("")
treq_mock.request.return_value = defer.succeed(response_mock)
self.config = Config.default()
self.config.path = "./"
try:
os.makedirs("clients/algo")
except FileExistsError:
pass
self.remote = MinerStatRemoteProtocol(self.config, treq_mock)
self.rig = Rig(self.config, remote=self.remote, reactor=self.clock)
self.rig.start = Mock(return_value=defer.succeed(None))
self.rig.stop = Mock(return_value=defer.succeed(None))
self.service = MinerStatService(self.rig)
def test_init(self):
MinerStatService(self.rig)
@defer.inlineCallbacks
def test_start_stop(self):
| yield self.service.startService()
self.service.rig.start.assert_called_with()
yield self.service.stopService()
self.service.rig.stop.assert_called_with()
class MinerStatRemoteProtocolTest(unittest.TestCase):
def setUp(self):
self.config = Config("a", "b", "w", "p")
self.prot = MinerStatRemoteProtocol(self.config)
def test_algoinfo(self):
pass
def test_dlconf(self):
pass
def test_send_log(self): |
pass
def test_algo_check(self):
pass
def test_dispatch_remote_command(self):
pass
def test_poll_remote(self):
pass
def test_make_full_url(self):
print(self.prot.make_full_url("foobar"))
class CommandTest(unittest.TestCase):

    """Smoke tests for Command construction with and without a coin."""

    def test_init(self):
        plain = Command("foo", None)
        self.assertTrue(plain)
        with_coin = Command("foo", EthClaymoreMiner())
        self.assertTrue(with_coin)
|
google-research/exoplanet-ml | exoplanet-ml/astrowavenet/astrowavenet_model_test.py | Python | apache-2.0 | 33,881 | 0.002952 | # Copyright 2018 The Exoplanet ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for astrowavenet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from astrowavenet import astrowavenet_model
from tf_util import configdict
class AstrowavenetTest(tf.test.TestCase):
def assertShapeEquals(self, shape, tensor_or_array):
"""Asserts that a Tensor or Numpy array has the expected shape.
Args:
shape: Numpy array or anything that can be converted to one.
tensor_or_array: tf.Tensor, tf.Variable, or Numpy array.
"""
if isinstance(tensor_or_array, (np.ndarray, np.generic)):
self.assertAllEqual(shape, tensor_or_array.shape)
elif isinstance(tensor_or_array, (tf.Tensor, tf.Variable)):
self.assertAllEqual(shape, tensor_or_array.shape.as_list())
else:
raise TypeError("tensor_or_array must be a Tensor or Numpy ndarray")
def test_build_model(self):
time_series_length = 9
input_num_features = 8
context_num_features = 7
input_placeholder = tf.placeholder(
dtype=tf.float32,
shape=[None, time_series_length, input_num_features],
name="input")
context_placeholder = tf.placeholder(
dtyp | e=tf.float32,
shape=[None, time_series_length, context_num_features],
name="context")
features = {
"autoregressive_input": input_placeholder,
"conditioning_stack": context_placeholder
}
mode = tf.estimator.ModeKeys.TRAIN
hparams = configdict.ConfigDict({
"use_future_context": False,
"predict_n_steps_ahead": 1,
"dilation_kernel_width": 2,
" | skip_output_dim": 6,
"preprocess_output_size": 3,
"preprocess_kernel_width": 5,
"num_residual_blocks": 2,
"dilation_rates": [1, 2, 4],
"output_distribution": {
"type": "normal",
"min_scale": 0.001,
"predict_outlier_distribution": False
}
})
model = astrowavenet_model.AstroWaveNet(features, hparams, mode)
model.build()
variables = {v.op.name: v for v in tf.trainable_variables()}
# Verify variable shapes in two residual blocks.
var = variables["preprocess/causal_conv/kernel"]
self.assertShapeEquals((5, 8, 3), var)
var = variables["preprocess/causal_conv/bias"]
self.assertShapeEquals((3,), var)
var = variables["block_0/dilation_1/filter/causal_conv/kernel"]
self.assertShapeEquals((2, 3, 3), var)
var = variables["block_0/dilation_1/filter/causal_conv/bias"]
self.assertShapeEquals((3,), var)
var = variables["block_0/dilation_1/filter/conv1x1/kernel"]
self.assertShapeEquals((1, 7, 3), var)
var = variables["block_0/dilation_1/filter/conv1x1/bias"]
self.assertShapeEquals((3,), var)
var = variables["block_0/dilation_1/gate/causal_conv/kernel"]
self.assertShapeEquals((2, 3, 3), var)
var = variables["block_0/dilation_1/gate/causal_conv/bias"]
self.assertShapeEquals((3,), var)
var = variables["block_0/dilation_1/gate/conv1x1/kernel"]
self.assertShapeEquals((1, 7, 3), var)
var = variables["block_0/dilation_1/gate/conv1x1/bias"]
self.assertShapeEquals((3,), var)
var = variables["block_0/dilation_1/residual/conv1x1/kernel"]
self.assertShapeEquals((1, 3, 3), var)
var = variables["block_0/dilation_1/residual/conv1x1/bias"]
self.assertShapeEquals((3,), var)
var = variables["block_0/dilation_1/skip/conv1x1/kernel"]
self.assertShapeEquals((1, 3, 6), var)
var = variables["block_0/dilation_1/skip/conv1x1/bias"]
self.assertShapeEquals((6,), var)
var = variables["block_1/dilation_4/filter/causal_conv/kernel"]
self.assertShapeEquals((2, 3, 3), var)
var = variables["block_1/dilation_4/filter/causal_conv/bias"]
self.assertShapeEquals((3,), var)
var = variables["block_1/dilation_4/filter/conv1x1/kernel"]
self.assertShapeEquals((1, 7, 3), var)
var = variables["block_1/dilation_4/filter/conv1x1/bias"]
self.assertShapeEquals((3,), var)
var = variables["block_1/dilation_4/gate/causal_conv/kernel"]
self.assertShapeEquals((2, 3, 3), var)
var = variables["block_1/dilation_4/gate/causal_conv/bias"]
self.assertShapeEquals((3,), var)
var = variables["block_1/dilation_4/gate/conv1x1/kernel"]
self.assertShapeEquals((1, 7, 3), var)
var = variables["block_1/dilation_4/gate/conv1x1/bias"]
self.assertShapeEquals((3,), var)
var = variables["block_1/dilation_4/residual/conv1x1/kernel"]
self.assertShapeEquals((1, 3, 3), var)
var = variables["block_1/dilation_4/residual/conv1x1/bias"]
self.assertShapeEquals((3,), var)
var = variables["block_1/dilation_4/skip/conv1x1/kernel"]
self.assertShapeEquals((1, 3, 6), var)
var = variables["block_1/dilation_4/skip/conv1x1/bias"]
self.assertShapeEquals((6,), var)
var = variables["postprocess/conv1x1/kernel"]
self.assertShapeEquals((1, 6, 6), var)
var = variables["postprocess/conv1x1/bias"]
self.assertShapeEquals((6,), var)
var = variables["dist_params/conv1x1/kernel"]
self.assertShapeEquals((1, 6, 16), var)
var = variables["dist_params/conv1x1/bias"]
self.assertShapeEquals((16,), var)
# Verify total number of trainable parameters.
num_preprocess_params = (
hparams.preprocess_kernel_width * input_num_features *
hparams.preprocess_output_size + hparams.preprocess_output_size)
num_gated_params = (
hparams.dilation_kernel_width * hparams.preprocess_output_size *
hparams.preprocess_output_size + hparams.preprocess_output_size +
1 * context_num_features * hparams.preprocess_output_size +
hparams.preprocess_output_size) * 2
num_residual_params = (
1 * hparams.preprocess_output_size * hparams.preprocess_output_size +
hparams.preprocess_output_size)
num_skip_params = (
1 * hparams.preprocess_output_size * hparams.skip_output_dim +
hparams.skip_output_dim)
num_block_params = (
num_gated_params + num_residual_params + num_skip_params) * len(
hparams.dilation_rates) * hparams.num_residual_blocks
num_postprocess_params = (
1 * hparams.skip_output_dim * hparams.skip_output_dim +
hparams.skip_output_dim)
num_dist_params = (1 * hparams.skip_output_dim * 2 * input_num_features +
2 * input_num_features)
total_params = (
num_preprocess_params + num_block_params + num_postprocess_params +
num_dist_params)
total_retrieved_params = 0
for v in tf.trainable_variables():
total_retrieved_params += np.prod(v.shape)
self.assertEqual(total_params, total_retrieved_params)
# Verify model runs and outputs losses of correct shape.
scaffold = tf.train.Scaffold()
scaffold.finalize()
with self.cached_session() as sess:
sess.run([scaffold.init_op, scaffold.local_init_op])
step = sess.run(model.global_step)
self.assertEqual(0, step)
batch_size = 11
feed_dict = {
input_placeholder:
np.random.random(
(batch_size, time_series_length, input_num_features)),
context_placeholder:
np.random.random(
(batch_size, time_series_length, context_num_features))
}
batch_losses, per_example_loss, total_loss = sess.run(
[model.batch_losses, model.per_example_loss, model.total_loss],
feed_dict=feed_dict)
self.assertShapeEquals(
(batch_size, time_series_length, |
executablebooks/mdformat | src/mdformat/renderer/_context.py | Python | mit | 22,558 | 0.000576 | from __future__ import annotations
from collections import defaultdict
from collections.abc import Generator, Iterable, Mapping, MutableMapping
from contextlib import contextmanager
import logging
import re
import textwrap
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, NamedTuple
from markdown_it.rules_block.html_block import HTML_SEQUENCES
from mdformat import codepoints
from mdformat._compat import Literal
from mdformat._conf import DEFAULT_OPTS
from mdformat.renderer._util import (
RE_CHAR_REFERENCE,
decimalify_leading,
decimalify_trailing,
escape_asterisk_emphasis,
escape_underscore_emphasis,
get_list_marker_type,
is_tight_list,
is_tight_list_item,
longest_consecutive_sequence,
maybe_add_link_brackets,
)
from mdformat.renderer.typing import Postprocess, Render
if TYPE_CHECKING:
from mdformat.renderer import RenderTreeNode
LOGGER = logging.getLogger(__name__)
# A marker used to point a location where word wrap is allowed
# to occur.
WRAP_POINT = "\x00"
# A marker used to indicate location of a character that should be preserved
# during word wrap. Should be converted to the actual character after wrap.
PRESERVE_CHAR = "\x00"
def make_render_children(separator: str) -> Render:
    """Build a renderer that joins a node's rendered children with *separator*."""

    def render_children(
        node: RenderTreeNode,
        context: RenderContext,
    ) -> str:
        rendered = (child.render(context) for child in node.children)
        return separator.join(rendered)

    return render_children
def hr(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a thematic break as a 70-character underscore line."""
    # 70 is the fixed width mdformat uses for thematic breaks.
    return "_" * 70
def code_inline(node: RenderTreeNode, context: RenderContext) -> str:
    """Render an inline code span, choosing a backtick fence that cannot
    collide with backticks inside the code, and padding when the code has
    significant leading/trailing spaces."""
    code = node.content
    backtick_run = longest_consecutive_sequence(code, "`")
    if backtick_run:
        # Use one more backtick than the longest run inside the code.
        marker = "`" * (backtick_run + 1)
        return f"{marker} {code} {marker}"
    needs_padding = (
        code.startswith(" ") and code.endswith(" ") and bool(code.strip())
    )
    return f"` {code} `" if needs_padding else f"`{code}`"
def html_block(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a raw HTML block, trimming trailing newlines and leading spaces."""
    # Leading spaces are stripped because regular Markdown gets the same
    # treatment; keeping them would misalign raw HTML relative to Markdown
    # and could change semantics.
    return node.content.rstrip("\n").lstrip()
def html_inline(node: RenderTreeNode, context: RenderContext) -> str:
    """Render inline HTML verbatim."""
    # Fix: the return expression was garbled ("node.conten | t") in this copy.
    return node.content
def _in_block(block_name: str, node: RenderTreeNode) -> bool:
while node.parent:
if node.parent.type == block_name:
return True
node = node.parent
return False
def hardbreak(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a hard line break.

    Inside a heading a literal newline would end the heading, so an HTML
    break is used there; elsewhere a backslash-newline suffices.
    """
    return "<br /> " if _in_block("heading", node) else "\\\n"
def softbreak(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a soft break: a wrap point while word-wrapping a paragraph,
    otherwise a plain newline."""
    wrapping = context.do_wrap and _in_block("paragraph", node)
    return WRAP_POINT if wrapping else "\n"
def text(node: RenderTreeNode, context: RenderContext) -> str:
    """Process a text token.

    Text should always be a child of an inline token. An inline token
    should always be enclosed by a heading or a paragraph.

    The escaping steps below are order sensitive: backslashes must be
    escaped before any escape that introduces new backslashes.
    """
    text = node.content

    # Escape backslash to prevent it from making unintended escapes.
    # This escape has to be first, else we start multiplying backslashes.
    text = text.replace("\\", "\\\\")

    text = escape_asterisk_emphasis(text)  # Escape emphasis/strong marker.
    text = escape_underscore_emphasis(text)  # Escape emphasis/strong marker.
    text = text.replace("[", "\\[")  # Escape link label enclosure
    text = text.replace("]", "\\]")  # Escape link label enclosure
    text = text.replace("<", "\\<")  # Escape URI enclosure
    text = text.replace("`", "\\`")  # Escape code span marker

    # Escape "&" if it starts a sequence that can be interpreted as
    # a character reference.
    text = RE_CHAR_REFERENCE.sub(r"\\\g<0>", text)

    # The parser can give us consecutive newlines which can break
    # the markdown structure. Replace two or more consecutive newlines
    # with newline character's decimal reference.
    # Fix: the replacement string was garbled to a space in this copy,
    # contradicting the comment above; restore the decimal references.
    text = text.replace("\n\n", "&#10;&#10;")

    # If the last character is a "!" and the token next up is a link, we
    # have to escape the "!" or else the link will be interpreted as image.
    next_sibling = node.next_sibling
    if text.endswith("!") and next_sibling and next_sibling.type == "link":
        text = text[:-1] + "\\!"

    if context.do_wrap and _in_block("paragraph", node):
        text = re.sub(r"\s+", WRAP_POINT, text)

    return text
def fence(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a fenced code block, applying any configured code formatter.

    The fence character and its length are chosen so that the block's own
    content can never terminate the fence early.
    """
    info_str = node.info.strip()
    lang = info_str.split(maxsplit=1)[0] if info_str else ""
    code_block = node.content

    # Info strings of backtick code fences cannot contain backticks.
    # If that is the case, we make a tilde code fence instead.
    fence_char = "~" if "`" in info_str else "`"

    # Format the code block using enabled codeformatter funcs
    if lang in context.options.get("codeformatters", {}):
        fmt_func = context.options["codeformatters"][lang]
        try:
            code_block = fmt_func(code_block, info_str)
        except Exception:
            # Swallow exceptions so that formatter errors (e.g. due to
            # invalid code) do not crash mdformat.
            assert node.map is not None, "A fence token must have `map` attribute set"
            filename = context.options.get("mdformat", {}).get("filename", "")
            warn_msg = (
                f"Failed formatting content of a {lang} code block "
                f"(line {node.map[0] + 1} before formatting)"
            )
            if filename:
                # Fix: include the actual filename instead of a hard-coded
                # placeholder; the guarded variable was previously unused.
                warn_msg += f". Filename: {filename}"
            LOGGER.warning(warn_msg)

    # The code block must not include as long or longer sequence of `fence_char`s
    # as the fence string itself
    fence_len = max(3, longest_consecutive_sequence(code_block, fence_char) + 1)
    fence_str = fence_char * fence_len

    return f"{fence_str}{info_str}\n{code_block}{fence_str}"
def code_block(node: RenderTreeNode, context: RenderContext) -> str:
    """Render an indented code block exactly like a fenced one."""
    return fence(node, context)
def image(node: RenderTreeNode, context: RenderContext) -> str:
    """Render an image, using reference style when a label was parsed."""
    description = _render_inline_as_text(node, context)

    if context.do_wrap:
        # Prevent line breaks
        description = description.replace(WRAP_POINT, " ")

    ref_label = node.meta.get("label")
    if ref_label:
        # Record the label so unused reference definitions can be dropped.
        context.env["used_refs"].add(ref_label)
        ref_label_repr = ref_label.lower()
        if description.lower() == ref_label_repr:
            return f"![{description}]"
        return f"![{description}][{ref_label_repr}]"

    uri = node.attrs["src"]
    assert isinstance(uri, str)
    uri = maybe_add_link_brackets(uri)
    title = node.attrs.get("title")
    # Fix: both return f-strings were stripped to empty literals in this
    # copy, which would render every inline image as an empty string.
    # Restore the CommonMark image syntax.
    if title is not None:
        return f'![{description}]({uri} "{title}")'
    return f"![{description}]({uri})"
def _render_inline_as_text(node: RenderTreeNode, context: RenderContext) -> str:
    """Special kludge for image `alt` attributes to conform CommonMark spec.

    Don't try to use it! Spec requires to show `alt` content with
    stripped markup, instead of simple escaping.
    """

    def text_renderer(node: RenderTreeNode, context: RenderContext) -> str:
        # Raw text: no escaping, per the spec's stripped-markup requirement.
        return node.content

    def image_renderer(node: RenderTreeNode, context: RenderContext) -> str:
        # Nested images recurse so their own alt text is stripped too.
        return _render_inline_as_text(node, context)

    # Any token type not listed falls back to rendering its children joined
    # with no separator; `link` refers to the module-level link renderer.
    inline_renderers: Mapping[str, Render] = defaultdict(
        lambda: make_render_children(""),
        {
            "text": text_renderer,
            "image": image_renderer,
            "link": link,
            "softbreak": softbreak,
        },
    )
    inline_context = RenderContext(
        inline_renderers, context.postprocessors, context.options, context.env
    )
    return make_render_children("")(node, inline_context)
def link(node: RenderTreeNode, context: RenderContext) -> str:
if node.info == "auto":
autolink_url = |
dmerejkowsky/twittback | twittback/types.py | Python | mit | 130 | 0 | import typing
import twittback
# Aliases for the homogeneous sequences passed around the code base.
# Fix: both alias lines were garbled by stray "| " markers in this copy.
TweetSequence = typing.Sequence[twittback.Tweet]
UserSequence = typing.Sequence[twittback.User]
|
xychu/product-definition-center | pdc/settings.py | Python | mit | 10,354 | 0.000676 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
"""
Django settings for pdc project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is checked into source control; override it
# in a deployment-specific settings module before running in production.
SECRET_KEY = '3hm)=^*sowhxr%m)%_u3mk+!ncy=c)147xbevej%l_lcdogu#+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Default page size for web UI listings.
ITEMS_PER_PAGE = 50
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party applications.
    'rest_framework',
    'rest_framework.authtoken',
    'corsheaders',
    # Project applications.
    'pdc.apps.auth',
    'pdc.apps.common',
    'pdc.apps.compose',
    'pdc.apps.package',
    'pdc.apps.release',
    'pdc.apps.repository',
    'pdc.apps.contact',
    'pdc.apps.component',
    'pdc.apps.changeset',
    'pdc.apps.utils',
    'pdc.apps.bindings',
    'pdc.apps.usage',
    'pdc.apps.osbs',
    'mptt',
)
# Use the project's kerberos-aware user model instead of Django's default.
AUTH_USER_MODEL = 'kerb_auth.User'
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'pdc.apps.auth.authentication.TokenAuthenticationWithChangeSet',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissions'
    ],
    'DEFAULT_METADATA_CLASS': 'contrib.bulk_operations.metadata.BulkMetadata',
    'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'pdc.apps.common.renderers.ReadOnlyBrowsableAPIRenderer',
    ),
    'EXCEPTION_HANDLER': 'pdc.apps.common.handlers.exception_handler',
    'DEFAULT_PAGINATION_CLASS': 'pdc.apps.common.pagination.AutoDetectedPageNumberPagination',
    'NON_FIELD_ERRORS_KEY': 'detail',
}
# CorsMiddleware is listed first so CORS headers apply before any other
# request processing; changeset/messaging middleware wrap requests last.
MIDDLEWARE_CLASSES = (
    'corsheaders.middleware.CorsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'pdc.apps.auth.middleware.RemoteUserMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'kobo.django.menu.middleware.MenuMiddleware',
    'pdc.apps.usage.middleware.UsageMiddleware',
    'pdc.apps.changeset.middleware.ChangesetMiddleware',
    'pdc.apps.utils.middleware.MessagingMiddleware',
)
# Kerberos first, with the standard model backend as fallback.
AUTHENTICATION_BACKENDS = (
    'pdc.apps.auth.backends.KerberosUserBackend',
    #'pdc.apps.auth.backends.AuthMellonUserBackend',
    'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/auth/krb5login'
LOGIN_REDIRECT_URL = '/'
ROOT_URLCONF = 'pdc.urls'

import kobo

# kobo's menu middleware needs to know where the menu definition lives.
ROOT_MENUCONF = "pdc.menu"

TEMPLATES = [
    {
        # Fix: the backend dotted path was garbled by a stray "| " marker
        # in this copy ("django.te | mplate..."); restored to the standard
        # Django template backend.
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, "pdc/templates"),
            # kobo ships hub templates inside the installed package.
            os.path.join(os.path.dirname(kobo.__file__), "hub", "templates"),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'kobo.django.menu.context_processors.menu_context_processor',
            ],
        },
    },
]

WSGI_APPLICATION = 'pdc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Development default; production deployments override this in local settings.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = '/usr/share/pdc/static'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "pdc/static"),
    "/usr/share/patternfly1/resources",
)
# REST API URL layout and pagination defaults.
REST_API_URL = 'rest_api/'
REST_API_VERSION = 'v1'
REST_API_PAGE_SIZE = 20
REST_API_PAGE_SIZE_QUERY_PARAM = 'page_size'
REST_API_MAX_PAGE_SIZE = 100
API_HELP_TEMPLATE = "api/help.html"
# Templates used to build links into the dist-git web UI.
DIST_GIT_WEB_ROOT_URL = "http://pkgs.example.com/cgit/"
DIST_GIT_RPM_PATH = 'rpms/'
DIST_GIT_REPO_FORMAT = DIST_GIT_WEB_ROOT_URL + DIST_GIT_RPM_PATH + "%s"
DIST_GIT_BRANCH_FORMAT = "?h=%s"
# ldap settings
LDAP_URI = "ldap://ldap.example.com:389"
LDAP_USERS_DN = "ou=users,dc=example,dc=com"
LDAP_GROUPS_DN = "ou=groups,dc=example,dc=com"
LDAP_CACHE_HOURS = 24
#
# CORS settings
#
# The requests can come from any origin (hostname). If this is undesirable, use
# settings_local.py module, set this to False and either set
# CORS_ORIGIN_WHITELIST to a tuple of hostnames that are allowed to contact the
# API, or set CORS_ORIGIN_REGEX_WHITELIST, which again is a tuple of regular
# expressions.
CORS_ORIGIN_ALLOW_ALL = True
# Only the REST API can be accessed. If settings local override REST_API_URL,
# make sure to update this setting as well.
CORS_URLS_REGEX = '^/%s.*$' % REST_API_URL
# We want HTML/JS clients to be able to use Kerberos authentication.
CORS_ALLOW_CREDENTIALS = True
# Allow default headers from django-cors-headers package as well as
# PDC-Change-Comment custom header.
CORS_ALLOW_HEADERS = (
    'x-requested-with',
    'content-type',
    'accept',
    'origin',
    'authorization',
    'x-csrftoken',
    'pdc-change-comment',
)
# mock kerberos login for debugging
DEBUG_USER = None
BROWSABLE_DOCUMENT_MACROS = {
    # need to be rewrite with the real host name when deploy.
    'HOST_NAME': 'http://localhost:8000',
    # make consistent with rest api root.
    'API_PATH': '%s%s' % (REST_API_URL, REST_API_VERSION),
}
EMPTY_PATCH_ERROR_RESPONSE = {
    'detail': 'Partial update with no changes does not make much sense.',
    # NOTE(review): "you the the" typo in this user-facing hint text; left
    # as-is here since it is runtime output — fix deliberately upstream.
    'hint': ' '.join(['Please make sure the URL includes the trailing slash.',
                      'Some software may automatically redirect you the the',
                      'correct URL but not forward the request body.'])
}
INTERNAL_SERVER_ERROR_RESPONSE = {
    'detail': 'The server encountered an internal error or misconfiguration and was unable to complete your request.'
}
# Messaging Bus Config
MESSAGE_BUS = {
# MLP: Messaging Library Package
# e.g. `fedmsg` for fedmsg or `kombu` for AMQP and other transports that `kombu` supports.
# `stomp` for STOMP supports.
'MLP': '',
# # `fedmsg` config example:
# # fedmsg's config is managed by `fedmsg` package, so normally here just need to set the
# # 'MLP' to 'fedmsg'
# 'MLP': 'fedmsg',
#
# # `kombu` config example:
# 'MLP': 'kombu',
# 'URL': 'amqp://guest:guest@example.com:5672//',
# 'EXCHANGE': {
# 'name': 'pdc',
# 'type': 'topic',
# 'durable': False
# },
# 'OPTIONS': {
# # Set these two items to config `kombu` to use ssl.
# 'login_method': 'EXTERNAL',
# 'ssl': {
# 'ca_certs': '',
# 'keyfile': '',
# 'certfile': '',
# 'cert_reqs': ssl.CERT_REQUIRED,
# }
# }
#
# # `stomp` config item |
wangy1931/tcollector | collectors/etc/zfsiostats_conf.py | Python | lgpl-3.0 | 298 | 0.006711 | #!/usr/bin/env python
def get_config():
    """Return the collector's configuration dictionary.

    Fix: the def line and a comment were garbled by stray "| " markers in
    this copy; reconstructed from the surrounding text.
    """
    config = {
        'collection_interval': 15,  # Seconds, how often to collect metric data
        # Avoid reporting capacity info too frequently, 0 disables capacity
        # reporting.
        'report_capacity_every_x_times': 20
    }
    return config
|
sshnaidm/ru | plugin.audio.tuneinradio/resources/lib/tunein.py | Python | gpl-2.0 | 40,322 | 0.002505 | #/*
# *
# * TuneIn Radio for XBMC.
# *
# * Copyright (C) 2013 Brian Hornsby
# *
# * This program is free software: you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation, either version 3 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program. If not, see <http://www.gnu.org/licenses/>.
# *
# */
import sys
import os
import urllib
import urllib2
import re
import ConfigParser
import xml.dom.minidom as minidom
if sys.version_info >= (2, 7):
import json as _json
else:
import simplejson as _json
from . import astralradio as astralradio
from . import streamtheworld as streamtheworld
BASE_URL = 'opml.radiotime.com/'
class TuneIn:
class TuneInError(Exception):
''' Exception raised when an error or invalid response is received.
'''
def __init__(self, status, fault, faultcode=''):
self.status = status
self.fault = fault
self.faultcode = faultcode
def __str__(self):
return repr(self.status)
return repr(self.fault)
return repr(self.faultcode)
    def log_debug(self, msg):
        # Print a console debug line; active only when the instance was
        # constructed with debug=True. (Python 2 print statement.)
        if self._debug is True:
            print 'TuneIn Library: DEBUG: %s' % msg
    def __init__(self, partnerid, serial=None, locale="en-GB", formats=None, https=True, debug=False):
        '''Prepare the global query parameters sent with every API call.

        partnerid -- TuneIn partner id (required by the opml API).
        serial    -- optional device serial, forwarded when given.
        locale    -- locale string for localized responses.
        formats   -- optional comma-separated stream format filter.
        https     -- use https:// unless explicitly set to False.
        debug     -- enable console debug logging via log_debug().
        '''
        if https is False:
            self._protocol = 'http://'
        else:
            self._protocol = 'https://'
        # Parameters appended to every request URL; entries with falsy
        # values are skipped at URL-build time.
        self._global_params = []
        self._global_params.append({'param': 'partnerId', 'value': partnerid})
        if serial is not None:
            self._global_params.append({'param': 'serial', 'value': serial})
        # Always request JSON rendering of the opml responses.
        self._global_params.append({'param': 'render', 'value': 'json'})
        self._global_params.append({'param': 'locale', 'value': locale})
        if (formats is not None):
            self._global_params.append({'param': 'formats', 'value': formats})
        self._debug = debug
        self.log_debug('Protocol: %s' % self._protocol)
        self.log_debug('Global Params: %s' % self._global_params)
def __add_params_to_url(self, method, fnparams=None, addrender=True, addserial=True):
params = {}
for param in self._global_params:
if (param['param'] == 'render' and addrender is False):
pass
elif (param['param'] == 'serial' and addserial is False):
pass
elif (param['value']):
params[param['param']] = param['value']
for param in fnparams:
if (param['value']):
params[param['param']] = param['value']
url = '%s%s%s?%s' % (
self._protocol, BASE_URL, method, urllib.urlencode(params))
self.log_debug('URL: %s' % url)
return url
    def __call_tunein(self, method, params=None):
        '''Perform an HTTP request against the opml API and return the
        decoded JSON response.'''
        # NOTE(review): params=None is forwarded to __add_params_to_url,
        # which iterates it — confirm callers always pass a list.
        url = self.__add_params_to_url(method, params)
        req = urllib2.Request(url)
        f = urllib2.urlopen(req)
        result = _json.load(f)
        f.close()
        return result
    def __parse_asf(self, url):
        '''Parse an ASF playlist ([Reference] INI format) into stream URLs.'''
        self.log_debug('__parse_asf')
        self.log_debug('url: %s' % url)
        streams = []
        req = urllib2.Request(url)
        f = urllib2.urlopen(req)
        config = ConfigParser.RawConfigParser()
        config.readfp(f)
        # Every value under [Reference] is a stream URL (Ref1, Ref2, ...).
        references = config.items('Reference')
        for ref in references:
            streams.append(ref[1])
        f.close()
        return streams
def __parse_asx(self, url):
self.log_debug('__parse_asx')
self.log_debug('url: %s' % url)
streams = []
req = urllib2.Request(url)
f = urllib2.urlopen(req)
xmlstr = f.read().decode('ascii', 'ignore')
dom = minidom.parseString(xmlstr)
asx = dom.childNodes[0]
for node in asx.childNodes:
if (str(node.localName).lower() == 'entryref' and node.hasAttribute('href')):
streams.append(node.getAttribute('href'))
elif (str(node.localName).lower() == 'entryref' and node.hasAttribute('HREF')):
streams.append(node.getAttribute('HREF'))
elif (str(node.localName).lower() == 'entry'):
for subnode in node.childNodes:
if (str(subnode.localName).lower() == 'ref' and subnode.hasAttribute('href') and not subnode.getAttribute('href') in streams):
streams.append(subnode.getAttribute('href'))
elif (str(subnode.localName).lower() == 'ref' and subnode.hasAttribute('HREF') and not subnode.getAttribute('HREF') in streams):
| streams.append(subnode.getAttribute('HREF'))
f.close()
return streams
def __parse_m3u(self, url):
self.log_debug('__parse_m3u')
self.log_debug('url: %s' % url)
streams = []
req = urllib2.Request(url)
f = urllib2.urlopen(req)
for line in f:
if len(line.strip()) > 0 and not line.strip().startswith('#'):
streams.append(line.strip())
f.close()
return streams
    def __parse_pls(self, url):
        '''Parse a PLS playlist ([playlist] INI format) into stream URLs.'''
        self.log_debug('__parse_pls')
        self.log_debug('url: %s' % url)
        streams = []
        req = urllib2.Request(url)
        f = urllib2.urlopen(req)
        config = ConfigParser.RawConfigParser()
        config.readfp(f)
        numentries = config.getint('playlist', 'NumberOfEntries')
        # Entries are keyed File1..FileN; collected here in reverse order.
        while (numentries > 0):
            streams.append(
                config.get('playlist', 'File' + str(numentries)))
            numentries -= 1
        f.close()
        return streams
    def __result_ok(self, result):
        # NOTE(review): despite the name, this returns True when the status
        # is NOT '200'. Callers are outside this view — verify whether the
        # name or the comparison is wrong before changing either.
        return result['head']['status'] != '200'
def __result_status(self, result):
return int(result['head']['status'])
def __result_fault(self, result):
if ('fault' in result['head']):
return result['head']['fault']
else:
return ''
def __result_fault_code(self, result):
if ('fault_code' in result['head']):
return result['head']['fault_code']
else:
return ''
def is_category_id(self, id):
''' Returns True if argument is a TuneIn category id.
'''
if (not id or len(id) == 0 or id[0] != 'c' or not id[1:].isdigit()):
return False
return True
def is_folder_id(self, id):
''' Returns True if argument is a TuneIn folder id.
'''
if (not id or len(id) == 0 or id[0] != 'f' or not id[1:].isdigit()):
return False
return True
def is_genre_id(self, id):
''' Returns True if argument is a TuneIn genre id.
'''
if (not id or len(id) == 0 or id[0] != 'g' or not id[1:].isdigit()):
return False
return True
def is_artist_id(self, id):
''' Returns True if argument is a TuneIn artist id.
'''
if (not id or len(id) == 0 or id[0] != 'm' or not id[1:].isdigit()):
return False
return True
def is_region_id(self, id):
''' Returns True if argument is a TuneIn region id.
'''
if (not id or len(id) == 0 or id[0] != 'r' or not id[1:].isdigit()):
return False
return True
def is_show_id(self, id):
''' Returns True if argument is a TuneIn show id.
'''
if (not id or len(id) == 0 or id[0] != 'p' or not id[1:].isdigit()):
return False
return True
def is_station_id(self, id):
''' Returns True if argument is a TuneIn station id.
'''
if (not id or len(id) == 0 or id[0] != 's' or not id[1:].isdigit()):
return False
return True
def is_topic_id(self, id):
''' Returns True if | |
TheTimmy/spack | var/spack/repos/builtin/packages/saws/package.py | Python | lgpl-2.1 | 1,762 | 0 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have | received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, M | A 02111-1307 USA
##############################################################################
from spack import *
class Saws(AutotoolsPackage):
    """The Scientific Application Web server (SAWs) turns any C or C++
    scientific or engineering application code into a webserver,
    allowing one to examine (and even modify) the state of the
    simulation with any browser from anywhere."""

    homepage = "https://bitbucket.org/saws/saws/wiki/Home"

    # 'develop' tracks the upstream master branch; 0.1.0 pins a release tag.
    version('develop', git='https://bitbucket.org/saws/saws.git', tag='master')
    version('0.1.0', git='https://bitbucket.org/saws/saws.git', tag='v0.1.0')
|
openfisca/openfisca-tunisia | openfisca_tunisia/model/revenus/autres_revenus.py | Python | agpl-3.0 | 1,059 | 0 | # -*- coding: utf-8 -*-
from openfisca_tunisia.model.base import *
# Autres revenus
class salaire_etranger(Variable):
    # Salaries earned abroad.
    value_type = int
    label = "Salaires perçus à l'étranger"
    entity = Individu
    definition_period = YEAR


class pension_etranger_non_transferee(Variable):
    # Pensions received abroad, not transferred to Tunisia.
    value_type = int
    label = "Pensions perçues à l'étranger (non transférées)"
    entity = Individu
    definition_period = YEAR


class pension_etranger_transferee(Variable):
    # Pensions received abroad, transferred to Tunisia.
    # Fix: the label string was garbled by a stray "| " marker in this copy.
    value_type = int
    label = "Pensions perçues à l'étranger (transférées en Tunisie)"
    entity = Individu
    definition_period = YEAR


class autres_revenus_etranger(Variable):
    # Other income received abroad.
    value_type = int
    label = "Autres revenus perçus à l'étranger"
    entity = Individu
    definition_period = YEAR


# Exempt income
# Non-taxable income
# Deficits from previous years not yet deducted
class deficits_anterieurs_non_deduits(Variable):
    # Fix: the label string was garbled by a stray "| " marker in this copy.
    value_type = int
    label = "Déficits des années antérieures non déduits"
    entity = Individu
    definition_period = YEAR
|
JoseBlanca/franklin | test/mapping_test.py | Python | agpl-3.0 | 6,577 | 0.002889 | '''
Created on 2010 aza 30
@author: peio
It test the mapping module of franklin
'''
import unittest, os, StringIO
from os.path import join, exists
from tempfile import NamedTemporaryFile
from franklin.utils.misc_utils import TEST_DATA_DIR, NamedTemporaryDir
from franklin.mapping import map_reads_with_gmap, map_reads_with_bwa
from franklin.sam import bam2sam
SOLEXA = '@seq1\n'
SOLEXA += 'TCATTGAAAGTTGAAACTGATAGTAGCAGAGTTTTTTCCTCTGTTTGG\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIIIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq2\n'
SOLEXA += 'ATATGATTGAAGATATTTCTGGGCTTTAAGGGTTCTTGAGGATTTATA\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq14\n'
SOLEXA += 'ATATGATTGAAGATATTTCTGGGCTTTAAGGGTTCTTGAGGATTTATA\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq15\n'
SOLEXA += 'ATATGATTGAAGATATTTCTGGGCTTTAAGGGTTCTTGAGGATTTATA\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq12\n'
SOLEXA += 'ATATGATTGAAGATATTTCTGGACTTTAAGGGTTCTTGAGGATTTATA\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq13\n'
SOLEXA += 'ATATGATTGAAGATATTTCTGGACTTTAAGGGTTCTTGAGGATTTATA\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq16\n'
SOLEXA += 'ATATGATTGAAGATATTTCTGGACTTTAAGGGTTCTTGAGGATTTATA\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq17\n'
SOLEXA += 'atgtcgtacatattggcattgcagtcagcggtatctagtgctaggtaa\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
class GmapTest(unittest.TestCase):
'It test the gmap mapper'
@staticmethod
def test_gmap_mapper():
'It test the gmap mapper'
mappers_dir = join(TEST_DATA_DIR, 'mappers')
gmap_dir = join(TEST_DATA_DIR, 'mappers', 'gmap')
| work_dir = NamedTemporaryDir()
temp_genome = join(work_dir.name, | 'genome.fa')
os.symlink(join(mappers_dir, 'genome.fa'), temp_genome)
reads_fpath = join(gmap_dir, 'lb_lib1.pl_sanger.sm_sam1.fa')
out_bam_fhand = NamedTemporaryFile(suffix='.bam')
parameters = {'threads':None, 'kmer':13}
map_reads_with_gmap(temp_genome, reads_fpath, out_bam_fhand.name,
parameters)
sam_fhand = NamedTemporaryFile(suffix='.sam')
bam2sam(out_bam_fhand.name, sam_fhand.name, header=True)
result = open(sam_fhand.name).read()
assert exists(out_bam_fhand.name)
assert '36M2I204M' in result
assert 'SN:SL2.30ch00' in result
assert 'seq9_rev_MOD' in result
work_dir.close()
out_bam_fhand.close()
sam_fhand.close()
work_dir = NamedTemporaryDir()
temp_genome = join(work_dir.name, 'genome.fa')
os.symlink(join(mappers_dir, 'genome.fa'), temp_genome)
reads_fpath = join(gmap_dir, 'lb_lib1.pl_sanger.sm_sam1.sfastq')
out_bam_fhand = NamedTemporaryFile(suffix='.bam')
unmapped_fhand = StringIO.StringIO()
parameters = {'threads':None, 'kmer':13,
'unmapped_fhand':unmapped_fhand}
map_reads_with_gmap(temp_genome, reads_fpath, out_bam_fhand.name,
parameters)
sam_fhand = NamedTemporaryFile(suffix='.sam')
bam2sam(out_bam_fhand.name, sam_fhand.name, header=True)
result = open(sam_fhand.name).read()
assert exists(out_bam_fhand.name)
assert '36M2I204M' in result
assert 'SN:SL2.30ch00' in result
assert 'seq9_rev_MOD' in result
assert '?????????????????' in result
work_dir.close()
out_bam_fhand.close()
sam_fhand.close()
@staticmethod
def test_gmap_without_mapping_output():
'''It test that the gmap doesn't map anything'''
mappers_dir = join(TEST_DATA_DIR, 'mappers')
cmap_dir = join(TEST_DATA_DIR, 'mappers', 'gmap')
work_dir = NamedTemporaryDir()
temp_genome = join(work_dir.name, 'genome.fa')
os.symlink(join(mappers_dir, 'genome.fa'), temp_genome)
reads_fhand = NamedTemporaryFile()
reads_fhand.write('>seq\natgtgatagat\n')
reads_fhand.flush()
out_bam_fhand = NamedTemporaryFile()
out_bam_fpath = out_bam_fhand.name
out_bam_fhand.close()
parameters = {'threads':None, 'kmer':13}
map_reads_with_gmap(temp_genome, reads_fhand.name, out_bam_fpath,
parameters)
reads_fhand.close()
temp_sam_fhand = NamedTemporaryFile(suffix='.sam')
bam2sam(out_bam_fpath, temp_sam_fhand.name, True)
result = open(temp_sam_fhand.name).read()
assert 'seq\t4\t*\t0\t0' in result
class BwaTest(unittest.TestCase):
'It test the bwa mapper'
@staticmethod
def test_bwa_mapping():
'''It test that the gmap doesn't map anything'''
reference = join(TEST_DATA_DIR, 'blast/arabidopsis_genes')
work_dir = NamedTemporaryDir()
reference_fpath = join(work_dir.name, 'arabidopsis_genes')
os.symlink(reference, reference_fpath)
reads_fhand = NamedTemporaryFile(suffix='.sfastq')
reads_fhand.write(SOLEXA)
reads_fhand.flush()
out_bam_fhand = NamedTemporaryFile()
out_bam_fpath = out_bam_fhand.name
out_bam_fhand.close()
parameters = {'colorspace': False, 'reads_length':'short',
'threads':None, 'java_conf':None}
map_reads_with_bwa(reference_fpath, reads_fhand.name, out_bam_fpath,
parameters)
test_sam_fhand = NamedTemporaryFile(suffix='sam')
bam2sam(out_bam_fpath, test_sam_fhand.name)
result = open(test_sam_fhand.name).read()
assert 'seq17' in result
unmapped_fhand = StringIO.StringIO()
parameters = {'colorspace': False, 'reads_length':'short',
'threads':None, 'java_conf':None,
'unmapped_fhand':unmapped_fhand}
map_reads_with_bwa(reference_fpath, reads_fhand.name, out_bam_fpath,
parameters)
assert 'seq17' in unmapped_fhand.getvalue()
test_sam_fhand = NamedTemporaryFile(suffix='sam')
bam2sam(out_bam_fpath, test_sam_fhand.name)
result = open(test_sam_fhand.name).read()
assert 'seq17' not in result
if __name__ == "__main__":
import sys;sys.argv = ['', 'BwaTest.test_bwa_mapping']
unittest.main()
|
jabbalaci/PrimCom | modules/process.py | Python | gpl-2.0 | 554 | 0 | #!/usr/bin/env python
# encoding: utf-8
"""
# from modules import process
"""
import shlex
from subprocess import PIPE, Popen
def get_exitcode_stdout_ | stderr(cmd):
"""
Execute the external command and get its exitcode, stdout and stderr.
"""
args = shlex.split(cmd)
proc = Popen(args, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
exitcode = proc.returncode
#
return exitcode, out, err
########################################################### | ###################
if __name__ == "__main__":
pass
|
tboyce021/home-assistant | homeassistant/components/transmission/sensor.py | Python | apache-2.0 | 6,100 | 0.000984 | """Support for monitoring the Transmission BitTorrent client API."""
from homeassistant.const import CONF_NAME, DATA_RATE_MEGABYTES_PER_SECOND, STATE_IDLE
from homeassistant.core import cal | lback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import (
CONF_LIMIT,
CONF_ORDER,
DOMAIN,
STATE_ATTR_TORRENT_INFO,
SUPPORTED_ORDER_MODES,
| )
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Transmission sensors."""
tm_client = hass.data[DOMAIN][config_entry.entry_id]
name = config_entry.data[CONF_NAME]
dev = [
TransmissionSpeedSensor(tm_client, name, "Down Speed", "download"),
TransmissionSpeedSensor(tm_client, name, "Up Speed", "upload"),
TransmissionStatusSensor(tm_client, name, "Status"),
TransmissionTorrentsSensor(tm_client, name, "Active Torrents", "active"),
TransmissionTorrentsSensor(tm_client, name, "Paused Torrents", "paused"),
TransmissionTorrentsSensor(tm_client, name, "Total Torrents", "total"),
TransmissionTorrentsSensor(tm_client, name, "Completed Torrents", "completed"),
TransmissionTorrentsSensor(tm_client, name, "Started Torrents", "started"),
]
async_add_entities(dev, True)
class TransmissionSensor(Entity):
"""A base class for all Transmission sensors."""
def __init__(self, tm_client, client_name, sensor_name, sub_type=None):
"""Initialize the sensor."""
self._tm_client = tm_client
self._client_name = client_name
self._name = sensor_name
self._sub_type = sub_type
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._client_name} {self._name}"
@property
def unique_id(self):
"""Return the unique id of the entity."""
return f"{self._tm_client.api.host}-{self.name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def should_poll(self):
"""Return the polling requirement for this sensor."""
return False
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._tm_client.api.available
async def async_added_to_hass(self):
"""Handle entity which will be added."""
@callback
def update():
"""Update the state."""
self.async_schedule_update_ha_state(True)
self.async_on_remove(
async_dispatcher_connect(
self.hass, self._tm_client.api.signal_update, update
)
)
class TransmissionSpeedSensor(TransmissionSensor):
"""Representation of a Transmission speed sensor."""
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return DATA_RATE_MEGABYTES_PER_SECOND
def update(self):
"""Get the latest data from Transmission and updates the state."""
data = self._tm_client.api.data
if data:
mb_spd = (
float(data.downloadSpeed)
if self._sub_type == "download"
else float(data.uploadSpeed)
)
mb_spd = mb_spd / 1024 / 1024
self._state = round(mb_spd, 2 if mb_spd < 0.1 else 1)
class TransmissionStatusSensor(TransmissionSensor):
"""Representation of a Transmission status sensor."""
def update(self):
"""Get the latest data from Transmission and updates the state."""
data = self._tm_client.api.data
if data:
upload = data.uploadSpeed
download = data.downloadSpeed
if upload > 0 and download > 0:
self._state = "Up/Down"
elif upload > 0 and download == 0:
self._state = "Seeding"
elif upload == 0 and download > 0:
self._state = "Downloading"
else:
self._state = STATE_IDLE
else:
self._state = None
class TransmissionTorrentsSensor(TransmissionSensor):
"""Representation of a Transmission torrents sensor."""
SUBTYPE_MODES = {
"started": ("downloading"),
"completed": ("seeding"),
"paused": ("stopped"),
"active": ("seeding", "downloading"),
"total": None,
}
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return "Torrents"
@property
def device_state_attributes(self):
"""Return the state attributes, if any."""
info = _torrents_info(
torrents=self._tm_client.api.torrents,
order=self._tm_client.config_entry.options[CONF_ORDER],
limit=self._tm_client.config_entry.options[CONF_LIMIT],
statuses=self.SUBTYPE_MODES[self._sub_type],
)
return {
STATE_ATTR_TORRENT_INFO: info,
}
def update(self):
"""Get the latest data from Transmission and updates the state."""
torrents = _filter_torrents(
self._tm_client.api.torrents, statuses=self.SUBTYPE_MODES[self._sub_type]
)
self._state = len(torrents)
def _filter_torrents(torrents, statuses=None):
return [
torrent
for torrent in torrents
if statuses is None or torrent.status in statuses
]
def _torrents_info(torrents, order, limit, statuses=None):
infos = {}
torrents = _filter_torrents(torrents, statuses)
torrents = SUPPORTED_ORDER_MODES[order](torrents)
for torrent in torrents[:limit]:
info = infos[torrent.name] = {
"added_date": torrent.addedDate,
"percent_done": f"{torrent.percentDone * 100:.2f}",
"status": torrent.status,
"id": torrent.id,
}
try:
info["eta"] = str(torrent.eta)
except ValueError:
pass
return infos
|
daleloogn/deeplearning-turtorial | rnn_ctc/ctc.py | Python | gpl-2.0 | 1,769 | 0.000565 | import theano
import theano.tensor as tt
from activations import share, init_wts
class SoftmaxLayer():
def __init__(self, inpt, in_sz, n_classes, ):
b = share(init_wts(n_classes))
w = share(init_wts(in_sz, n_classes))
self.output = tt.nnet.softmax(tt.dot(inpt, w) + b)
self.params = [w, b]
class CTCLayer():
def __init__(self, inpt, labels, blank):
"""
Recurrent Relation:
A matrix that specifies allowed transistions in paths.
At any time, one could
0) Stay at the same label (diagonal is identity)
1) Move to the next label (first upper diagonal is identity)
2) Skip to the next to next label if
a) next label is blank and
b) the next to next label is different from the current
(Second upper diagonal is product of conditons a & b)
"""
labels2 = tt.concatenate((labels, [blank, blank]))
sec_diag = tt.neq(labels2[:-2], labels2[2:]) * tt.eq(labels2[1:-1],
blank)
n_labels = labels.shape[0]
recurrence_relation = \
| tt.eye(n_labels) + \
tt.eye(n_labels, k=1) + \
tt.eye(n_labels, k=2) * sec_diag.dimshuffle((0, 'x'))
'''
Forward path probabilities
'''
pred_y = inpt[:, labels]
probabilities, _ = theano.scan(
lambda curr, prev: cu | rr * tt.dot(prev, recurrence_relation),
sequences=[pred_y],
outputs_info=[tt.eye(n_labels)[0]]
)
# Final Costs
labels_probab = tt.sum(probabilities[-1, -2:])
self.cost = -tt.log(labels_probab)
self.params = []
self.debug = probabilities.T |
Ondross/statsq | correlation_test.py | Python | mit | 3,096 | 0 | from argparse import ArgumentParser
import matplotlib.pyplot as plt
import random
import correlation as corr
OPTIONS = [-1, 1] # Possible Y values.
def newData(options, n):
return [random.choice(options) for i in range(n)]
def showData(dataA, dataB):
if len(dataA) != len(dataB):
raise ValueError("Datasets must be equal length")
xs = range(len(dataA)+1)
fig = plt.figure()
# Create simple axes and define their size.
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.set_ylim(ymin=min(dataA+dataB)-1, ymax=max(dataA+dataB)+1)
# Offset data for visibility
dataA = [x + abs(x/50.0) for x in dataA]
xs2 = [x+.05 for x in xs]
xs2[-1] -= .05
# Duplicate first point, to create a horizontal line.
dataA = dataA[:1] + dataA
dataB = dataB[:1] + dataB
# Show data as step functions.
ax.step(xs, dataA, color="g")
ax.step(xs2, dataB, color="b")
ax.grid()
ax.set_yticks((-1, 1))
# Non-blocking.
plt.draw()
class Correlation(object):
"""
Two datasets and their correlation.
"""
def __init__(self, dataA, dataB):
self.dataA = dataA
self.dataB = dataB
self.coefficient = None
self.offset = None
def | calcCorrelation(self):
"""
Calls the binary correlation algorithm and selects the correlation
with the optimal offset.
"""
potential = corr.calcCorrelates(self.dataA, self.dataB)
maxCorrelation = (0, 0)
for correlation in potential:
if abs(correlation[1]) > abs(maxCorrelation[1] | ):
maxCorrelation = correlation
self.coefficient = maxCorrelation[1]
self.offset = maxCorrelation[0]
if __name__ == "__main__":
parser = ArgumentParser("usage: %prog [options]")
parser.add_argument("-n",
dest="numDatasets",
default=100,
type=int,
help="Number of random datasets to test.")
parser.add_argument("-l",
dest="length",
default=20,
type=int,
help="Number of each dataset.")
args = parser.parse_args()
numDatasets = args.numDatasets
if numDatasets < 2:
raise ValueError("At least two datasets must be compared.")
datasets = [newData(OPTIONS, args.length) for x in range(numDatasets)]
# Correlate every pair.
correlations = []
for idxi, i in enumerate(datasets):
for idxj, j in enumerate(datasets):
# Ignore duplicates.
if idxi > idxj:
c = Correlation(i, j)
c.calcCorrelation()
correlations.append(c)
# Sort based on max correlation.
correlations.sort(key=lambda x: abs(x.coefficient), reverse=True)
# Plot top five correlations.
for c in correlations[0:min(5, numDatasets)]:
showData(c.dataA, c.dataB)
print c.coefficient, c.offset
# Blocking call to Matplotlib so program doesn't quit.
plt.show()
|
BackupTheBerlios/freespeak | freespeak/ui/translation_label.py | Python | gpl-2.0 | 6,576 | 0.014294 | # FreeSpeak - a GUI frontend to online translator engines
# freespeak/ui/translation.py
#
## Copyright (C) 2005, 2006, 2007, 2008, 2009 Luca Bruno <lethalman88@gmail.com>
##
## This file is part of FreeSpeak.
##
## FreeSpeak is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## FreeSpeak is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Library General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Classes for creating a full-featured notebook translation tab
"""
import gtk
from freespeak.ui.spinner import Spinner
import freespeak.ui.utils as uiutils
class TranslationLabel (gtk.HBox):
"""
The label contains a pixbuf, a label and a close button.
Also a popup menu is available if the user right clicks the tab.
If the user double clicks the tab label, it becomes an entry able to change
the label of the tab.
"""
ui_string = """<ui>
<popup name="PopupMenu">
<menuitem action="Close" />
</popup>
</ui>"""
def __init__ (self, application, translation):
gtk.HBox.__init__ (self, spacing=2)
self.application = application
self.translation = translation
self.title = 'Unnamed'
self.is_custom = False
self.setup_icon ()
self.setup_label ()
self.setup_entry ()
self.setup_menu ()
self.setup_event_box ()
self.setup_close ()
self.be_label ()
self.pack_start (self.icon)
self.pack_start (self.event_box)
self.pack_start (self.close, False, padding=4)
def setup_icon (self):
"""
Setup the spinner icon
"""
self.icon = Spinner (self.application, None)
self.icon.show ()
def setup_label (self):
"""
Setup the label
"""
self.label = gtk.Label ()
self.label.show ()
def setup_entry (self):
"""
Setup the entry that will take place for modifying the label
"""
self.entry = gtk.Entry ()
self.entry_focus_out = self.entry.connect ('focus-out-event',
self.on_entry_activate)
self.entry_activate_handler = self.entry.connect ('activate',
self.on_entry_activate)
self.entry.show ()
def setup_event_box (self):
"""
The event box needed to grab events for the child widgets
"""
self.event_box = gtk.EventBox ()
self.event_box.set_visible_window (False)
self.event_box.connect ('button-press-event',
self.on_event_box_button_press_event)
self.event_box.connect ('key-press-event',
self.on_event_box_key_press_event)
self.event_box.show ()
def setup_close (self):
"""
Setup the close tiny-button
"""
self.close = uiutils.TinyButton (gtk.STOCK_CLOSE)
self.close.set_tooltip_text (_("Close this translation"))
self.close.conne | ct ('clicked', self.on_close)
self.close.show ()
def setup_menu (self):
"""
Setup the popup menu
"""
self.action_group = gtk.ActionGroup ('PopupActions')
actions = (
('Close', gtk.STOCK_CLOSE, None, None,
_('Close this translation'), self.on_close),
)
self.action_group.add_actions (actions)
| self.ui = gtk.UIManager ()
self.ui.insert_action_group (self.action_group, 0)
self.ui.add_ui_from_string (self.ui_string)
self.menu = self.ui.get_widget ("/PopupMenu")
def drop_child (self):
"""
Drop the event box child, which would be a label or an entry
"""
child = self.event_box.get_child ()
if child:
self.event_box.remove (child)
def be_label (self):
"""
Drop the entry and become a label
"""
self.drop_child ()
self.label.set_text (self.title)
self.event_box.add (self.label)
def be_entry (self):
"""
Drop the label and become an entry
"""
self.drop_child ()
self.entry.set_text (self.title)
self.event_box.add (self.entry)
def set_suggested_title (self, title):
"""
Set the suggested title of the page
"""
if not self.is_custom:
self.title = title
self.label.set_text (title)
def start_loading (self):
"""
Make the tab busy by animating the spinner
"""
self.icon.start ()
def stop_loading (self):
"""
Stop animating the spinner
"""
self.icon.stop ()
# Events
def on_event_box_button_press_event (self, event_box, event):
"""
Handle both the double click and the right click
"""
if event.type == gtk.gdk._2BUTTON_PRESS:
self.be_entry ()
self.entry.grab_focus ()
elif event.type == gtk.gdk.BUTTON_PRESS and event.button == 3:
self.menu.popup (None, None, None, event.button, event.time, None)
def on_event_box_key_press_event (self, event_box, event):
"""
Handle the ESC key as a good key for becoming back a label
and reject the entry changes
"""
# ESC key
if event.keyval == 65307:
self.entry.handler_block (self.entry_focus_out)
self.be_label ()
self.entry.handler_unblock (self.entry_focus_out)
def on_entry_activate (self, entry, *args):
"""
Accept changes when the user activates the entry
"""
self.title = entry.get_text ()
self.entry.handler_block (self.entry_focus_out)
self.be_label ()
self.entry.handler_unblock (self.entry_focus_out)
self.is_custom = True
def on_close (self, button):
"""
Close the translation
"""
self.translation.close ()
__all__ = ['TranslationLabel']
|
CloudServer/cinder | cinder/objects/backup.py | Python | apache-2.0 | 5,187 | 0 | # Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_versionedobjects import fields
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import base
from cinder import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@base.CinderObjectRegistry.register
class Backup(base.CinderPersist | entObject, base.CinderObject,
base.CinderObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.UUIDField(),
'user_id': fields.UUIDField(),
'project_id': fields.UUIDField(),
'volume_id': fields.UUIDField(),
'host': fie | lds.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'container': fields.StringField(nullable=True),
'parent_id': fields.StringField(nullable=True),
'status': fields.StringField(nullable=True),
'fail_reason': fields.StringField(nullable=True),
'size': fields.IntegerField(),
'display_name': fields.StringField(nullable=True),
'display_description': fields.StringField(nullable=True),
# NOTE(dulek): Metadata field is used to store any strings by backup
# drivers, that's why it can't be DictOfStringsField.
'service_metadata': fields.StringField(nullable=True),
'service': fields.StringField(nullable=True),
'object_count': fields.IntegerField(),
'temp_volume_id': fields.StringField(nullable=True),
'temp_snapshot_id': fields.StringField(nullable=True),
}
obj_extra_fields = ['name']
@property
def name(self):
return CONF.backup_name_template % self.id
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with a target version."""
target_version = utils.convert_version_to_tuple(target_version)
@staticmethod
def _from_db_object(context, backup, db_backup):
for name, field in backup.fields.items():
value = db_backup.get(name)
if isinstance(field, fields.IntegerField):
value = value if value is not None else 0
backup[name] = value
backup._context = context
backup.obj_reset_changes()
return backup
@base.remotable_classmethod
def get_by_id(cls, context, id):
db_backup = db.backup_get(context, id)
return cls._from_db_object(context, cls(context), db_backup)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.cinder_obj_get_changes()
db_backup = db.backup_create(self._context, updates)
self._from_db_object(self._context, self, db_backup)
@base.remotable
def save(self):
updates = self.cinder_obj_get_changes()
if updates:
db.backup_update(self._context, self.id, updates)
self.obj_reset_changes()
@base.remotable
def destroy(self):
with self.obj_as_admin():
db.backup_destroy(self._context, self.id)
@base.CinderObjectRegistry.register
class BackupList(base.ObjectListBase, base.CinderObject):
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('Backup'),
}
child_versions = {
'1.0': '1.0'
}
@base.remotable_classmethod
def get_all(cls, context, filters=None):
backups = db.backup_get_all(context, filters)
return base.obj_make_list(context, cls(context), objects.Backup,
backups)
@base.remotable_classmethod
def get_all_by_host(cls, context, host):
backups = db.backup_get_all_by_host(context, host)
return base.obj_make_list(context, cls(context), objects.Backup,
backups)
@base.remotable_classmethod
def get_all_by_project(cls, context, project_id, filters=None):
backups = db.backup_get_all_by_project(context, project_id, filters)
return base.obj_make_list(context, cls(context), objects.Backup,
backups)
@base.remotable_classmethod
def get_all_by_volume(cls, context, volume_id, filters=None):
backups = db.backup_get_all_by_volume(context, volume_id, filters)
return base.obj_make_list(context, cls(context), objects.Backup,
backups)
|
convexengineering/gplibrary | gpkitmodels/SP/aircraft/tail/tail_boom_flex.py | Python | mit | 1,453 | 0.001376 | " tail boom flexibility "
from numpy import pi
from gpkit import Model, parse_variables, SignomialsEnabled
class TailBoomFlexibility(Model):
""" Tail Boom Flexibility Model
Variables
---------
Fne [-] tail boom flexibility factor
deda [-] wing downwash derivative
SMcorr 0.55 [-] corrected static margin
sph1 [-] flexibility helper variable 1
sph2 [-] flexibility helper variable 2
LaTex Strings
-------------
Fne F_{\mathrm{NE}}
deda d\\epsilon/d\\alpha
SMcorr SM_{\\mathrm{corr}}
"""
@par | se_variables(__doc__, globals())
| def setup(self, htail, hbending, wing):
mh = htail.mh
mw = wing.mw
Vh = htail.Vh
th = hbending.th
CLhmin = htail.CLhmin
CLwmax = wing.planform.CLmax
Sw = wing.planform.S
bw = wing.planform.b
lh = htail.lh
CM = wing.planform.CM
constraints = [
Fne >= 1 + mh*th,
sph1*(mw*Fne/mh/Vh) + deda <= 1,
sph2 <= Vh*CLhmin/CLwmax,
# (sph1 + sph2).mono_lower_bound({"sph1": .48, "sph2": .52}) >= (
# SMcorr + wing["C_M"]/wing["C_{L_{max}}"]),
deda >= mw*Sw/bw/4/pi/lh]
with SignomialsEnabled():
constraints.extend([sph1 + sph2 >= SMcorr + CM/CLwmax])
return constraints
|
tommeagher/pycar14 | project2/baseball_complete.py | Python | mit | 5,984 | 0.012032 | import csv
import operator
import math
from pprint import pprint
#First, let's see what kind of data we have to work with
def calculate_top10 (filename):
#Open the salary csv
salaries_object = open(filename, 'rb')
#Make the file object usable
salary_data = csv.reader(salaries_object)
#Create your header row and look at what is in here
header_row = salary_data.next()
#Find the index of the year and player id
print header_row
#Check the type of the year | column to see if it is a string or int
sample_data = salary_data.next()
print '%s is %s' % (sample_data[0], type(sample_data[0]))
#Because we're on the first row of data, we need to
#return to the top before we do anything with this.
#We do this by resetting the pointer in the original file.
salaries_object.seek(0)
#Arrange in descending order of salary
#Remember that lists always keep their order!
sorted_salaries = sorted(salary_data, key=operator.ite | mgetter(4), reverse=True)
#Create a list of the top 10%
top_percentile = len(sorted_salaries) * .10
#Round it!
rounded_salaries = math.floor(top_percentile)
#We don't want decimal points (you can't have part of a player)
#so cast to an int
int_salaries = int(rounded_salaries)
#You could do the above steps in one line like this:
#int(math.floor(len(sorted_salaries * .10)))
#Now let's create our final list, of just the highest-paid players
cream_of_the_crop = []
#We only need the player IDs right now.
for index, row in enumerate(sorted_salaries):
if index > 0 and index <= int_salaries:
cream_of_the_crop.append(row[3])
return cream_of_the_crop
#We are going to be working with dictionaries to make things easier
def create_salary_dict(filename, cream_of_the_crop):
#Open the csv
salaries_object = open(filename, 'rb')
#This time, let's use DictReader,
#which maps the header row's values to each item in each row
player_dict = csv.DictReader(salaries_object)
#Create new list of only 2013 information
#NOTE: You can't start a variable with a number, so 2013_salaries won't work
salaries_2013 = {}
for row in player_dict:
#Using DictReader allows us to access rows by their column name!
year = row["yearID"]
if year == '2013':
#Create a record for each player's ID and assign it the salary
salaries_2013[row["playerID"]] = row["salary"]
#Now we can reference the salary of any player whose ID we know.
#But we only want those who were in the top 10% of all time.
#Create a new dict to hold just the top players from 2013
top_salaries_2013 = {}
#Let's compare our player dict with the list of all-time
#high salaries we made in the first function.
#(You could combine this step with the DictReader step above.)
for player in cream_of_the_crop:
#Check for the presence of a key that matches the playerID in salaries_2013
if player in salaries_2013:
top_salaries_2013[player] = { "salary": salaries_2013[player] }
return top_salaries_2013
def add_player_stats(top_salaries_dict, master_file):
#Open the master csv
master_object = open(master_file, 'rb')
#Read the file
master_data = csv.DictReader(master_object)
#Let's look at one record of the master data to get the headers
print master_data.next()
#That's a little hard to read, isn't it? Try prettyprint instead.
pprint(master_data.next())
#Reset the generator and skip the header row
master_object.seek(0)
master_data.next()
#Create a dict of the master file with DictReader
master_dict = {}
#Assemble the troops
for row in master_data:
master_dict[row["playerID"]] = {
"first_name": row["nameFirst"],
"given_name": row["nameGiven"],
"last_name": row["nameLast"],
"height": row["height"],
"weight": row["weight"],
"birth_city": row["birthCity"],
"birth_state": row["birthState"],
"birth_country": row["birthCountry"],
"birthdate": '%s-%s-%s' %(row["birthDay"], row["birthMonth"], row["birthYear"]),
"death_city": row["deathCity"],
"death_state": row["deathState"],
"death_country": row["deathCountry"],
"deathdate": '%s-%s-%s' %(row["deathDay"], row["deathMonth"], row["deathYear"]),
"bats": row["bats"],
"throws": row["throws"],
"debut": row["debut"],
"final_game": row["finalGame"]
}
#Loop over the top salaries dict to find the player IDs in the master dict
#We could also loop over the master dict to find which of those exist
#in the top salaries dict, but that would be less efficient.
#When you loop over a dict, you only have access to the keys.
#To access the values, we need .iteritems()
#Remember the key is the player ID and the value is the salary.
#Typically, when iterating over a dict, the syntax is:
#for key, value in my_dict.iteritems():
#For clarity, we will use the header row values instead.
#Add names, birth state and birth country to the dict
for playerID, salary in top_salaries_dict.iteritems():
top_salaries_dict.update({ playerID: {
'first_name': master_dict[playerID]["first_name"],
'last_name': master_dict[playerID]["last_name"],
'birth_state': master_dict[playerID]["birth_state"],
'birth_country': master_dict[playerID]["birth_country"]}
})
return top_salaries_dict
# Driver: compute the ten highest 2013 salaries, then enrich them with
# biographical data from the Lahman "Master" file using the functions
# defined earlier in this script.
salary_file = 'data/2013/Salaries.csv'
#TODO: Make this so we can walk a folder structure
master_file = 'data/2013/Master.csv'
# Top-10 salary rows extracted from the salary file.
top10 = calculate_top10(salary_file)
# Map of playerID -> salary info for the top earners.
top_salaries_dict = create_salary_dict(top10)
# Same dict, augmented with name/birthplace fields from the master file.
final_file = add_player_stats(top_salaries_dict, master_file)
#write to a file
|
benabraham/cz.pycon.org-2017 | pyconcz_2017/speakers/migrations/0022_auto_20170524_2115.py | Python | mit | 566 | 0.001767 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-24 19:15
f | rom __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: change the set of rooms a talk slot may use."""

    dependencies = [
        ('speakers', '0021_auto_20170524_2024'),
    ]
    operations = [
        migrations.AlterField(
            model_name='slot',
            name='room',
            # NOTE(review): the choices mix string labels ('Big hall',
            # 'Theatre', 'Foyer') with bare ints (346, 347, ...) used as
            # display labels for numbered rooms -- presumably intentional
            # (room numbers shown as-is); confirm.
            field=models.PositiveSmallIntegerField(choices=[(1, 'Big hall'), (2, 'Theatre'), (4, 346), (5, 347), (6, 301), (7, 302), (8, 303), (9, 343), (3, 'Foyer')]),
        ),
    ]
|
vincentlooi/FCIS | fcis/symbols/resnet_v1_101_fcis.py | Python | apache-2.0 | 79,613 | 0.007248 | # --------------------------------------------------------
# Fully Convolutional Instance-aware Semantic Segmentation
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Haozhi Qi, Guodong Zhang, Yi Li
# --------------------------------------------------------
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.proposal import *
from operator_py.proposal_annotator import *
from operator_py.box_parser import *
from operator_py.box_annotator_ohem import *
class resnet_v1_101_fcis(Symbol):
    def __init__(self):
        """
        Define the hyper-parameters shared by the symbol-building methods.
        """
        self.eps = 1e-5  # epsilon passed to every BatchNorm layer
        self.use_global_stats = True  # freeze BN statistics (inference-style BN)
        self.workspace = 512  # MXNet convolution workspace limit
        # Residual blocks per stage; (3, 4, 23, 3) is the ResNet-101 layout.
        self.units = (3, 4, 23, 3) # use for 101
        self.filter_list = [256, 512, 1024, 2048]  # output channels per stage
def get_resnet_v1_conv4(self, data):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2),
no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3),
stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1),
stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1),
stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True,
| fix_gamma=False, eps=self.eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol. | BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1),
|
fbessez/Tinder | fb_auth_token.py | Python | mit | 2,140 | 0.002336 | # Used from https://github.com/philipperemy/Deep-Learning-Tinder/blob/master/tinder_token.py
import re
import requests
import robobrowser
MOBILE_USER_AGENT = "Tinder/7.5.3 (iPhone; iOS 10.3.2; Scale/2.00)"
FB_AUTH = "https://www.facebook.com/v2.6/dialog/oauth?redirect_uri=fb464891386855067%3A%2F%2Fauthorize%2F&display=touch&state=%7B%22challenge%22%3A%22IUUkEUqIGud332lfu%252BMJhxL4Wlc%253D%22%2C%220_auth_logger_id%22%3A%2230F06532-A1B9-4B10-BB28-B29956C71AB1%22%2C%22com.facebook.sdk_client_state%22%3Atrue%2C%223_method%22%3A%22sfvc_auth%22%7D&scope=user_birthday%2Cuser_photos%2Cuser_education_history%2Cemail%2Cuser_rel | ationship_details%2Cuser_friends%2Cuser_work_history%2Cuser_likes&response_type=token%2Csigned_request&default_audience=friends&return_scopes=true&auth_type=rerequest&client_id=464891386855067&ret=login&sdk=ios&logger_id=30F06532-A1B9-4B10-BB28-B29956C71AB1&ext=1470840777&hash=AeZqkIcf-NEW6vBd"
def get_fb_a | ccess_token(email, password):
s = robobrowser.RoboBrowser(user_agent=MOBILE_USER_AGENT, parser="lxml")
s.open(FB_AUTH)
f = s.get_form()
f["pass"] = password
f["email"] = email
s.submit_form(f)
f = s.get_form()
try:
s.submit_form(f, submit=f.submit_fields['__CONFIRM__'])
access_token = re.search(
r"access_token=([\w\d]+)", s.response.content.decode()).groups()[0]
return access_token
except requests.exceptions.InvalidSchema as browserAddress:
access_token = re.search(
r"access_token=([\w\d]+)",str(browserAddress)).groups()[0]
return access_token
except Exception as ex:
print("access token could not be retrieved. Check your username and password.")
print("Official error: %s" % ex)
return {"error": "access token could not be retrieved. Check your username and password."}
def get_fb_id(access_token):
    """Get the Facebook user id for `access_token` via the Graph API.

    `access_token` is normally the token string; it may also be the error
    dict produced by get_fb_access_token(), in which case an error dict is
    returned instead of hitting the network.
    """
    # Fix: the original placed this docstring *after* the guard below, where
    # it was a bare string statement rather than the function's docstring.
    if "error" in access_token:
        return {"error": "access token could not be retrieved"}
    req = requests.get(
        'https://graph.facebook.com/me?access_token=' + access_token)
    return req.json()["id"]
|
davidzchen/tensorflow | tensorflow/python/kernel_tests/cwise_ops_binary_test.py | Python | apache-2.0 | 36,605 | 0.010245 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for binary coefficient-wise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
  """Zero out entries of `x` below `thresh` (in place) and return the result
  as a (SparseTensor, values) pair."""
  x[x < thresh] = 0

  nonzero_positions = np.where(x)
  sparse_indices = np.vstack(nonzero_positions).astype(index_dtype).T
  sparse_values = x[nonzero_positions]

  sp_tensor = sparse_tensor.SparseTensor(
      indices=sparse_indices, values=sparse_values, dense_shape=x.shape)
  return sp_tensor, sparse_values
def _default_tolerance(dtype):
"""Returns a sensible default tolerance for comparing results of a given type.
Args:
dtype: A datatype.
"""
if dtype == np.float16:
return 5e-3
elif dtype in (np.float32, np.complex64):
return 1e-3
elif dtype in (np.float64, np.complex128):
return 1e-5
else:
return None # Fail fast for unexpected types
class BinaryOpTest(test.TestCase):
  def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
    """Check tf_func(x, y) against np_func(x, y) on CPU.

    Also verifies that the TF operator takes precedence over numpy's when
    one operand is a plain ndarray and, optionally, that tf.Variable
    operands produce the same result.
    """
    np_ans = np_func(x, y)
    with test_util.force_cpu():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_cpu = self.evaluate(out)
      # Test that the op takes precedence over numpy operators.
      np_left = self.evaluate(tf_func(x, iny))
      np_right = self.evaluate(tf_func(inx, y))

      if also_compare_variables:
        var_x = variables.Variable(x)
        var_y = variables.Variable(y)
        self.evaluate(variables.global_variables_initializer())
        print(type(x), type(y), type(var_x), type(var_y))
        print(type(tf_func(x, var_y)), type(tf_func(var_x, y)))
        np_var_left = self.evaluate(tf_func(x, var_y))
        np_var_right = self.evaluate(tf_func(var_x, y))

    # Object (e.g. string) results do not support allclose comparison.
    if np_ans.dtype != np.object:
      self.assertAllClose(np_ans, tf_cpu)
      self.assertAllClose(np_ans, np_left)
      self.assertAllClose(np_ans, np_right)
      if also_compare_variables:
        self.assertAllClose(np_ans, np_var_left)
        self.assertAllClose(np_ans, np_var_right)
    self.assertShapeEqual(np_ans, out)
  # Per-dtype tolerances used when comparing analytic vs. numeric gradients.
  _GRAD_TOL = {
      dtypes_lib.float16: 1e-3,
      dtypes_lib.float32: 1e-3,
      dtypes_lib.complex64: 1e-2,
      dtypes_lib.float64: 1e-5,
      dtypes_lib.complex128: 1e-4
  }
  def _compareGradientX(self,
                        x,
                        y,
                        np_func,
                        tf_func,
                        numeric_gradient_type=None):
    """Check the analytic gradient of tf_func w.r.t. its first argument.

    If `numeric_gradient_type` is given, the numeric Jacobian is recomputed
    in that (typically higher-precision) dtype and cast back before the
    comparison.
    """
    z = np_func(x, y)
    zs = list(z.shape)
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        # Scale the output -- presumably to exercise a non-identity upstream
        # gradient for float types; confirm.
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      xs = list(x.shape)
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, xs, out, zs, x_init_value=x)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = ops.convert_to_tensor(xf)
        inyf = ops.convert_to_tensor(yf)
        outf = tf_func(inxf, inyf)
        _, jacob_n = gradient_checker.compute_gradient(
            inxf, xs, outf, zs, x_init_value=xf, delta=1e-3)
        jacob_n = jacob_n.astype(x.dtype)
      tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
      self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
  def _compareGradientY(self,
                        x,
                        y,
                        np_func,
                        tf_func,
                        numeric_gradient_type=None):
    """Check the analytic gradient of tf_func w.r.t. its second argument.

    Mirrors _compareGradientX but differentiates w.r.t. `y`.
    """
    z = np_func(x, y)
    zs = list(z.shape)
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      ys = list(np.shape(y))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          iny, ys, out, zs, x_init_value=y)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = ops.convert_to_tensor(xf)
        inyf = ops.convert_to_tensor(yf)
        outf = tf_func(inxf, inyf)
        # NOTE(review): unlike _compareGradientX, no explicit delta is
        # passed here -- confirm whether that asymmetry is intentional.
        _, jacob_n = gradient_checker.compute_gradient(
            inyf, ys, outf, zs, x_init_value=yf)
        jacob_n = jacob_n.astype(x.dtype)
      tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
      self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
  def _compareBoth(self, x, y, np_func, tf_func, also_compare_variables=False):
    """Run the CPU comparison, gradient checks (where supported), then GPU."""
    self._compareCpu(x, y, np_func, tf_func, also_compare_variables)
    if x.dtype in (np.float16, np.float32, np.float64, np.complex64,
                   np.complex128):
      # Floor division has a piecewise-constant (zero) gradient; zeta and
      # polygamma do not define a gradient w.r.t. the first argument.
      if tf_func not in (_FLOORDIV, math_ops.floordiv, math_ops.zeta,
                         math_ops.polygamma):
        self._compareGradientX(x, y, np_func, tf_func)
        self._compareGradientY(x, y, np_func, tf_func)
      if tf_func in (math_ops.zeta, math_ops.polygamma):
        # These methods only support gradients in the second parameter
        self._compareGradientY(x, y, np_func, tf_func)
      self._compareGpu(x, y, np_func, tf_func)
@test_util.run_deprecated_v1
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(x, y, np.add, math_ops.add, also_compare_variables=True)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.arctan2, |
LK/monopoly-simulator | non_color_property.py | Python | mit | 1,996 | 0.027555 | from prop import Property
from groupofchanges import GroupOfChanges
from constants import *
class NonColorProperty(Property):
    """A non-color-group Monopoly property: a railroad or a utility.

    Rent rules differ from color properties: railroads read the rent table
    indexed by how many railroads the owner holds, while utilities charge a
    multiple of the landing player's dice roll.
    """
    # Constants
    _UTILITY_MULTIPLIERS = { 1: 4, 2: 10 } # multipliers for owning 1 or 2 utilities
    _UTILITY = True
    _RAILROAD = False

    def __init__(self, name, price, rents, property_group, size_of_property_group, mortgaged=False):
        super(NonColorProperty, self).__init__(name, price, rents, property_group, size_of_property_group, mortgaged)

    def _type_of_property(self, state):
        """Classify this property as _UTILITY or _RAILROAD by comparing its
        group against the known utility/railroad groups on the board."""
        utility_property_group = state.squares[INDEX[WATER_WORKS]].property_group
        railroad_property_group = state.squares[INDEX[READING_RAILROAD]].property_group
        if self.property_group == utility_property_group:
            return NonColorProperty._UTILITY
        elif self.property_group == railroad_property_group:
            return NonColorProperty._RAILROAD
        else:
            raise Exception("This instance is not a proper NonColorProperty")

    def landed(self, player, roll, state, from_card=False):
        """Resolve a player landing here: no-op for the owner, purchase
        offer when bank-owned, otherwise pay rent to the owner."""
        owner = state.get_owner(self)
        if owner == player:
            return GroupOfChanges()
        elif owner == state.bank:
            return player.buy_or_deny(self, state)
        else:
            num_owned = owner.property_group_counts[self.property_group]
            rent = self.get_rent(num_owned, roll, state, from_card)
            return player.pay(owner, rent, state)

    # Returns the rent on this property based on the number of properties in this
    # group owned, and the landing player's roll, and whether they came from a card
    def get_rent(self, num_owned, roll, state, from_card):
        if self._type_of_property(state) == NonColorProperty._UTILITY:
            if from_card:
                # Card-directed moves always pay the 10x (two-utility) rate.
                multiplier = NonColorProperty._UTILITY_MULTIPLIERS[2]
            else:
                multiplier = NonColorProperty._UTILITY_MULTIPLIERS[num_owned]
            return multiplier * roll
        else:
            # Railroad: rent table is indexed by railroads owned (1-based).
            rent = self.rents[num_owned - 1]
            if from_card:
                # Card-directed moves to a railroad pay double rent.
                return 2 * rent
            else:
                return rent

    def __str__(self):
        s = ""
        s += "Name: %s\n" % (self._name)
        s += "Mortgaged: " + str(self._mortgaged) + "\n"
        return s
|
Jianlong-Peng/pytools | maestro/makeComplex2.py | Python | gpl-2.0 | 6,208 | 0.01047 | '''
#=============================================================================
# FileName: makeComplex.py
# Desc:
# Author: jlpeng
# Email: jlpeng1201@gmail.com
# HomePage:
# Created: 2013-06-21 15:12:17
# LastChange: 2013-06-24 09:32:03
# History:
#=============================================================================
'''
import sys
import os
# Schrodinger's bundled Python provides these modules; if the import fails,
# point the user at the environment-setup batch file next to this script
# (Windows-style path handling).
try:
    from schrodinger import structure, structureutil
    import stageTools
except ImportError:
    print "\n Please run `%s\\maestro_variable.bat` first!!!\n"%(sys.argv[0][:sys.argv[0].rfind("\\")])
    sys.exit(1)
receptor = {}   # single receptor spec: key -> value strings from the config
ligands = []    # one dict per "ligand" block in the config

def parse_config(config_file):
    """Parse `config_file`, filling the module-level `receptor` dict and
    `ligands` list.

    The file consists of blank-line-separated blocks of `key: value` lines
    (lines starting with '#' are ignored). A block whose first key starts
    with "receptor" defines the single receptor; each block starting with
    "ligand" appends one ligand dict. Exits with status 1 on a malformed
    line or a duplicate receptor block.

    Changes vs. original: portable print() calls (same output under
    Python 2), and the file handle is managed with a `with` block.
    """
    global receptor
    global ligands
    with open(config_file, "r") as inf:
        line = inf.readline()
        while line != "":
            # Skip comments and blank separator lines.
            if line.startswith('#') or line.strip() == "":
                line = inf.readline()
                continue
            if line.startswith("receptor"):
                if receptor:
                    print(" Error: more than one receptor is given!!!!")
                    sys.exit(1)
                # Consume `key: value` lines until a blank line or EOF.
                while line != "" and line.strip() != "":
                    fields = line.split()
                    receptor[fields[0][:-1]] = fields[1]  # strip trailing ':'
                    line = inf.readline()
            elif line.startswith("ligand"):
                temp = {}
                while line != "" and line.strip() != "":
                    fields = line.split()
                    temp[fields[0][:-1]] = fields[1]
                    line = inf.readline()
                ligands.append(temp)
            else:
                print(" Error: invalid line `%s`" % line)
                sys.exit(1)
def writeInpFile(outfile, complex_file):
    """Write a Prime covalent-docking input (.inp) file.

    outfile: path of the .inp file to create (used to run covalent docking)
    complex_file: path of the receptor-ligand complex file the job reads
    """
    # Same content the original emitted via the Python-2-only
    # `print >>outf` syntax; written portably (and with the file handle
    # managed by a `with` block) instead.
    template = """
[ SET:COMPLEXES]
VARCLASS Structures
FILES %s
[ STAGE:PRIME ]
STAGECLASS prime.PrimeStage
INPUTS COMPLEXES
OUTPUTS DOCKED_OUT
PRIME_TYPE COVALENT_DOCKING
LIGAND X:1
INCLUDE_RESIDUE yes
NUM_OUTPUT_STRUCT 1
[ USEROUTS ]
USEROUTS DOCKED_OUT
STRUCTOUT DOCKED_OUT
"""
    with open(outfile, "w") as outf:
        outf.write(template % complex_file)
def main(argv=None):
    """Command-line driver: parse the config, then build covalently bonded
    receptor-ligand complexes for every SMARTS match in every ligand, and
    emit a Prime .inp job file per ligand output.

    argv: argument list; defaults to sys.argv. (Fix: the original used
    `argv=sys.argv` as the default, which binds sys.argv at definition
    time; the None sentinel binds it at call time.)
    """
    if argv is None:
        argv = sys.argv
    if len(argv) != 2:
        print("\n OBJ: to make complex between given ligand and receptor")
        print("\n Usage: makeComplex.py config.txt")
        print(" config.txt: where information about receptor and ligand locate")
        sys.exit(1)
    global receptor
    global ligands
    print("to parse `%s`"%argv[1])
    parse_config(argv[1])
    try:
        receptor_file = receptor['receptor_file']
        receptor_leaving_atom = int(receptor['receptor_leaving_atom'])
        receptor_staying_atom = int(receptor['receptor_staying_atom'])
    except KeyError:
        print("Error: missing keys for receptor!")
        sys.exit(1)
    receptor_st = structure.StructureReader(receptor_file).next()
    print("receptor: `%s`"%receptor_file)
    print("receptor_leaving_atom: `%d`"%receptor_leaving_atom)
    print("receptor_staying_atom: `%d`\n"%receptor_staying_atom)
    for i,ligand in enumerate(ligands):
        print("%d. to process ligand(s) from file %s"%(i+1,ligand['ligand_file']))
        try:
            ligand_file = ligand['ligand_file']
            smarts = ligand['smarts']
            complex_out = ligand['complex_out']
        except KeyError:
            print(" Error: missing key(s)!")
        else:
            if os.path.exists(complex_out):
                print(" Error: `%s` is already exists!"%complex_out)
                print(" The corresponding complexes will not be generated!!")
                continue
            total_success = 0
            # NOTE(review): neither `outf` nor `inf` is explicitly closed;
            # the Schrodinger writer may rely on close() to flush -- confirm.
            outf = structure.StructureWriter(complex_out)
            inf = structure.StructureReader(ligand_file)
            for j,ligand_st in enumerate(inf):
                if ligand_st.title == '':
                    print(" > process %dth ligand"%(j+1))
                else:
                    print(" > process %dth ligand (%s)"%(j+1,ligand_st.title))
                match_list = structureutil.evaluate_smarts(ligand_st,smarts)
                print(" totally found %d matches"%len(match_list))
                for matched_atoms in match_list:
                    # Convention: the first SMARTS-matched atom is the
                    # ligand's reactive atom.
                    ligand_reactive_atom = matched_atoms[0]
                    print(" - try to make bond between atom `%d`(ligand) and `%d`(receptor)"%(ligand_reactive_atom,receptor_staying_atom))
                    complexes_st = stageTools.makeComplex(receptor_st,receptor_leaving_atom,receptor_staying_atom,ligand_st,ligand_reactive_atom)
                    if complexes_st is None:
                        print(" Fail")
                    else:
                        tmp_count = 0
                        for complex_st in complexes_st:
                            outf.append(complex_st)
                            total_success += 1
                            tmp_count += 1
                        print(" Success (%d)"%tmp_count)
            print(" => totally %d complexes have been written to file"%total_success)
            print(" `%s`"%complex_out)
            base_name,ext = os.path.splitext(complex_out)
            inp_name = base_name+".inp"
            writeInpFile(inp_name,complex_out)
            print(" corresponding inp file is saved in")
            print(" `%s`"%inp_name)
# Run only when executed as a script, so the module can be imported
# (e.g. for testing) without side effects.
if __name__ == "__main__":
    main()
|
marcusjdh/Space-Shooter | spaceshooter.py | Python | mit | 5,096 | 0.020801 | """
spaceshooter.py
Author: Marcus Helble
Credit: Liam Abbate, Wilson Rimberg
Assignment:
Write and submit a program that implements the spacewar game:
https://github.com/HHS-IntroProgramming/Spacewar
"""
import math
from random import random

import ggame
from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
SCREEN_WIDTH = 1500
SCREEN_HEIGHT = 910

# Use the standard-library constant instead of a hand-typed literal
# (identical at double precision).
pi = math.pi

# Tile the starfield image at 512-pixel offsets to cover the playfield.
backg_asset = ImageAsset("images/starfield.jpg")
backg = Sprite(backg_asset, (0, 0))
backg2 = Sprite(backg_asset, (512, 0))
backg3 = Sprite(backg_asset, (1024, 0))
backg4 = Sprite(backg_asset, (0, 512))
backg5 = Sprite(backg_asset, (512, 512))
backg6 = Sprite(backg_asset, (1024, 512))
class SpaceShip(Sprite):
    """Player ship: arrow keys move it, space animates thrust, and touching
    a Star hides it.

    Movement state: rx/ry hold the pending direction (-5/+5), rxa/rxb and
    rya/ryb are per-key flags (0 = key pressed, 2 = key released); movement
    on an axis stops only when BOTH of that axis' flags are 2.
    """
    asset = ImageAsset("images/four_spaceship_by_albertov_with_thrust.png",
                       Frame(227, 0, 292 - 227, 125), 4, 'vertical')

    def __init__(self, position):
        super().__init__(SpaceShip.asset, position)
        self.vr = 0.01
        self.thrust = 0
        self.thrustframe = 1
        self.rx = 1
        self.ry = -1
        self.rxa = 0
        self.rxb = 0
        self.rya = 0
        self.ryb = 0
        self.hm = 0   # horizontal movement this frame: 0 none, 1 left, 2 right
        self.vm = 0   # vertical movement this frame: 0 none, 1 up, 2 down
        self.visible = True
        SpaceGame.listenKeyEvent("keydown", "space", self.thrustOn)
        SpaceGame.listenKeyEvent("keyup", "space", self.thrustOff)
        SpaceGame.listenKeyEvent("keydown", "left arrow", self.left)
        SpaceGame.listenKeyEvent("keyup", "left arrow", self.stopleft)
        SpaceGame.listenKeyEvent("keydown", "right arrow", self.right)
        SpaceGame.listenKeyEvent("keyup", "right arrow", self.stopright)
        SpaceGame.listenKeyEvent("keydown", "up arrow", self.up)
        SpaceGame.listenKeyEvent("keyup", "up arrow", self.stopup)
        SpaceGame.listenKeyEvent("keydown", "down arrow", self.down)
        SpaceGame.listenKeyEvent("keyup", "down arrow", self.stopdown)
        self.fxcenter = self.fycenter = 0.5

    def step(self):
        """Advance one frame: animate thrust, move, rotate toward the
        movement direction, and check for star collisions."""
        self.rotation = 0
        if self.thrust == 1:
            # Cycle through thrust animation frames 1..3.
            self.setImage(self.thrustframe)
            self.thrustframe += 1
            if self.thrustframe == 4:
                self.thrustframe = 1
        else:
            self.setImage(0)
        # Horizontal movement.
        if self.rxa == 2 and self.rxb == 2:
            self.x = self.x
            self.hm = 0
        else:
            if self.rx == -5:
                self.x = self.x - 10
                self.hm = 1
            if self.rx == 5:
                self.x = self.x + 10
                self.hm = 2
        # Vertical movement.
        if self.rya == 2 and self.ryb == 2:
            self.y = self.y
            self.vm = 0
        else:
            if self.ry == -5:
                self.y = self.y - 10
                self.vm = 1
            if self.ry == 5:
                self.y = self.y + 10
                self.vm = 2
        # Point the ship along its movement direction (angles in radians).
        if self.hm == 0 and self.vm == 0:
            self.rotation = 0
            self.thrust = 0
        else:
            self.thrust = 1
            if self.hm == 1:
                if self.vm == 1:
                    self.rotation = (1 / 4) * pi
                elif self.vm == 2:
                    self.rotation = (3 / 4) * pi
                else:
                    self.rotation = pi / 2
            elif self.hm == 2:
                if self.vm == 1:
                    self.rotation = (7 / 4) * pi
                elif self.vm == 2:
                    self.rotation = (5 / 4) * pi
                else:
                    self.rotation = (3 / 2) * pi
            else:
                if self.vm == 1:
                    self.rotation = 0
                elif self.vm == 2:
                    self.rotation = pi
        collision = self.collidingWithSprites(Star)
        if len(collision) > 0:
            self.visible = False

    def thrustOn(self, event):
        self.thrust = 1

    def thrustOff(self, event):
        self.thrust = 0

    def left(self, event):
        self.rx = -5
        self.rxa = 0

    def right(self, event):
        self.rx = 5
        self.rxb = 0

    def up(self, event):
        self.ry = -5
        self.rya = 0

    def down(self, event):
        self.ry = 5
        self.ryb = 0

    def stopleft(self, event):
        self.rxa = 2

    def stopright(self, event):
        self.rxb = 2

    # FIX: the original swapped these two flags (stopup set ryb, stopdown
    # set rya), breaking the per-key pattern used by the horizontal
    # handlers: each key's release must set the SAME flag its press clears
    # (up uses rya, down uses ryb).
    def stopup(self, event):
        self.rya = 2

    def stopdown(self, event):
        self.ryb = 2
class Star(Sprite):
    """A sun obstacle; SpaceShip.step hides ships that collide with it."""
    asset=ImageAsset("images/sun.png")
    # NOTE(review): height/width appear unused here -- presumably the sprite
    # takes its size from the image asset; confirm before removing.
    height=300
    width=300
    def __init__(self, position):
        super().__init__(Star.asset, position)
        # Center the sprite on its position and use a circular hit box.
        self.fxcenter = 0.5
        self.fycenter = 0.5
        self.circularCollisionModel()
class SpaceGame(App):
    """Main application: spawns the ships and stars, then steps every ship
    once per frame."""
    def step(self):
        for ship in self.getSpritesbyClass(SpaceShip):
            ship.step()
    def __init__(self, width, height):
        super().__init__(width, height)
        # Initial scene: three ships and two suns at fixed positions.
        SpaceShip((125,100))
        SpaceShip((175,150))
        SpaceShip((75,150))
        Star((600,600))
        Star((850,300))
# Start the game only when run as a script, so the module can be imported
# without opening a window.
if __name__ == "__main__":
    myapp = SpaceGame(SCREEN_WIDTH, SCREEN_HEIGHT)
    myapp.run()
saukrIppl/seahub | thirdpart/openpyxl-2.3.0-py2.7.egg/openpyxl/drawing/text.py | Python | apache-2.0 | 22,428 | 0.00165 | from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.compat import unicode
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Alias,
Typed,
Set,
NoneSet,
Sequence,
String,
Bool,
MinMax,
Integer
)
from openpyxl.descriptors.excel import (
HexBinary,
TextPoint,
Coordinate,
ExtensionList
)
from openpyxl.descriptors.nested import (
NestedInteger,
NestedString,
NestedText,
NestedValue,
EmptyTag
)
from openpyxl.xml.constants import DRAWING_NS
from .colors import ColorChoiceDescriptor
from .effect import *
from .fill import *
from .shapes import (
LineProperties,
Color,
Scene3D
)
from openpyxl.descriptors.excel import ExtensionList as OfficeArtExtensionList
class EmbeddedWAVAudioFile(Serialisable):
    """Reference to an embedded WAV audio file (used by Hyperlink.snd)."""

    name = Typed(expected_type=String, allow_none=True)

    def __init__(self,
                 name=None,
                 ):
        self.name = name
class Hyperlink(Serialisable):
    """DrawingML hyperlink with optional sound, tooltip and click/mouse-over
    behaviour flags."""

    invalidUrl = Typed(expected_type=String, allow_none=True)
    action = Typed(expected_type=String, allow_none=True)
    tgtFrame = Typed(expected_type=String, allow_none=True)
    tooltip = Typed(expected_type=String, allow_none=True)
    history = Typed(expected_type=Bool, allow_none=True)
    highlightClick = Typed(expected_type=Bool, allow_none=True)
    endSnd = Typed(expected_type=Bool, allow_none=True)
    snd = Typed(expected_type=EmbeddedWAVAudioFile, allow_none=True)
    extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)

    def __init__(self,
                 invalidUrl=None,
                 action=None,
                 tgtFrame=None,
                 tooltip=None,
                 history=None,
                 highlightClick=None,
                 endSnd=None,
                 snd=None,
                 extLst=None,
                 ):
        self.invalidUrl = invalidUrl
        self.action = action
        self.tgtFrame = tgtFrame
        self.tooltip = tooltip
        self.history = history
        self.highlightClick = highlightClick
        self.endSnd = endSnd
        self.snd = snd
        self.extLst = extLst
class Font(Serialisable):
    """A DrawingML font reference; serialised as <a:latin> but also reused
    for the ea/cs/sym font slots of CharacterProperties."""

    tagname = "latin"
    namespace = DRAWING_NS

    typeface = String()
    panose = Typed(expected_type=HexBinary, allow_none=True)
    pitchFamily = Typed(expected_type=MinMax, allow_none=True)
    charset = Typed(expected_type=MinMax, allow_none=True)

    def __init__(self,
                 typeface=None,
                 panose=None,
                 pitchFamily=None,
                 charset=None,
                 ):
        self.typeface = typeface
        self.panose = panose
        self.pitchFamily = pitchFamily
        self.charset = charset
class CharacterProperties(Serialisable):
    """Run-level text properties (DrawingML <a:defRPr>): font, size,
    bold/italic/underline, fills, effects and hyperlink behaviour.

    NOTE(review): the `extLst` parameter is accepted by __init__ but never
    assigned to self.extLst (and is excluded from __elements__) -- likely
    an intentional drop of the extension list on round-trip; confirm.
    """

    tagname = "defRPr"
    namespace = DRAWING_NS

    kumimoji = Bool(allow_none=True)
    lang = String(allow_none=True)
    altLang = String(allow_none=True)
    sz = Integer()
    b = Bool(allow_none=True)
    i = Bool(allow_none=True)
    u = NoneSet(values=(['words', 'sng', 'dbl', 'heavy', 'dotted',
                         'dottedHeavy', 'dash', 'dashHeavy', 'dashLong', 'dashLongHeavy',
                         'dotDash', 'dotDashHeavy', 'dotDotDash', 'dotDotDashHeavy', 'wavy',
                         'wavyHeavy', 'wavyDbl']))
    strike = NoneSet(values=(['noStrike', 'sngStrike', 'dblStrike']))
    kern = Integer(allow_none=True)
    cap = NoneSet(values=(['small', 'all']))
    spc = Integer(allow_none=True)
    normalizeH = Bool(allow_none=True)
    baseline = Integer(allow_none=True)
    noProof = Bool(allow_none=True)
    dirty = Bool(allow_none=True)
    err = Bool(allow_none=True)
    smtClean = Bool(allow_none=True)
    smtId = Integer(allow_none=True)
    bmk = String(allow_none=True)
    ln = Typed(expected_type=LineProperties, allow_none=True)
    highlight = Typed(expected_type=Color, allow_none=True)
    latin = Typed(expected_type=Font, allow_none=True)
    ea = Typed(expected_type=Font, allow_none=True)
    cs = Typed(expected_type=Font, allow_none=True)
    sym = Typed(expected_type=Font, allow_none=True)
    hlinkClick = Typed(expected_type=Hyperlink, allow_none=True)
    hlinkMouseOver = Typed(expected_type=Hyperlink, allow_none=True)
    rtl = Bool(nested=True, allow_none=True)
    extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
    # uses element group EG_FillProperties
    noFill = EmptyTag(namespace=DRAWING_NS)
    solidFill = ColorChoiceDescriptor()
    gradFill = Typed(expected_type=GradientFillProperties, allow_none=True)
    blipFill = Typed(expected_type=BlipFillProperties, allow_none=True)
    pattFill = Typed(expected_type=PatternFillProperties, allow_none=True)
    grpFill = EmptyTag(namespace=DRAWING_NS)
    # uses element group EG_EffectProperties
    effectLst = Typed(expected_type=EffectList, allow_none=True)
    effectDag = Typed(expected_type=EffectContainer, allow_none=True)
    # uses element group EG_TextUnderlineLine
    uLnTx = EmptyTag()
    uLn = Typed(expected_type=LineProperties, allow_none=True)
    # uses element group EG_TextUnderlineFill
    uFillTx = EmptyTag()
    uFill = EmptyTag()

    __elements__ = ('ln', 'highlight', 'latin', 'ea', 'cs', 'sym',
                    'hlinkClick', 'hlinkMouseOver', 'rtl', 'noFill', 'solidFill', 'gradFill',
                    'blipFill', 'pattFill', 'grpFill', 'effectLst', 'effectDag', 'uLnTx',
                    'uLn', 'uFillTx', 'uFill')

    def __init__(self,
                 kumimoji=None,
                 lang=None,
                 altLang=None,
                 sz=None,
                 b=None,
                 i=None,
                 u=None,
                 strike=None,
                 kern=None,
                 cap=None,
                 spc=None,
                 normalizeH=None,
                 baseline=None,
                 noProof=None,
                 dirty=None,
                 err=None,
                 smtClean=None,
                 smtId=None,
                 bmk=None,
                 ln=None,
                 highlight=None,
                 latin=None,
                 ea=None,
                 cs=None,
                 sym=None,
                 hlinkClick=None,
                 hlinkMouseOver=None,
                 rtl=None,
                 extLst=None,
                 noFill=None,
                 solidFill=None,
                 gradFill=None,
                 blipFill=None,
                 pattFill=None,
                 grpFill=None,
                 effectLst=None,
                 effectDag=None,
                 uLnTx=None,
                 uLn=None,
                 uFillTx=None,
                 uFill=None,
                 ):
        self.kumimoji = kumimoji
        self.lang = lang
        self.altLang = altLang
        self.sz = sz
        self.b = b
        self.i = i
        self.u = u
        self.strike = strike
        self.kern = kern
        self.cap = cap
        self.spc = spc
        self.normalizeH = normalizeH
        self.baseline = baseline
        self.noProof = noProof
        self.dirty = dirty
        self.err = err
        self.smtClean = smtClean
        self.smtId = smtId
        self.bmk = bmk
        self.ln = ln
        self.highlight = highlight
        self.latin = latin
        self.ea = ea
        self.cs = cs
        self.sym = sym
        self.hlinkClick = hlinkClick
        self.hlinkMouseOver = hlinkMouseOver
        self.rtl = rtl
        self.noFill = noFill
        self.solidFill = solidFill
        self.gradFill = gradFill
        self.blipFill = blipFill
        self.pattFill = pattFill
        self.grpFill = grpFill
        self.effectLst = effectLst
        self.effectDag = effectDag
        self.uLnTx = uLnTx
        self.uLn = uLn
        self.uFillTx = uFillTx
        self.uFill = uFill
class TabStop(Serialisable):
pos = Typed(expected_type=Coordinate, allow_none=True)
algn = Typed(expected_type=Set(values=(['l', 'ctr', 'r', 'dec'])))
def __init__(self,
pos=None,
algn |
wandb/client | wandb/vendor/pygments/styles/paraiso_light.py | Python | mit | 5,645 | 0 | # -*- coding: utf-8 -*-
"""
pygments.styles.paraiso_light
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Paraíso (Light) by Jan T. Sott
Pygments template by Jan T. Sott (https://github.com/idleberg)
Created with Base16 Builder by Chris Kempson
(https://github.com/chriskempson/base16-builder).
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
# Base16 "Paraíso" palette (Jan T. Sott).  Each constant below is wired
# into one or more token styles in ParaisoLightStyle; CURRENT_LINE is kept
# for palette completeness even though no token style references it.
BACKGROUND = "#e7e9db"
CURRENT_LINE = "#b9b6b0"
SELECTION = "#a39e9b"
FOREGROUND = "#2f1e2e"
COMMENT = "#8d8687"
RED = "#ef6155"
ORANGE = "#f99b15"
YELLOW = "#fec418"
GREEN = "#48b685"
AQUA = "#5bc4bf"
BLUE = "#06b6ef"
PURPLE = "#815ba4"
class ParaisoLightStyle(Style):
    """Pygments style for the Base16 "Paraíso Light" color scheme.

    Maps token types to the module-level palette constants above.  An
    empty string means "inherit from the parent token type".
    """

    default_style = ''

    # Fixed: these two attributes used to be assigned twice in a row;
    # the duplicate assignments were dead code and have been removed.
    background_color = BACKGROUND
    highlight_color = SELECTION

    styles = {
        # No corresponding class for the following:
        Text:                      FOREGROUND,  # class: ''
        Whitespace:                "",          # class: 'w'
        Error:                     RED,         # class: 'err'
        Other:                     "",          # class 'x'

        Comment:                   COMMENT,     # class: 'c'
        Comment.Multiline:         "",          # class: 'cm'
        Comment.Preproc:           "",          # class: 'cp'
        Comment.Single:            "",          # class: 'c1'
        Comment.Special:           "",          # class: 'cs'

        Keyword:                   PURPLE,      # class: 'k'
        Keyword.Constant:          "",          # class: 'kc'
        Keyword.Declaration:       "",          # class: 'kd'
        Keyword.Namespace:         AQUA,        # class: 'kn'
        Keyword.Pseudo:            "",          # class: 'kp'
        Keyword.Reserved:          "",          # class: 'kr'
        Keyword.Type:              YELLOW,      # class: 'kt'

        Operator:                  AQUA,        # class: 'o'
        Operator.Word:             "",          # class: 'ow' - like keywords

        Punctuation:               FOREGROUND,  # class: 'p'

        Name:                      FOREGROUND,  # class: 'n'
        Name.Attribute:            BLUE,        # class: 'na' - to be revised
        Name.Builtin:              "",          # class: 'nb'
        Name.Builtin.Pseudo:       "",          # class: 'bp'
        Name.Class:                YELLOW,      # class: 'nc' - to be revised
        Name.Constant:             RED,         # class: 'no' - to be revised
        Name.Decorator:            AQUA,        # class: 'nd' - to be revised
        Name.Entity:               "",          # class: 'ni'
        Name.Exception:            RED,         # class: 'ne'
        Name.Function:             BLUE,        # class: 'nf'
        Name.Property:             "",          # class: 'py'
        Name.Label:                "",          # class: 'nl'
        Name.Namespace:            YELLOW,      # class: 'nn' - to be revised
        Name.Other:                BLUE,        # class: 'nx'
        Name.Tag:                  AQUA,        # class: 'nt' - like a keyword
        Name.Variable:             RED,         # class: 'nv' - to be revised
        Name.Variable.Class:       "",          # class: 'vc' - to be revised
        Name.Variable.Global:      "",          # class: 'vg' - to be revised
        Name.Variable.Instance:    "",          # class: 'vi' - to be revised

        Number:                    ORANGE,      # class: 'm'
        Number.Float:              "",          # class: 'mf'
        Number.Hex:                "",          # class: 'mh'
        Number.Integer:            "",          # class: 'mi'
        Number.Integer.Long:       "",          # class: 'il'
        Number.Oct:                "",          # class: 'mo'

        Literal:                   ORANGE,      # class: 'l'
        Literal.Date:              GREEN,       # class: 'ld'

        String:                    GREEN,       # class: 's'
        String.Backtick:           "",          # class: 'sb'
        String.Char:               FOREGROUND,  # class: 'sc'
        String.Doc:                COMMENT,     # class: 'sd' - like a comment
        String.Double:             "",          # class: 's2'
        String.Escape:             ORANGE,      # class: 'se'
        String.Heredoc:            "",          # class: 'sh'
        String.Interpol:           ORANGE,      # class: 'si'
        String.Other:              "",          # class: 'sx'
        String.Regex:              "",          # class: 'sr'
        String.Single:             "",          # class: 's1'
        String.Symbol:             "",          # class: 'ss'

        Generic:                   "",                    # class: 'g'
        Generic.Deleted:           RED,                   # class: 'gd',
        Generic.Emph:              "italic",              # class: 'ge'
        Generic.Error:             "",                    # class: 'gr'
        Generic.Heading:           "bold " + FOREGROUND,  # class: 'gh'
        Generic.Inserted:          GREEN,                 # class: 'gi'
        Generic.Output:            "",                    # class: 'go'
        Generic.Prompt:            "bold " + COMMENT,     # class: 'gp'
        Generic.Strong:            "bold",                # class: 'gs'
        Generic.Subheading:        "bold " + AQUA,        # class: 'gu'
        Generic.Traceback:         "",                    # class: 'gt'
    }
|
vreon/figment | examples/theworldfoundry/tests/test_spatial_stackable.py | Python | mit | 6,050 | 0 | import pytest
# --- look/get behaviour for stackable items ("gold" is a 50-coin stack,
# --- "ball" is a non-stackable item); fixtures come from conftest.py.

def test_look(player, gold):
    player.perform("look")
    assert player.saw("a gold coin (50)")

def test_get_all_implicit(player, gold):
    # No quantity given: "get gold" should take the entire stack.
    player.perform("get gold")
    assert player.saw("You pick up a gold coin (50).")
    player.forget()
    player.perform("look")
    assert player.did_not_see("gold coin")

def test_get_all_explicit(player, gold):
    player.perform("get all gold")
    assert player.saw("You pick up a gold coin (50).")
    player.forget()
    player.perform("look")
    assert player.did_not_see("gold coin")

def test_get_all_by_quantity(player, gold):
    # Asking for exactly the whole stack behaves like "get all".
    player.perform("get 50 gold")
    assert player.saw("You pick up a gold coin (50).")
    player.forget()
    player.perform("look")
    assert player.did_not_see("gold coin")

def test_get_all_invalid(player):
    player.perform("get all gold")
    assert player.saw("You don't see any 'gold' nearby.")

def test_get_subset(player, gold):
    # Taking part of a stack splits it: 10 in inventory, 40 left in room.
    player.perform("get 10 gold")
    assert player.saw("You pick up a gold coin (10).")
    player.forget()
    player.perform("inv")
    assert player.saw("gold coin (10)")
    player.forget()
    player.perform("look")
    assert player.saw("gold coin (40)")

def test_get_zero(player, gold):
    player.perform("get 0 gold")
    assert player.saw("You're unable to do that.")

def test_get_too_many(player, gold):
    player.perform("get 999 gold")
    assert player.saw("You don't see 999 'gold' nearby.")

def test_get_combining(player, gold):
    # Two partial picks should merge into a single inventory stack.
    player.perform("get 10 gold")
    player.perform("get 20 gold")
    player.forget()
    player.perform("inv")
    assert player.saw("a gold coin (30)")

def test_get_nonstackable(player, ball):
    # Quantity 1 is allowed on non-stackable items.
    player.perform("get 1 ball")
    assert player.saw("a red ball")

def test_get_too_many_nonstackable(player, ball):
    player.perform("get 5 ball")
    assert player.saw("You don't see 5 'ball' nearby.")
# --- drop behaviour for stackable items ---

def test_drop_all_implicit(player, gold):
    player.perform("get 10 gold")
    player.forget()
    player.perform("drop gold")
    assert player.saw("You drop a gold coin (10).")

def test_drop_all_explicit(player, gold):
    player.perform("get 10 gold")
    player.forget()
    player.perform("drop all gold")
    assert player.saw("You drop a gold coin (10).")

def test_drop_subset(player, gold):
    player.perform("get 10 gold")
    player.forget()
    player.perform("drop 5 gold")
    assert player.saw("You drop a gold coin (5).")

def test_drop_too_many(player, gold):
    player.perform("get 30 gold")
    player.forget()
    player.perform("drop 999 gold")
    assert player.saw("You don't see 999 'gold' in your inventory.")

def test_drop_combining(player, gold):
    # Dropping back onto the room stack must re-merge to the full 50.
    player.perform("get 10 gold")
    player.perform("drop 5 gold")
    player.perform("drop 5 gold")
    player.forget()
    player.perform("look")
    assert player.saw("a gold coin (50)")
# --- putting stacks into a container ("box") ---

def test_put_in_all_implicit(player, gold, box):
    player.perform("put gold in box")
    assert player.saw("a gold coin (50) in a cardboard box")
    player.forget()
    player.perform("look")
    assert player.did_not_see("gold coin")

def test_put_in_all_explicit(player, gold, box):
    player.perform("put all gold in box")
    assert player.saw("a gold coin (50) in a cardboard box")
    player.forget()
    player.perform("look")
    assert player.did_not_see("gold coin")

def test_put_in_subset(player, gold, box):
    player.perform("put 20 gold in box")
    assert player.saw("a gold coin (20) in a cardboard box")
    player.forget()
    player.perform("look")
    assert player.saw("gold coin (30)")
    player.forget()
    player.perform("look in box")
    assert player.saw("gold coin (20)")

def test_put_in_too_many(player, gold, box):
    player.perform("put 999 gold in box")
    assert player.saw("You don't see 999 'gold' nearby.")

def test_put_in_combining(player, gold, box):
    # Two deposits into the same container should merge into one stack.
    player.perform("put 5 gold in box")
    player.perform("put 5 gold in box")
    player.forget()
    player.perform("look in box")
    assert player.saw("a gold coin (10)")

def test_put_in_ambiguous(player, gold, box):
    # 5 coins exist both in inventory and in the room, so the engine
    # must prompt for disambiguation before moving anything.
    player.perform("get 5 gold")
    player.perform("put 5 gold in box")
    assert player.saw("Which")
    assert player.saw("a gold coin (5) (in inventory)")
    player.forget()
    player.perform("1") # XXX: Could be from inv, could be from room
    assert player.saw("a gold coin (5) in a cardboard box")

def test_put_in_semiambiguous(player, gold, box):
    player.perform("get 45 gold")
    # Would be ambiguous if not for minimum quantity
    player.perform("put 45 gold in box")
    assert player.saw("a gold coin (45) in a cardboard box")
# --- taking stacks back out of a container; store_in() pre-seeds the box ---

def test_get_from_all_implicit(player, gold, box):
    gold.Spatial.store_in(box)
    player.perform("get gold from box")
    assert player.saw("a gold coin (50) from a cardboard box")
    player.forget()
    player.perform("look in box")
    assert player.did_not_see("gold coin")

def test_get_from_all_explicit(player, gold, box):
    gold.Spatial.store_in(box)
    player.perform("get all gold from box")
    assert player.saw("a gold coin (50) from a cardboard box")
    player.forget()
    player.perform("look in box")
    assert player.did_not_see("gold coin")

def test_get_from_subset(player, gold, box):
    gold.Spatial.store_in(box)
    player.perform("get 20 gold from box")
    assert player.saw("a gold coin (20) from a cardboard box")
    player.forget()
    player.perform("look in box")
    assert player.saw("gold coin (30)")
    player.forget()
    player.perform("inv")
    assert player.saw("gold coin (20)")

def test_get_from_too_many(player, gold, box):
    gold.Spatial.store_in(box)
    player.perform("get 999 gold from box")
    assert player.saw("You don't see 999 'gold' in a cardboard box.")

def test_get_from_combining(player, gold, box):
    # Repeated withdrawals should merge in the player's inventory.
    gold.Spatial.store_in(box)
    player.perform("get 5 gold from box")
    player.perform("get 5 gold from box")
    player.forget()
    player.perform("inv")
    assert player.saw("a gold coin (10)")
|
dani0805/django_workflow | django_workflow/migrations/0006_transition_description.py | Python | bsd-3-clause | 518 | 0.001931 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-01-08 11:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add an optional free-text ``description`` field
    to the ``Transition`` model."""

    dependencies = [
        ('django_workflow', '0005_auto_20180104_1559'),
    ]
    operations = [
        migrations.AddField(
            model_name='transition',
            name='description',
            field=models.CharField(blank=True, max_length=400, null=True, verbose_name='Description'),
        ),
    ]
|
biokit/biokit | test/rtools/test_tools.py | Python | bsd-2-clause | 434 | 0.016129 | from biokit.rtools import tools
import pytest
import os
# Marker for tests that cannot run on Travis CI workers (detected via the
# TRAVIS_PYTHON_VERSION environment variable set by Travis builds).
skiptravis = pytest.mark.skipif("TRAVIS_PYTHON_VERSION" in os.environ,
        reason="On travis")
@skiptravis
def test_codecs():
    """bool2R maps Python booleans to R literals and rejects other input."""
    assert 'T' == tools.bool2R(True)
    assert 'F' == tools.bool2R(False)
    # The old try/`assert False`/bare-except pattern swallowed the
    # AssertionError itself, so the test passed even if bool2R accepted
    # bad input.  pytest.raises has no such hole.
    with pytest.raises(Exception):
        tools.bool2R('ggg')
@skiptravis
def test_rcode():
    # rcode() evaluates R source and exposes the resulting R variables
    # as attributes on the returned object.
    r = tools.rcode('a=1')
    assert r.a == 1
|
emanueldima/b2share | b2share/modules/records/serializers/schemas/marcxml.py | Python | gpl-2.0 | 5,627 | 0.001955 | # -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2016 CERN, University of Tuebingen.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""B2Share Records Marc schemas used for serialization."""
from __future__ import absolute_import, print_function
from itertools import chain
from marshmallow import Schema, fields, post_dump
from dateutil.parser import parse
from flask import current_app
class RecordSchemaMarcXMLV1(Schema):
    """Schema mapping a B2Share record dict onto MARC field names.

    Each attribute name below is a MARC field; ``fields.Method`` resolvers
    are defined inline, ``fields.Function`` ones as lambdas over the raw
    record ``o`` (a dict with ``metadata``/``updated`` keys).
    """

    control_number = fields.Method('get_id')

    def get_id(self, obj):
        # The record's own PID (type 'b2rec'); assumes exactly one exists.
        pids = obj['metadata'].get('_pid')
        p = [p['value'] for p in pids if p['type'] == 'b2rec']
        return str(p[0])

    other_standard_identifier = fields.Method('get_other_standard_identifier')

    date_and_time_of_latest_transaction = fields.Function(
        lambda o: parse(o['updated']).strftime("%Y%m%d%H%M%S.0"))

    main_entry_personal_name = fields.Method('get_main_entry_personal_name')
    added_entry_personal_name = fields.Method('get_added_entry_personal_name')

    title_statement = fields.Function(
        lambda o: o['metadata']['titles'][0])

    publication_distribution_imprint = fields.Function(
        lambda o: dict(name_of_publisher_distributor=o['metadata'].get('publisher'),
                       date_of_publication_distribution=o['metadata'].get('publication_date')))

    media_type = fields.Function(
        lambda o: dict(media_type_term=[x['resource_type_general']
                                        for x in o['metadata'].get('resource_types', [])]))

    summary = fields.Function(
        lambda o: [dict(summary=x.get('description'))
                   for x in o['metadata'].get('descriptions', [])])

    study_program_information_note = fields.Function(
        lambda o: [dict(program_name=o['metadata'].get('disciplines', []))])

    terms_governing_use_and_reproduction_note = fields.Function(
        lambda o: dict(terms_governing_use_and_reproduction=
                       o['metadata'].get('license', {}).get('license'),
                       uniform_resource_identifier=
                       o['metadata'].get('license', {}).get('license_uri')))

    information_relating_to_copyright_status = fields.Function(
        lambda o: dict(copyright_status='open' if o['metadata']['open_access'] else 'closed'))

    language_note = fields.Function(
        lambda o: [dict(language_note=o['metadata'].get('language'))])

    index_term_uncontrolled = fields.Function(
        lambda o: [dict(uncontrolled_term=x) for x in o['metadata'].get('keywords', [])])

    # NOTE(review): files without an ePIC_PID serialize as None entries;
    # presumably these are dropped later by remove_empty_fields — confirm.
    electronic_location_and_access = fields.Function(
        lambda o: [dict(uniform_resource_identifier=f['ePIC_PID'],
                        file_size=str(f['size']),
                        access_method="HTTP")
                   if f.get('ePIC_PID') else None
                   for f in o['metadata'].get('_files', [])])

    # Custom fields:
    embargo_date = fields.Raw(attribute='metadata.embargo_date')
    _oai = fields.Raw(attribute='metadata._oai')

    def get_other_standard_identifier(self, o):
        # Persistent identifiers (ePIC handle, DOI) plus any user-supplied
        # alternate identifiers, all flattened to MARC number dicts.
        pids = [p['value'] for p in o['metadata']['_pid']
                if p['type'] in {'ePIC_PID', 'DOI'}]
        alt_ids = [x['alternate_identifier']
                   for x in o['metadata'].get('alternate_identifiers', [])]
        return [dict(standard_number_or_code=x) for x in chain(pids, alt_ids)]

    def get_main_entry_personal_name(self, o):
        # MARC main entry is the first creator only; empty dict if none.
        creators = o['metadata'].get('creators', [])
        if len(creators) > 0:
            return dict(personal_name=creators[0]['creator_name'])
        return dict()

    def get_added_entry_personal_name(self, o):
        """Get added_entry_personal_name.

        All creators after the first, followed by every contributor.
        """
        items = []
        creators = o['metadata'].get('creators', [])
        if len(creators) > 1:
            for c in creators[1:]:
                items.append(dict(personal_name=c['creator_name']))
        contributors = o['metadata'].get('contributors', [])
        for c in contributors:
            items.append(dict(personal_name=c['contributor_name']))
        return items

    @post_dump(pass_many=True)
    def remove_empty_fields(self, data, many):
        """Dump + Remove empty fields."""
        _filter_empty(data)
        return data
def _filter_empty(record):
    """Recursively remove empty fields from *record*, in place.

    Dicts lose falsy values; lists lose falsy elements.  Containers that
    become empty after recursion are removed from their parent as well.
    """
    if isinstance(record, dict):
        for k in list(record.keys()):
            if record[k]:
                _filter_empty(record[k])
            if not record[k]:
                del record[k]
    elif isinstance(record, list) or isinstance(record, tuple):
        # Walk indices in reverse so a deletion never shifts the
        # positions of entries still to be visited.  (The old forward
        # loop over a stale enumerate() snapshot deleted the wrong
        # elements or raised IndexError once anything was removed.)
        for k in reversed(range(len(record))):
            if record[k]:
                _filter_empty(record[k])
            if not record[k]:
                del record[k]
|
bchappet/dnfpy | src/test_dnfpy/model/testFieldMap.py | Python | gpl-2.0 | 1,305 | 0.02069 | import unittest
from dnfpy.model.fieldMap import FieldMap
class TestFieldMap(unittest.TestCase):
    """Unit tests for FieldMap under the 'cnft' and 'spike' models."""

    def setUp(self):
        # Fresh 1x1 field per test so state never leaks between cases.
        self.uut = FieldMap("uut",size=1,dt=0.1,lat=1,aff=1,tau=0.8,h=0,
                th=0.64,model='cnft')

    def test_update(self):
        self.uut.update(0.1)
        obtained = self.uut.getData()
        expected = 0.25
        self.assertEqual(obtained,expected)

    def test_update2(self):
        self.uut.update(0.1)
        self.uut.update(0.2)
        obtained = self.uut.getData()
        expected = 0.46875
        self.assertEqual(obtained,expected)

    def test_update_spike(self):
        self.uut.setArg(model='spike')
        self.uut.update(0.1)
        obtained = self.uut.getData()
        expected = 1.375
        self.assertEqual(obtained,expected)

    # Fixed: this test was previously also named test_update_spike, so it
    # silently shadowed the single-step case above and that case never ran.
    def test_update_spike_two_steps(self):
        self.uut.setArg(model='spike')
        self.uut.update(0.1)
        self.uut.update(0.2)
        obtained = self.uut.getData()
        expected = 1.375
        self.assertEqual(obtained,expected)
if __name__ == "__main__":
unittest.main()
|
jordanemedlock/psychtruths | temboo/core/Library/Foursquare/Users/Badges.py | Python | apache-2.0 | 3,462 | 0.004622 | # -*- coding: utf-8 -*-
###############################################################################
#
# Badges
# Returns badges for a given user.
#
# Python vers | ions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
| # software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Badges(Choreography):
    """Choreo wrapper for the Foursquare "user badges" endpoint."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the Badges Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(Badges, self).__init__(temboo_session, '/Library/Foursquare/Users/Badges')

    def new_input_set(self):
        # Factory for the input container consumed by execute().
        return BadgesInputSet()

    def _make_result_set(self, result, path):
        return BadgesResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return BadgesChoreographyExecution(session, exec_id, path)
class BadgesInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the Badges
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter maps one-to-one onto a parameter of the Foursquare API call.

    def set_OauthToken(self, value):
        """
        Set the value of the OauthToken input for this Choreo. ((required, string) The Foursquare API OAuth token string.)
        """
        super(BadgesInputSet, self)._set_input('OauthToken', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that response should be in. Can be set to xml or json. Defaults to json.)
        """
        super(BadgesInputSet, self)._set_input('ResponseFormat', value)

    def set_UserID(self, value):
        """
        Set the value of the UserID input for this Choreo. ((optional, string) Identity of the user to get badges for. Defaults to "self" to get lists of the acting user.)
        """
        super(BadgesInputSet, self)._set_input('UserID', value)
class BadgesResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the Badges Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # Helper to parse a raw JSON response body into Python objects.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Foursquare. Corresponds to the ResponseFormat input. Defaults to JSON.)
        """
        return self._output.get('Response', None)
class BadgesChoreographyExecution(ChoreographyExecution):
    """Tracks an asynchronous execution of the Badges Choreo."""

    def _make_result_set(self, response, path):
        return BadgesResultSet(response, path)
|
dimka665/range-regex | setup.py | Python | bsd-2-clause | 628 | 0.001592 | import os
from setuptools import find_packages, setup

root = os.path.dirname(os.path.realpath(__file__))

# Use a context manager so the README handle is closed deterministically
# instead of being leaked until garbage collection.
with open(os.path.join(root, 'README.rst')) as readme:
    long_description = readme.read()

setup(
    name='range-regex',
    version='1.0.3',
    description='Python numeric range regular expression generator',
    long_description=long_description,
    url='http://github.com/dimka665/range-regex',
    author='Dmitry Voronin',
    author_email='dimka665@gmail.com',
    license='BSD',
    packages=find_packages(),
    include_package_data=True,
    keywords='numeric range regex regular expression generator',
)
mastizada/kuma | vendor/packages/ipython/IPython/Extensions/ext_rescapture.py | Python | mpl-2.0 | 1,498 | 0.021362 | # -*- coding: utf-8 -*-
""" IPython extension: new prefilters for output grabbing
Provides
var = %magic blah blah
var = !ls
"""
import IPython.ipapi
from IPython.genutils import *
ip = IPython.ipapi.get()
import re
def hnd_magic(line,mo):
    """Rewrite ``var = %magic args`` into a call to ``_ip.magic``.

    ``mo`` must provide ``varname`` and ``cmd`` groups; the returned
    string is the Python source that actually gets executed.
    """
    #cmd = genutils.make_quoted_expr(mo.group('syscmd'))
    #mag = 'ipmagic
    #return "%s = %s"
    var = mo.group('varname')
    cmd = mo.group('cmd')
    expr = make_quoted_expr(cmd)
    return itpl('$var = _ip.magic($expr)')
def hnd_syscmd(line,mo):
    """Rewrite ``var = !shellcmd`` into a captured system-command call.

    The shell command is run through the ``sc -l`` magic so its output is
    captured line-by-line into ``var``.
    """
    #cmd = genutils.make_quoted_expr(mo.group('syscmd'))
    #mag = 'ipmagic
    #return "%s = %s"
    var = mo.group('varname')
    cmd = mo.group('cmd')
    expr = make_quoted_expr(itpl("sc -l =$cmd"))
    return itpl('$var = _ip.magic($expr)')
def install_re_handler(pat, hnd):
    # Register (compiled_pattern, handler) for regex_prefilter_f to scan.
    ip.meta.re_prefilters.append((re.compile(pat), hnd))
def init_handlers():
    """(Re)build the prefilter table with the two assignment rewriters."""
    ip.meta.re_prefilters = []

    install_re_handler('(?P<varname>[\w\.]+)\s*=\s*%(?P<cmd>.*)',
                       hnd_magic
                       )

    install_re_handler('(?P<varname>[\w\.]+)\s*=\s*!(?P<cmd>.*)',
                       hnd_syscmd
                       )

# Populate the table once at extension-load time.
init_handlers()
def rege | x_prefilter_f(self,line):
for pat, handler in ip.meta.re_prefilters:
mo = pat.match(line)
if mo:
return handler(line,mo)
|
raise IPython.ipapi.TryNext
ip.set_hook('input_prefilter', regex_prefilter_f)
|
TheGU/omoma | omoma/omoma_web/templatetags/__init__.py | Python | gpl-3.0 | 666 | 0 | # Copyright | 2011 Sebastien Maccagnoni-Munch
#
# This file is part of Omoma.
#
# Omoma is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# Omoma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or F | ITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Omoma. If not, see <http://www.gnu.org/licenses/>.
"""
Django template tags for Omoma
"""
|
jakevdp/lombscargle | lombscargle/tests/test_lombscargle.py | Python | bsd-3-clause | 5,888 | 0 | import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy import units
from .. import LombScargle
from ..implementations import lombscargle_slow, lombscargle
METHOD_NAMES = ['auto', 'fast', 'slow', 'scipy', 'chi2', 'fastchi2']
@pytest.fixture
def data(N=100, period=1, theta=(10, 2, 3), dy=1, rseed=0):
    """Generate a noisy sinusoid ``(t, y, dy)`` for testing.

    ``theta`` is (offset, sin amplitude, cos amplitude).  A tuple default
    replaces the old mutable list default so the shared default object
    can never be mutated between tests; it is only ever indexed, so
    behaviour is unchanged.
    """
    rng = np.random.RandomState(rseed)
    t = 20 * period * rng.rand(N)
    omega = 2 * np.pi / period
    y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
    dy = dy * (0.5 + rng.rand(N))
    y += dy * rng.randn(N)
    return t, y, dy
# --- output shape and unit handling across all periodogram methods ---

@pytest.mark.parametrize('method', METHOD_NAMES)
@pytest.mark.parametrize('shape', [(), (1,), (2,), (3,), (2, 3)])
def test_output_shapes(method, shape, data):
    # The power array must broadcast to the shape of the frequency input.
    t, y, dy = data
    freq = np.asarray(np.random.rand(*shape))
    freq.flat = np.arange(1, freq.size + 1)
    PLS = lombscargle(t, y, frequency=freq,
                      fit_bias=False, method=method)
    assert_equal(PLS.shape, shape)

@pytest.mark.parametrize('method', METHOD_NAMES)
@pytest.mark.parametrize('t_unit', [units.second, units.day])
@pytest.mark.parametrize('frequency_unit', [units.Hz, 1. / units.second])
@pytest.mark.parametrize('y_unit', [units.mag, units.jansky])
def test_units_match(method, t_unit, frequency_unit, y_unit, data):
    # With consistent units, the periodogram is always dimensionless.
    t, y, dy = data
    dy = dy.mean()  # scipy only supports constant errors
    t = t * t_unit
    y = y * y_unit
    dy = dy * y_unit
    frequency = np.linspace(0.5, 1.5, 10) * frequency_unit
    PLS = lombscargle(t, y, frequency=frequency,
                      fit_bias=False, method=method)
    assert_equal(PLS.unit, units.dimensionless_unscaled)
    PLS = lombscargle(t, y, dy,
                      frequency=frequency,
                      fit_bias=False, method=method)
    assert_equal(PLS.unit, units.dimensionless_unscaled)

@pytest.mark.parametrize('method', METHOD_NAMES)
def test_units_mismatch(method, data):
    t, y, dy = data
    dy = dy.mean()  # scipy only supports constant errors
    t = t * units.second
    y = y * units.mag
    frequency = np.linspace(0.5, 1.5, 10)
    # this should fail because frequency and 1/t units do not match
    with pytest.raises(ValueError) as err:
        lombscargle(t, y, frequency=frequency,
                    method=method, fit_bias=False)
    assert str(err.value).startswith('Units of frequency not equivalent')
    # this should fail because dy and y units do not match
    with pytest.raises(ValueError) as err:
        lombscargle(t, y, dy, frequency / t.unit,
                    method=method, fit_bias=False)
    assert str(err.value).startswith('Units of y not equivalent')
# --- functional interface vs. LombScargle object interface agreement ---

@pytest.mark.parametrize('method', METHOD_NAMES)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('freq', [0.8 + 0.01 * np.arange(40)])
def test_common_interface(method, center_data, freq, data):
    # Every method must agree with the reference slow implementation
    # (approximate methods get a small absolute tolerance).
    t, y, dy = data
    if freq is None:
        freq_expected = LombScargle(t, y, dy).autofrequency(t)
    else:
        freq_expected = freq
    expected_PLS = lombscargle_slow(t, y, dy=None, frequency=freq_expected,
                                    fit_bias=False, center_data=center_data)
    PLS = lombscargle(t, y, frequency=freq, method=method,
                      fit_bias=False, center_data=center_data)
    if method in ['fastchi2', 'fast', 'auto']:
        atol = 0.005
    else:
        atol = 0
    assert_allclose(PLS, expected_PLS, atol=atol)

@pytest.mark.parametrize('method', METHOD_NAMES)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_bias', [True, False])
@pytest.mark.parametrize('freq', [0.8 + 0.01 * np.arange(40)])
def test_object_interface_power(data, method, center_data, fit_bias, freq):
    t, y, dy = data
    # scipy's backend supports neither bias fitting nor per-point errors.
    if method == 'scipy' and fit_bias:
        return
    if method == 'scipy':
        dy = None
    expected_PLS = lombscargle(t, y, dy,
                               frequency=freq,
                               method=method,
                               fit_bias=fit_bias,
                               center_data=center_data)
    ls = LombScargle(t, y, dy, fit_bias=fit_bias, center_data=center_data)
    PLS = ls.power(freq, method=method)
    assert_allclose(PLS, expected_PLS)

@pytest.mark.parametrize('method', METHOD_NAMES)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_bias', [True, False])
def test_object_interface_autopower(data, method, center_data, fit_bias):
    t, y, dy = data
    if method == 'scipy' and fit_bias:
        return
    if method == 'scipy':
        dy = None
    ls = LombScargle(t, y, dy, fit_bias=fit_bias, center_data=center_data)
    freq, PLS = ls.autopower(method=method)
    expected_PLS = lombscargle(t, y, dy, freq,
                               method=method,
                               fit_bias=fit_bias,
                               center_data=center_data)
    # TODO: test frequency output?
    assert_allclose(PLS, expected_PLS)
@pytest.mark.parametrize('fit_bias', [True, False])
@pytest.mark.parametrize('freq', [1.0, 2.0])
def test_object_interface_model(fit_bias, freq):
    # On noiseless data at the true frequency, model() must reproduce
    # the input exactly.
    rand = np.random.RandomState(0)
    t = 10 * rand.rand(40)
    params = 10 * rand.rand(3)
    y = np.zeros_like(t)
    if fit_bias:
        y += params[0]
    y += params[1] * np.sin(2 * np.pi * freq * (t - params[2]))
    ls = LombScargle(t, y, center_data=False, fit_bias=fit_bias)
    y_fit = ls.model(t, freq)
    assert_allclose(y_fit, y)

def test_object_interface_bad_input(data):
    t, y, dy = data
    ls = LombScargle(t, y, dy)
    # power() requires an explicit frequency grid (use autopower otherwise).
    with pytest.raises(ValueError) as err:
        ls.power(frequency=None)
    assert str(err.value).startswith('Must supply a valid frequency')
|
zyphrus/fetch-django | provider/urls.py | Python | mit | 537 | 0 | from django.conf.urls import url
from provider import view | s
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^new/$', views.new, name='new'),
url(r'^(?P<provider_id>\d+)/$', views.view, name='view'),
url(r'^(?P<provider_id>\d+)/edit/$', views.edit, name='edit'),
url(r'^(?P<provider_id>\d+)/delete/$', views.delete, name='delete'),
# Base Provider
url(r'^base/$', vi | ews.base_index, name='base_index'),
url(r'^base/(?P<base_provider_id>\d+)/$',
views.base_view, name='base_view'),
]
|
luotao1/Paddle | python/paddle/fluid/tests/unittests/test_profiler.py | Python | apache-2.0 | 8,547 | 0.000468 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import os
import tempfile
import numpy as np
import paddle.utils as utils
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from paddle.fluid import compiler, Program, program_guard
import paddle.fluid.proto.profiler.profiler_pb2 as profiler_pb2
class TestProfiler(unittest.TestCase):
    """End-to-end checks of fluid.profiler on a small MLP + while_op net."""

    @classmethod
    def setUpClass(cls):
        # Limit CPU devices so parallel execution is deterministic enough
        # for profiling runs.
        os.environ['CPU_NUM'] = str(4)

    def build_program(self, compile_program=True):
        """Build a toy network (fc -> while loop of fc -> fc -> softmax).

        Returns (train_program, startup_program, avg_cost, batch_size,
        batch_acc).  When compile_program is True the main program is
        wrapped in a data-parallel CompiledProgram.
        """
        startup_program = fluid.Program()
        main_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            image = fluid.layers.data(name='x', shape=[784], dtype='float32')
            hidden1 = fluid.layers.fc(input=image, size=64, act='relu')
            i = layers.zeros(shape=[1], dtype='int64')
            # Loop counter must live on CPU for the while_op condition.
            counter = fluid.layers.zeros(
                shape=[1], dtype='int64', force_cpu=True)
            until = layers.fill_constant([1], dtype='int64', value=10)
            data_arr = layers.array_write(hidden1, i)
            cond = fluid.layers.less_than(x=counter, y=until)
            while_op = fluid.layers.While(cond=cond)
            with while_op.block():
                hidden_n = fluid.layers.fc(input=hidden1, size=64, act='relu')
                layers.array_write(hidden_n, i, data_arr)
                fluid.layers.increment(x=counter, value=1, in_place=True)
                layers.less_than(x=counter, y=until, cond=cond)
            hidden_n = layers.array_read(data_arr, i)
            hidden2 = fluid.layers.fc(input=hidden_n, size=64, act='relu')
            predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
            label = fluid.layers.data(name='y', shape=[1], dtype='int64')
            cost = fluid.layers.cross_entropy(input=predict, label=label)
            avg_cost = fluid.layers.mean(cost)
            batch_size = fluid.layers.create_tensor(dtype='int64')
            batch_acc = fluid.layers.accuracy(
                input=predict, label=label, total=batch_size)
        optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)
        opts = optimizer.minimize(avg_cost, startup_program=startup_program)
        if compile_program:
            # TODO(luotao): profiler tool may have bug with multi-thread parallel executor.
            # https://github.com/PaddlePaddle/Paddle/pull/25200#issuecomment-650483092
            exec_strategy = fluid.ExecutionStrategy()
            exec_strategy.num_threads = 1
            train_program = fluid.compiler.CompiledProgram(
                main_program).with_data_parallel(
                    loss_name=avg_cost.name, exec_strategy=exec_strategy)
        else:
            train_program = main_program
        return train_program, startup_program, avg_cost, batch_size, batch_acc
def get_profile_path(self):
profile_path = os.path.join(tempfile.gettempdir(), "profile")
open(profile_path, "w").write("")
return profile_path
def check_profile_result(self, profile_path):
data = open(profile_path, 'rb').read()
if (len(data) > 0):
profile_pb = profiler_pb2.Profile()
profile_pb.ParseFromString(data)
self.assertGreater(len(profile_pb.events), 0)
for event in profile_pb.events:
if event.type == profiler_pb2.Event.GPUKernel:
if not event.detail_info and not event.name.startswith(
"MEM"):
raise Exception(
"Kernel %s missing event. Has this kernel been recorded by RecordEvent?"
% event.name)
elif event.type == profiler_pb2.Event.CPU and (
event.name.startswith("Driver API") or
| event.name.startswith("Runtime API")):
print("Warning: unregister", event.name)
def run_iter(self, exe, main_program, fetch_list):
x = np.random.random((32, 784)).astype("float32")
y = np.random.randint(0, 10, (32, 1)).astype("int64")
outs = exe.run(main_program,
feed={'x': | x,
'y': y},
fetch_list=fetch_list)
def net_profiler(self,
exe,
state,
tracer_option,
batch_range=None,
use_parallel_executor=False,
use_new_api=False):
main_program, startup_program, avg_cost, batch_size, batch_acc = self.build_program(
compile_program=use_parallel_executor)
exe.run(startup_program)
profile_path = self.get_profile_path()
if not use_new_api:
with profiler.profiler(state, 'total', profile_path, tracer_option):
for iter in range(10):
if iter == 2:
profiler.reset_profiler()
self.run_iter(exe, main_program,
[avg_cost, batch_acc, batch_size])
else:
options = utils.ProfilerOptions(options={
'state': state,
'sorted_key': 'total',
'tracer_level': tracer_option,
'batch_range': [0, 10] if batch_range is None else batch_range,
'profile_path': profile_path
})
with utils.Profiler(enabled=True, options=options) as prof:
for iter in range(10):
self.run_iter(exe, main_program,
[avg_cost, batch_acc, batch_size])
utils.get_profiler().record_step()
if batch_range is None and iter == 2:
utils.get_profiler().reset()
# TODO(luotao): check why nccl kernel in profile result.
# https://github.com/PaddlePaddle/Paddle/pull/25200#issuecomment-650483092
# self.check_profile_result(profile_path)
def test_cpu_profiler(self):
exe = fluid.Executor(fluid.CPUPlace())
for use_new_api in [False, True]:
self.net_profiler(
exe,
'CPU',
"Default",
batch_range=[5, 10],
use_new_api=use_new_api)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"profiler is enabled only with GPU")
def test_cuda_profiler(self):
exe = fluid.Executor(fluid.CUDAPlace(0))
for use_new_api in [False, True]:
self.net_profiler(
exe,
'GPU',
"OpDetail",
batch_range=[0, 10],
use_new_api=use_new_api)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"profiler is enabled only with GPU")
def test_all_profiler(self):
exe = fluid.Executor(fluid.CUDAPlace(0))
for use_new_api in [False, True]:
self.net_profiler(
exe,
'All',
"AllOpDetail",
batch_range=None,
use_new_api=use_new_api)
class TestProfilerAPIError(unittest.TestCase):
def test_errors(self):
options = utils.ProfilerOptions()
self.assertTrue(options['profile_path'] is None)
self.assertTrue(options['timeline_path'] is None)
options = options.with_state('All')
self.assertTrue(options['st |
dsoprea/NsqSpinner | nsq/producer.py | Python | gpl-2.0 | 2,076 | 0.004817 | import logging
import gevent
import nsq.master
import nsq.node_collection
import nsq.connection
_logger = logging.getLogger(__name__)
class Producer(nsq.master.Master):
    """NSQ producer: publishes messages directly to nsqd server nodes.

    Optionally enables TLS (CA bundle and/or client auth pair) and
    compression before the connections are established.
    """

    def __init__(self, node_collection, tls_ca_bundle_filepath=None,
                 tls_auth_pair=None, compression=False, identify=None,
                 *args, **kwargs):
        # A producer can interact only with nsqd servers.
        assert issubclass(
                node_collection.__class__,
                nsq.node_collection.ServerNodes) is True

        super(Producer, self).__init__(*args, **kwargs)

        # TLS is requested when either a CA bundle or an auth pair is given,
        # but a CA bundle is mandatory in that case.
        is_tls = bool(tls_ca_bundle_filepath or tls_auth_pair)

        if is_tls is True:
            if tls_ca_bundle_filepath is None:
                raise ValueError("Please provide a CA bundle.")

            # NOTE(review): these are module-level globals on nsq.connection,
            # so they affect every connection in the process, not just this
            # producer's.
            nsq.connection.TLS_CA_BUNDLE_FILEPATH = tls_ca_bundle_filepath
            nsq.connection.TLS_AUTH_PAIR = tls_auth_pair

            self.identify.set_tls_v1()

        if compression:
            # compression=True means "use the default algorithm" (encoded as
            # None for set_compression()).
            if compression is True:
                compression = None

            self.set_compression(compression)

        # Technically, any node can have a context. The elements in our current
        # context named-tuple just aren't relevant for anything but a consumer.
        context = nsq.master.NODE_CONTEXT(None, None)

        nodes = node_collection.get_servers(None)
        self.set_servers([(context, server) for server in nodes])

        # If we were given an identify instance, apply our identify defaults
        # to it, and then replace our identify values -with- it (so we
        # don't lose the values that we set, but can allow them to set everything
        # else).
        if identify is not None:
            identify.update(self.identify.parameters)
            self.identify.update(identify.parameters)

    def publish(self, topic, message):
        """Publish a single *message* to *topic* on an elected connection."""
        self.connection_election.elect_connection().pub(topic, message)

    def mpublish(self, topic, messages):
        """Publish multiple *messages* to *topic* in a single MPUB command."""
        self.connection_election.elect_connection().mpub(topic, messages)
|
osborne6/luminotes | controller/test/Test_groups.py | Python | gpl-3.0 | 5,743 | 0.043705 | from Test_controller import Test_controller
import Stub_urllib2
from controller.Groups import Groups
from model.Group import Group
from model.User import User
class Test_groups( Test_controller ):
  """Integration tests for the Groups controller: load_users and
  update_settings, covering access control (admin vs. member vs. outsider)
  and unknown-group handling."""

  def setUp( self ):
    # Fixtures: two groups; user2 is admin of group, user and user3 are
    # plain members. Nobody belongs to group2.
    Test_controller.setUp( self )
    Groups.urllib2 = Stub_urllib2
    self.group_name = u"my group"
    self.group_name2 = u"other group"
    self.username = u"mulder"
    self.password = u"trustno1"
    self.email_address = u"out-there@example.com"
    self.username2 = u"scully"
    self.password2 = u"trustsome1"
    # NOTE(review): identical to email_address above — confirm whether a
    # distinct address was intended.
    self.email_address2 = u"out-there@example.com"
    self.username3 = u"skinner"
    self.password3 = u"trustne1"
    self.email_address3 = u"somewhere@gov.gov"
    self.group = Group.create( self.database.next_id( Group ), self.group_name )
    self.database.save( self.group, commit = False )
    # NOTE(review): group2 reuses group_name, not group_name2 — possibly
    # intentional (names need not be unique); verify.
    self.group2 = Group.create( self.database.next_id( Group ), self.group_name )
    self.database.save( self.group2, commit = False )
    self.user = User.create( self.database.next_id( User ), self.username, self.password, self.email_address )
    self.database.save( self.user, commit = False )
    self.database.execute( self.user.sql_save_group( self.group.object_id, admin = False ) )
    self.user2 = User.create( self.database.next_id( User ), self.username2, self.password2, self.email_address2 )
    self.database.save( self.user2, commit = False )
    self.database.execute( self.user2.sql_save_group( self.group.object_id, admin = True ) )
    self.user3 = User.create( self.database.next_id( User ), self.username3, self.password3, self.email_address3 )
    self.database.save( self.user3, commit = False )
    self.database.execute( self.user3.sql_save_group( self.group.object_id, admin = False ) )
    self.database.commit()

  def test_load_users( self ):
    """An admin sees the group's admin and non-admin members, sorted."""
    self.login2()

    result = self.http_post( "/groups/load_users", dict(
      group_id = self.group.object_id,
    ), session_id = self.session_id )

    assert len( result[ u"admin_users" ] ) == 1
    assert result[ u"admin_users" ][ 0 ].object_id == self.user2.object_id
    assert result[ u"admin_users" ][ 0 ].username == self.user2.username
    assert len( result[ u"other_users" ] ) == 2
    assert result[ u"other_users" ][ 0 ].object_id == self.user.object_id
    assert result[ u"other_users" ][ 0 ].username == self.user.username
    assert result[ u"other_users" ][ 1 ].object_id == self.user3.object_id
    assert result[ u"other_users" ][ 1 ].username == self.user3.username
    assert result[ u"group" ].object_id == self.group.object_id
    assert result[ u"group" ].name == self.group.name
    assert result[ u"group" ].admin == self.group.admin

  def test_load_users_without_access( self ):
    """A user who is not in the group at all gets an access error."""
    self.login2()

    result = self.http_post( "/groups/load_users", dict(
      group_id = self.group2.object_id,
    ), session_id = self.session_id )

    assert u"access" in result[ u"error" ]

  def test_load_users_without_admin_access( self ):
    """A plain (non-admin) member gets an access error."""
    self.login()

    result = self.http_post( "/groups/load_users", dict(
      group_id = self.group.object_id,
    ), session_id = self.session_id )

    assert u"access" in result[ u"error" ]

  def test_load_users_with_unknown_group( self ):
    """An unknown group id is reported as an access error, not a 404."""
    self.login()

    result = self.http_post( "/groups/load_users", dict(
      group_id = u"unknowngroupid",
    ), session_id = self.session_id )

    assert u"access" in result[ u"error" ]

  def test_update_settings( self ):
    """An admin can rename the group."""
    self.login2()
    new_name = u"new group name"

    result = self.http_post( "/groups/update_settings", dict(
      group_id = self.group.object_id,
      group_name = new_name,
      group_settings_button = u"save settings",
    ), session_id = self.session_id )

    assert u"saved" in result[ u"message" ]

    group = self.database.load( Group, self.group.object_id )
    assert group.name == new_name

  def test_update_settings_without_access( self ):
    """A non-member cannot rename; the name must stay unchanged."""
    self.login2()
    new_name = u"new group name"

    result = self.http_post( "/groups/update_settings", dict(
      group_id = self.group2.object_id,
      group_name = new_name,
      group_settings_button = u"save settings",
    ), session_id = self.session_id )

    assert u"access" in result[ u"error" ]

    group = self.database.load( Group, self.group.object_id )
    assert group.name == self.group.name

  def test_update_settings_without_admin_access( self ):
    """A plain member cannot rename; the name must stay unchanged."""
    self.login()
    new_name = u"new group name"

    result = self.http_post( "/groups/update_settings", dict(
      group_id = self.group.object_id,
      group_name = new_name,
      group_settings_button = u"save settings",
    ), session_id = self.session_id )

    assert u"access" in result[ u"error" ]

    group = self.database.load( Group, self.group.object_id )
    assert group.name == self.group.name

  def test_update_settings_with_unknown_group( self ):
    """Renaming an unknown group id yields an access error."""
    self.login2()
    new_name = u"new group name"

    result = self.http_post( "/groups/update_settings", dict(
      group_id = u"unknowngroupid",
      group_name = new_name,
      group_settings_button = u"save settings",
    ), session_id = self.session_id )

    assert u"access" in result[ u"error" ]

    group = self.database.load( Group, self.group.object_id )
    assert group.name == self.group.name

  def login( self ):
    """Log in as the plain member (mulder) and stash the session id."""
    result = self.http_post( "/users/login", dict(
      username = self.username,
      password = self.password,
      login_button = u"login",
    ) )
    self.session_id = result[ u"session_id" ]

  def login2( self ):
    """Log in as the group admin (scully) and stash the session id."""
    result = self.http_post( "/users/login", dict(
      username = self.username2,
      password = self.password2,
      login_button = u"login",
    ) )
    self.session_id = result[ u"session_id" ]
|
junranhe/tf-faster-rcnn | lib/model/config.py | Python | mit | 11,161 | 0.005197 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
#   from fast_rcnn_config import cfg
cfg = __C

#
# Training options
#
__C.TRAIN = edict()

# Initial learning rate
__C.TRAIN.LEARNING_RATE = 0.001

# Momentum
__C.TRAIN.MOMENTUM = 0.9

# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.0005

# Factor for reducing the learning rate
__C.TRAIN.GAMMA = 0.1

# Step size for reducing the learning rate, currently only support one step
__C.TRAIN.STEPSIZE = 30000

# NOTE(review): purpose undocumented in this file — confirm where the cache
# path is consumed before relying on it.
__C.TRAIN.CACHE_PATH = None

# Iteration intervals for showing the loss during training, on command line interface
__C.TRAIN.DISPLAY = 10

# Whether to double the learning rate for bias
__C.TRAIN.DOUBLE_BIAS = True

# Whether to initialize the weights with truncated normal distribution
__C.TRAIN.TRUNCATED = False

# Whether to have weight decay on bias as well
__C.TRAIN.BIAS_DECAY = False

# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False

# Whether to use aspect-ratio grouping of training images, introduced merely for saving
# GPU memory
__C.TRAIN.ASPECT_GROUPING = False

# The number of snapshots kept, older ones are deleted to save space
__C.TRAIN.SNAPSHOT_KEPT = 3

# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_INTERVAL = 180

# Scale to use during training (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)

# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000

# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 1

# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128

# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25

# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5

# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1

# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True

# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True

# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5

# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 5000

# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'
# __C.TRAIN.SNAPSHOT_INFIX = ''

# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
# __C.TRAIN.USE_PREFETCH = False

# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True

# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)

# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)

# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'gt'

# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.

# Use RPN to detect objects
__C.TRAIN.HAS_RPN = True

# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7

# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3

# If an anchor satisfies both the positive and negative conditions, set it to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False

# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5

# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256

# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7

# Number of top scoring boxes to keep before applying NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000

# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000

# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
# __C.TRAIN.RPN_MIN_SIZE = 16

# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)

# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0

# Whether to use all ground truth bounding boxes for training,
# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''
__C.TRAIN.USE_ALL_GT = True

#
# Testing options
#
__C.TEST = edict()

# Scale to use during testing (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)

# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000

# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3

# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False

# Test using bounding-box regressors
__C.TEST.BBOX_REG = True

# Propose boxes
__C.TEST.HAS_RPN = False

# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'gt'

## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7

## Number of top scoring boxes to keep before applying NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000

## Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300

# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
# __C.TEST.RPN_MIN_SIZE = 16

# Testing mode, default to be 'nms', 'top' is slower but better
# See report for details
__C.TEST.MODE = 'nms'

# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select
__C.TEST.RPN_TOP_N = 5000

#
# ResNet options
#
__C.RESNET = edict()

# Option to set if max-pooling is appended after crop_and_resize.
# if true, the region will be resized to a square of 2xPOOLING_SIZE,
# then 2x2 max-pooling is applied; otherwise the region will be directly
# resized to a square of POOLING_SIZE
__C.RESNET.MAX_POOL = False

# Number of fixed blocks during finetuning, by default the first of all 4 blocks is fixed
# Range: 0 (none) to 3 (all)
__C.RESNET.FIXED_BLOCKS = 1

# Whether to tune the batch normalization parameters during training
__C.RESNET.BN_TRAIN = False

#
# MISC
#

# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1. / 16.

# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])

# For reproducibility
__C.RNG_SEED = 3

# A small number that's used many times
__C.EPS = 1e-14

# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))

# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))

# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'

# Place outputs under an experiments directory
__C.EXP_DIR = 'default'

# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True

# Default GPU device id
__C.GPU_ID = 0

# Default pooling mode, only 'crop' is available
__C.POOLING_MODE = 'crop'
# Size of the pooled region after RoI poo |
dmerejkowsky/qibuild | python/qipkg/actions/deploy_package.py | Python | bsd-3-clause | 1,732 | 0.005774 | ## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
""" Deploy and install a package to a target
"""
import os
import sys
import zipfile
from qisys import ui
import qisys.command
import qisys.parsers
import qipkg.package
def configure_parser(parser):
    """Attach the standard qisys options plus the package-path argument."""
    for add_options in (qisys.parsers.default_parser,
                        qisys.parsers.deploy_parser):
        add_options(parser)
    parser.add_argument("pkg_path")
def do(args):
    """Deploy ``args.pkg_path`` to every configured deploy URL."""
    package_path = args.pkg_path
    for target_url in qisys.parsers.get_deploy_urls(args):
        deploy(package_path, target_url)
def deploy(pkg_path, url):
    """Copy the package at *pkg_path* to *url* over scp, then install it.

    Installation failures are logged (not re-raised) so deployment to one
    target does not abort the others; on failure the uploaded archive is
    removed from the target again.
    """
    ui.info(ui.green, "Deploying",
            ui.reset, ui.blue, pkg_path,
            ui.reset, ui.green, "to",
            ui.reset, ui.blue, url.as_string)
    pkg_name = qipkg.package.name_from_archive(pkg_path)
    # Trailing ':' with no path: copy into the remote user's home directory.
    scp_cmd = ["scp",
               pkg_path,
               "%s@%s:" % (url.user, url.host)]
    qisys.command.call(scp_cmd)
    try:
        _install_package(url, pkg_name, pkg_path)
    except Exception as e:
        # Deliberately broad: best-effort deployment. Log and clean up the
        # uploaded archive so a broken file does not linger on the target.
        # NOTE(review): on *success* the archive stays in the remote $HOME —
        # confirm that is intended.
        ui.error("Unable to install package on target")
        ui.error("Error was: ", e)
        rm_cmd = ["ssh", "%s@%s" % (url.user, url.host),
                  "rm", os.path.basename(pkg_path)]
        qisys.command.call(rm_cmd)
def _install_package(url, pkg_name, pkg_path):
    """Install the previously-uploaded package via the remote PackageManager.

    Connects with libqi to the target on port 9559, removes any existing
    package with the same name, then installs the uploaded archive.
    """
    import qi
    app = qi.Application()
    session = qi.Session()
    session.connect("tcp://%s:9559" % (url.host))
    package_manager = session.service("PackageManager")
    package_manager.removePkg(pkg_name)
    # NOTE(review): assumes the remote home directory is /home/<user> —
    # confirm (not true for e.g. root).
    ret = package_manager.install(
        "/home/%s/%s" % (url.user, os.path.basename(pkg_path)))
    ui.info("PackageManager returned: ", ret)
|
rnelson/adventofcode | advent2015/day17.py | Python | mit | 2,152 | 0 | #!/usr/bin/env python3
"""
http://adventofcode.com/day/17
Part 1
------
The elves bought too much eggnog again - 150 liters this time. To
fit it all into your refrigerator, you'll need to move it into
smaller containers. You take an inventory of the capacities of
the available containers.
For example, suppose you have containers of size 20, 15, 10, 5,
and 5 liters. If you need to store 25 liters, there are four ways
to do it:
- 15 and 10
- 20 and 5 (the first 5)
- 20 and 5 (the second 5)
- 15, 5, and 5
Filling all containers entirely, how many different combinations
of containers can exactly fit all 150 liters of eggnog?
Part 2
------
While playing with all the containers in the kitchen, another load
of eggnog arrives! The shipping and receiving department is
requesting as many containers as you can spare.
Find the minimum number of containers that can exactly fit all
150 liters of eggnog. How many different ways can you fill that
number of containers and still hold exactly 150 liters?
In the example above, the minimum number of containers was two.
There were three ways to use that many containers, and so the
answer there would be 3.
"""
from __future__ import print_function, unicode_literals
from itertools import combinations
import os
import re
import sys
INFILE = 'inputs/input17.txt'
def main():
containers = list()
with open(INFILE) as f:
for line in f:
containers.append(int(line.strip()))
# Part 1
p1count = 0
for s in range(len(containers)):
for c in combinations(containers, s):
if sum(c) == 150:
p1count += 1
# Part 2
p2sizes = dict()
p2min = len(containers)
for i in range(p2min):
p2sizes[i] = 0
for s in range(len(containers)):
for c in combinations(containers, | s):
if sum(c) == 150:
if len(c) < p2min:
p2min = len(c)
p2sizes[s] += 1
msg = '[Python] Puzzle 17-1: {}'
print(msg.format(p1count))
msg = '[Python] Puzzle 17-2: {}'
print(msg.format(p2 | sizes[p2min]))
if __name__ == '__main__':
main()
|
rven/odoo | addons/calendar/tests/test_event_recurrence.py | Python | agpl-3.0 | 27,194 | 0.001434 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import UserError
import pytz
from datetime import datetime, date
from dateutil.relativedelta import relativedelta
from odoo.tests.common import SavepointCase
class TestRecurrentEvents(SavepointCase):
    """Base class for recurrence tests: pins the week start and provides a
    helper asserting a recordset's event dates."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Fix the calendar's first weekday so weekly-rule expectations are
        # deterministic regardless of the user's locale.
        lang = cls.env['res.lang']._lang_get(cls.env.user.lang)
        lang.week_start = '1' # Monday

    def assertEventDates(self, events, dates):
        """Assert *events* (sorted by start) match the (start, stop) pairs
        in *dates*, and that every event is active."""
        events = events.sorted('start')
        self.assertEqual(len(events), len(dates), "Wrong number of events in the recurrence")
        self.assertTrue(all(events.mapped('active')), "All events should be active")
        # NOTE(review): the loop variable shadows the *dates* parameter —
        # harmless here (the parameter is fully consumed by zip first).
        for event, dates in zip(events, dates):
            start, stop = dates
            self.assertEqual(event.start, start)
            self.assertEqual(event.stop, stop)
class TestCreateRecurrentEvents(TestRecurrentEvents):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.event = cls.env['calendar.event'].create({
'name': 'Recurrent Event',
'start': datetime(2019, 10, 25, 8, 0),
'stop': datetime(2019, 10, 27, 18, 0),
'recurrency': True,
})
def test_weekly_count(self):
""" Every week, on Tuesdays, for 3 occurences """
detached_events = self.event._apply_recurrence_values({
'rrule_type': 'weekly',
'tu': True,
'interval': 1,
'count': 3,
'event_tz': 'UTC',
})
self.assertEqual(detached_events, self.event, "It should be detached from the recurrence")
self.assertFalse(self.event.recurrence_id, "It should be detached from the recurrence")
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEqual(len(events), 3, "It should have 3 events in the recurrence")
self.assertEventDates(events, [
(datetime(2019, 10, 22, 8, 0), datetime(2019, 10, 24, 18, 0)),
(datetime(2019, 10, 29, 8, 0), datetime(2019, 10, 31, 18, 0)),
(datetime(2019, 11, 5, 8, 0), datetime(2019, 11, 7, 18, 0)),
])
def test_weekly_interval_2(self):
self.event._apply_recurrence_values({
'interval': 2,
'rrule_type': 'weekly',
'tu': True,
'count': 2,
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEventDates(events, [
(datetime(2019, 10, 22, 8, 0), datetime(2019, 10, 24, 18, 0)),
(datetime(2019, 11, 5, 8, 0), datetime(2019, 11, 7, 18, 0)),
])
def test_weekly_interval_2_week_start_sunday(self):
lang = self.env['res.lang']._lang_get(self.env.user.lang)
lang.week_start = '7' # Sunday
self.event._apply_recurrence_values({
'interval': 2,
'rrule_type': 'weekly',
'tu': True,
'count': 2,
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEventDates(events, [
(datetime(2019, 10, 22, 8, 0), datetime(2019, 10, 24, 18, 0)),
(datetime(2019, 11, 5, 8, 0), datetime(2019, 11, 7, 18, 0)),
])
lang.week_start = '1' # Monday
def test_weekly_until(self):
self.event._apply_recurrence_values({
'rrule_type': 'weekly',
'tu': True,
'interval': 2,
'end_type': 'end_date',
'until': datetime(2019, 11, 15),
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEqual(len(events), 2, "It should have 2 events in the recurrence")
self.assertEventDates(events, [
(datetime(2019, 10, 22, 8, 0), datetime(2019, 10, 24, 18, 0)),
(datetime(2019, 11, 5, 8, 0), datetime(2019, 11, 7, 18, 0)),
])
def test_monthly_count_by_date(self):
self.event._apply_recurrence_values({
'rrule_type': 'monthly',
'interval': 2,
'month_by': 'date',
'day': 15,
'end_type': 'count',
'count': 3,
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEqual(len(events), 3, "It should have 3 events in the recurrence")
self.assertEventDates(events, [
(datetime(2019, 10, 15, 8, 0), datetime(2019, 10, 17, 18, 0)),
(datetime(2019, 12, 15, 8, 0), datetime(2019, 12, 17, 18, 0)),
(datetime(2020, 2, 15, 8 | , 0), datetime(2020, 2, 17, 18, 0)),
])
def test_monthly_count_by_date_31(self):
self.event._apply_recurrence_values({
'rrule_type': 'monthly',
'interval': 1,
'month_by': 'date',
'day': 31,
'end | _type': 'count',
'count': 3,
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEqual(len(events), 3, "It should have 3 events in the recurrence")
self.assertEventDates(events, [
(datetime(2019, 10, 31, 8, 0), datetime(2019, 11, 2, 18, 0)),
# Missing 31th in November
(datetime(2019, 12, 31, 8, 0), datetime(2020, 1, 2, 18, 0)),
(datetime(2020, 1, 31, 8, 0), datetime(2020, 2, 2, 18, 0)),
])
def test_monthly_until_by_day(self):
""" Every 2 months, on the third Tuesday, until 15th March 2020 """
self.event._apply_recurrence_values({
'rrule_type': 'monthly',
'interval': 2,
'month_by': 'day',
'byday': '3',
'weekday': 'TU',
'end_type': 'end_date',
'until': date(2020, 3, 15),
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEqual(len(events), 3, "It should have 3 events in the recurrence")
self.assertEventDates(events, [
(datetime(2019, 10, 15, 8, 0), datetime(2019, 10, 17, 18, 0)),
(datetime(2019, 12, 17, 8, 0), datetime(2019, 12, 19, 18, 0)),
(datetime(2020, 2, 18, 8, 0), datetime(2020, 2, 20, 18, 0)),
])
def test_monthly_until_by_day_last(self):
""" Every 2 months, on the last Wednesday, until 15th January 2020 """
self.event._apply_recurrence_values({
'interval': 2,
'rrule_type': 'monthly',
'month_by': 'day',
'weekday': 'WE',
'byday': '-1',
'end_type': 'end_date',
'until': date(2020, 1, 15),
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEqual(len(events), 2, "It should have 3 events in the recurrence")
self.assertEventDates(events, [
(datetime(2019, 10, 30, 8, 0), datetime(2019, 11, 1, 18, 0)),
(datetime(2019, 12, 25, 8, 0), datetime(2019, 12, 27, 18, 0)),
])
def test_yearly_count(self):
self.event._apply_recurrence_values({
'interval': 2,
'rrule_type': 'yearly',
'count': 2,
'event_tz': 'UTC',
})
events = self.event.recurrence_id.calendar_event_ids
self.assertEqual(len(events), 2, "It sh |
robertchase/spindrift | spindrift/mysql/packet.py | Python | mit | 9,220 | 0.000651 | import struct
import spindrift.mysql.charset as charset
from spindrift.mysql.constants import CLIENT, SERVER_STATUS
import spindrift.mysql.util as util
MAX_PACKET_LEN = 2**24-1
NULL_COLUMN = 251
UNSIGNED_CHAR_COLUMN = 251
UNSIGNED_SHORT_COLUMN = 252
UNSIGNED_INT24_COLUMN = 253
UNSIGNED_INT64_COLUMN = 254
def write(send, sequence, payload):
    """Frame *payload* as a MySQL packet (3-byte little-endian length plus a
    one-byte sequence number) and hand the whole thing to *send*."""
    header = util.pack_int24(len(payload)) + util.int2byte(sequence)
    send(header + payload)
class Packet(object):
def __init__(self, data=None):
self.number = 0
self.data = data
self.buffer = None
self._error = None
self | ._position = 0
def __repr__(self):
return 'n=%s, d=%s' % (self.number, self.data)
def reset(self, sequence):
self.clear()
if sequence is not None:
self.number = sequence
else:
self.increment()
def clear(self):
self.data = b''
self._position = 0
def increment(self):
self | .number = (self.number + 1) % 256
def read(self, size):
result = self.data[self._position:self._position+size]
self._position += size
return result
def read_all(self):
result = self.data[self._position:]
self._position = None # ensure no subsequent read()
return result
def read_uint8(self):
result = self.data[self._position]
self._position += 1
return result
def read_uint16(self):
result = struct.unpack_from('<H', self.data, self._position)[0]
self._position += 2
return result
def read_uint24(self):
low, high = struct.unpack_from('<HB', self.data, self._position)
self._position += 3
return low + (high << 16)
def read_uint32(self):
result = struct.unpack_from('<I', self.data, self._position)[0]
self._position += 4
return result
def read_uint64(self):
result = struct.unpack_from('<Q', self.data, self._position)[0]
self._position += 8
return result
def read_struct(self, fmt):
s = struct.Struct(fmt)
result = s.unpack_from(self.data, self._position)
self._position += s.size
return result
def read_string(self):
end_pos = self.data.find(b'\0', self._position)
if end_pos < 0:
return None
result = self.data[self._position:end_pos]
self._position = end_pos + 1
return result
def read_length_encoded_integer(self):
"""Read a 'Length Coded Binary' number from the data buffer.
Length coded numbers can be anywhere from 1 to 9 bytes depending
on the value of the first byte.
"""
c = self.read_uint8()
if c == NULL_COLUMN:
return None
if c < UNSIGNED_CHAR_COLUMN:
return c
elif c == UNSIGNED_SHORT_COLUMN:
return self.read_uint16()
elif c == UNSIGNED_INT24_COLUMN:
return self.read_uint24()
elif c == UNSIGNED_INT64_COLUMN:
return self.read_uint64()
def read_length_coded_string(self):
"""Read a 'Length Coded String' from the data buffer.
A 'Length Coded String' consists first of a length coded
(unsigned, positive) integer represented in 1-9 bytes followed by
that many bytes of binary data. (For example "cat" would be "3cat".)
"""
length = self.read_length_encoded_integer()
if length is None:
return None
return self.read(length)
@property
def is_ok(self):
return self.data[0:1] == b'\0' and len(self.data) >= 7
@property
def is_error(self):
return self.data[0:1] == b'\xff'
@property
def is_eof(self):
return self.data[0:1] == b'\xfe' and len(self.data) < 9
@property
def error(self):
if self._error:
return self._error
if not hasattr(self, 'data'):
return None
if not self.is_error or self.data[3:4] != b'#':
return None
return self.data[9:].decode('utf-8', 'replace')
def is_auth_switch_request(self):
return self.data[0:1] == b'\xfe'
    def handle(self, data=None):
        """Feed raw bytes into the packet buffer.

        Returns True once a complete, in-sequence, non-error packet has
        been extracted into ``self.data``; returns False when more bytes
        are needed or an error was detected (see ``self.error``).
        """
        if data is None:
            data = b''
        if self.buffer is None:
            self.buffer = data
        else:
            self.buffer += data
        # A MySQL packet header is 4 bytes: 3-byte payload length plus a
        # 1-byte sequence number.
        if len(self.buffer) < 4:
            return False
        low, high, packet_number = struct.unpack('<HBB', self.buffer[:4])
        packet_length = low + (high << 16)
        # self.number holds the expected sequence id (maintained elsewhere
        # on this object -- not visible in this block).
        if packet_number != self.number:
            self._error = 'Packet number out of sequence (%s != %s)' % (packet_number, self.number)
            return False
        if len(self.buffer) - 4 < packet_length:
            # Payload not fully received yet; keep buffering.
            return False
        # Split off this packet's payload; keep any trailing bytes for the
        # next packet.
        self.data, self.buffer = self.buffer[4: 4+packet_length], self.buffer[4+packet_length:]
        if self.error:
            return False
        return True
    def dump(self):
        # Debug helper: print the raw payload bytes of this packet.
        print('dump:', self.data)
class Greeting(Packet):
    """Parsed MySQL server handshake ("greeting") packet.

    The constructor immediately decodes the raw payload into attributes
    such as ``server_version``, ``salt`` and ``server_capabilities``.
    """
    def __init__(self, data):
        self.data = data
        self.parse()
    @property
    def autocommit(self):
        # Autocommit state advertised in the server status flags.
        return bool(self.server_status & SERVER_STATUS.SERVER_STATUS_AUTOCOMMIT)
    def parse(self):
        """Decode the handshake fields out of ``self.data`` in wire order."""
        data = self.data
        i = 0
        self.protocol_version = util.byte2int(data[i:i+1])
        i += 1
        # NUL-terminated human-readable server version string.
        server_end = data.find(b'\0', i)
        self.server_version = data[i:server_end].decode('latin1')
        i = server_end + 1
        # NOTE(review): struct.unpack returns a 1-tuple, so
        # server_thread_id is stored as a tuple rather than an int --
        # confirm callers rely on that before "fixing" it.
        self.server_thread_id = struct.unpack('<I', data[i:i+4])
        i += 4
        # First 8 bytes of the auth plugin data ("salt").
        self.salt = data[i:i+8]
        i += 9  # 8 + 1(filler)
        # Lower 16 bits of the capability flags.
        self.server_capabilities = struct.unpack('<H', data[i:i+2])[0]
        i += 2
        if len(data) >= i + 6:
            lang, stat, cap_h, salt_len = struct.unpack('<BHHB', data[i:i+6])
            i += 6
            self.server_language = lang
            self.server_charset = charset.charset_by_id(lang).name
            self.server_status = stat
            # Merge in the upper 16 bits of the capability flags.
            self.server_capabilities |= cap_h << 16
            salt_len = max(12, salt_len - 9)
            # reserved
            i += 10
            if len(data) >= i + salt_len:
                # salt_len includes auth_plugin_data_part_1 and filler
                self.salt += data[i:i+salt_len]
                i += salt_len
            i += 1
            # AUTH PLUGIN NAME may appear here.
            if self.server_capabilities & CLIENT.PLUGIN_AUTH and len(data) >= i:
                # Due to Bug#59453 the auth-plugin-name is missing the terminating
                # NUL-char in versions prior to 5.5.10 and 5.6.2.
                # ref: https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
                # didn't use version checks as mariadb is corrected and reports
                # earlier than those two.
                server_end = data.find(b'\0', i)
                if server_end < 0: # pragma: no cover - very specific upstream bug
                    # not found \0 and last field so take it all
                    self._auth_plugin_name = data[i:].decode('latin1')
                else:
                    self._auth_plugin_name = data[i:server_end].decode('latin1')
        return self
    def dump(self):
        # Debug helper: print every decoded handshake attribute.
        print('protocol_version', self.protocol_version)
        print('server_version', self.server_version)
        print('server_thread_id', self.server_thread_id)
        print('salt', self.salt)
        print('server_capabilities', self.server_capabilities)
        print('server_language', self.server_language)
        print('server_charset', self.server_charset)
        print('server_status', self.server_status)
        # NOTE(review): getattr without a default raises AttributeError if
        # the server never sent a plugin name -- confirm that is acceptable
        # for a debug-only helper.
        print('_auth_plugin_name', getattr(self, '_auth_plugin_name'))
class FieldDescriptorPacket(Packet):
def __init__(self, data, encoding):
super(FieldDescriptorPacket, self).__init__(data)
self.parse(encoding)
def parse(self, encoding):
self.catalog = self.read_length_coded_string()
self.db = self.read_length_coded_string()
self.table_name = self.read_length_coded_string().decode(encoding)
self.org_table = self.read_length_coded_string().decode(e |
eteq/ginga | ginga/qtw/plugins/Header.py | Python | bsd-3-clause | 6,410 | 0.000936 | #
# Header.py -- FITS Header plugin for fits viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.misc import Bunch
from ginga.misc.plugins.HeaderBase import HeaderBase
import ginga.util.six as six
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga.qtw import QtHelp
class Header(HeaderBase):
    """Qt plugin that displays the FITS header of the current image in a
    per-channel table view (one stacked page per channel)."""
    def __init__(self, fv):
        # superclass defines some variables for us, like logger
        super(Header, self).__init__(fv)
        # Cache of the last image whose header was rendered, so repeated
        # focus events do not rebuild the table model.
        self._image = None
    def build_gui(self, container):
        """Create the stacked widget that holds one header table per channel."""
        nb = QtHelp.StackedWidget()
        self.nb = nb
        cw = container.get_widget()
        cw.addWidget(nb, stretch=0)
    def _create_header_window(self, info):
        """Build the table view plus 'Sortable' checkbox for one channel."""
        widget = QtGui.QWidget()
        vbox = QtGui.QVBoxLayout()
        vbox.setContentsMargins(2, 2, 2, 2)
        widget.setLayout(vbox)
        table = QtGui.QTableView()
        self.table = table
        table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        table.setShowGrid(False)
        vh = table.verticalHeader()
        # Hack to make the rows in a TableView all have a
        # reasonable height for the data
        if QtHelp.have_pyqt5:
            # NOTE: this makes a terrible hit on performance--DO NOT USE!
            #vh.setSectionResizeMode(QtGui.QHeaderView.ResizeToContents)
            vh.setSectionResizeMode(QtGui.QHeaderView.Fixed)
        else:
            # NOTE: this makes a terrible hit on performance--DO NOT USE!
            #vh.setResizeMode(QtGui.QHeaderView.ResizeToContents)
            vh.setResizeMode(QtGui.QHeaderView.Fixed)
        vh.setDefaultSectionSize(18)
        # Hide vertical header
        vh.setVisible(False)
        vbox.addWidget(table, stretch=1)
        # create sort toggle
        cb = QtGui.QCheckBox("Sortable")
        cb.stateChanged.connect(lambda tf: self.set_sortable_cb(info))
        hbox = QtHelp.HBox()
        hbox.addWidget(cb, stretch=0)
        vbox.addWidget(hbox, stretch=0)
        # toggle sort
        if self.settings.get('sortable', False):
            cb.setCheckState(QtCore.Qt.Checked)
        info.setvals(widget=widget, table=table, sortw=cb)
        return widget
    def set_header(self, info, image):
        """Populate the channel's table with *image*'s FITS header
        (no-op if this image's header is already displayed)."""
        if self._image == image:
            # we've already handled this header
            return
        self.logger.debug("setting header")
        header = image.get_header()
        table = info.table
        model = HeaderTableModel(self.columns, header)
        table.setModel(model)
        selectionModel = QtHelp.QItemSelectionModel(model, table)
        table.setSelectionModel(selectionModel)
        # set column width to fit contents
        # NOTE: this makes a terrible hit on performance--DO NOT USE!
        ## table.resizeColumnsToContents()
        ## table.resizeRowsToContents()
        is_sorted = info.sortw.isChecked()
        table.setSortingEnabled(is_sorted)
        self.logger.debug("setting header done ({0})".format(is_sorted))
        self._image = image
    def add_channel(self, viewer, chinfo):
        """Create a header page for a newly added channel and subscribe to
        its 'image-set' callback."""
        chname = chinfo.name
        info = Bunch.Bunch(chname=chname)
        sw = self._create_header_window(info)
        self.nb.addTab(sw, chname)
        index = self.nb.indexOf(sw)
        info.setvals(widget=sw)
        self.channel[chname] = info
        fitsimage = chinfo.fitsimage
        fitsimage.set_callback('image-set', self.new_image_cb, info)
    def delete_channel(self, viewer, chinfo):
        """Tear down the header page belonging to a deleted channel."""
        chname = chinfo.name
        self.logger.debug("deleting channel %s" % (chname))
        widget = self.channel[chname].widget
        self.nb.removeWidget(widget)
        widget.setParent(None)
        widget.deleteLater()
        self.active = None
        self.info = None
        del self.channel[chname]
    def focus_cb(self, viewer, fitsimage):
        """Switch the visible page to the focused channel and refresh its
        header display."""
        chname = self.fv.get_channelName(fitsimage)
        chinfo = self.fv.get_channelInfo(chname)
        chname = chinfo.name
        if self.active != chname:
            widget = self.channel[chname].widget
            index = self.nb.indexOf(widget)
            self.nb.setCurrentIndex(index)
            self.active = chname
            self.info = self.channel[self.active]
        image = fitsimage.get_image()
        if image is None:
            return
        self.set_header(self.info, image)
    def set_sortable_cb(self, info):
        # Invalidate the image cache so the table model is rebuilt with
        # the new sortable setting applied.
        self._image = None
        super(Header, self).set_sortable_cb(info)
    def __str__(self):
        return 'header'
class HeaderTableModel(QtCore.QAbstractTableModel):
    """Qt table model exposing FITS header cards as rows.

    ``columns`` is a sequence of (title, field) pairs; *title* is shown in
    the horizontal header and *field* indexes into each header card.
    """
    def __init__(self, columns, header):
        super(HeaderTableModel, self).__init__(None)
        self.columns = columns
        self.header = []
        # Copy cards from header into a local list
        # TODO: what if the header changes underneath us?
        for key in header.keys():
            self.header.append(header.get_card(key))
    def rowCount(self, parent):
        return len(self.header)
    def columnCount(self, parent):
        return len(self.columns)
    def data(self, index, role):
        """Return the display string for one cell (display role only)."""
        if not index.isValid():
            return None
        elif role != QtCore.Qt.DisplayRole:
            return None
        card = self.header[index.row()]
        field = self.columns[index.column()][1]
        return str(card[field])
    def headerData(self, col, orientation, role):
        if (orientation == QtCore.Qt.Horizontal) and \
                (role == QtCore.Qt.DisplayRole):
            return self.columns[col][0]
        # Hack to make the rows in a TableView all have a
        # reasonable height for the data
        elif (role == QtCore.Qt.SizeHintRole) and \
                (orientation == QtCore.Qt.Vertical):
            return 1
        return None
    def sort(self, Ncol, order):
        """Sort table by given column number.
        """
        def sortfn(card):
            field = self.columns[Ncol][1]
            return card[field]
        # PyQt4 needs explicit layout-change signals around a re-sort;
        # PyQt5 connects these differently.
        if QtHelp.have_pyqt4:
            self.emit(QtCore.SIGNAL("layoutAboutToBeChanged()"))
        self.header = sorted(self.header, key=sortfn)
        if order == QtCore.Qt.DescendingOrder:
            self.header.reverse()
        if QtHelp.have_pyqt4:
            self.emit(QtCore.SIGNAL("layoutChanged()"))
#END
|
potzenheimer/meetshaus | src/meetshaus.blog/meetshaus/blog/browser/migration.py | Python | mit | 8,180 | 0.000489 | # -*- coding: utf-8 -*-
"""Module providing base class migration for blog entry content"""
import lxml
from Acquisition import aq_inner
from Products.CMFCore.utils import getToolByName
from Products.Five.browser import BrowserView
from meetshaus.blog.blogpost import IBlogPost
from plone import api
from plone.portlets.interfaces import ILocalPortletAssignable, IPortletManager, \
IPortletAssignmentMapping
from zope.component import getMultiAdapter, getUtility
from zope.lifecycleevent import modified
from meetshaus.blog.blogentry import IBlogEntry
class BlogMigrationView(BrowserView):
    """ Migrate blog content

    Move blog entries to folderish blog posting content types and
    transfer the associated images to the folder content
    """
    def __call__(self):
        # Flag consumed by the template to decide whether migration can run.
        self.has_blog_entries = len(self.blog_entries()) > 0
        return self.render()
    def render(self):
        return self.index()
    def blog_entries(self):
        """Return catalog brains for all blog entries, newest first."""
        items = api.content.find(
            context=api.portal.get(),
            object_provides=IBlogEntry,
            sort_on='effective',
            sort_order='reverse'
        )
        return items
    def blog_entries_count(self):
        return len(self.blog_entries())
    def used_image_assets(self, uuid):
        """Count the <img> tags embedded in the body of the entry *uuid*."""
        item = api.content.get(UID=uuid)
        html_body = item.text.raw
        xhtml = lxml.html.document_fromstring(html_body)
        images = xhtml.xpath('//img')
        image_idx = len(images)
        return image_idx
class BlogMigrationRunnerView(BrowserView):
    """ Blog migration runner

    Converts every Archetypes-style blog entry into a folderish
    ``meetshaus.blog.blogpost`` item inside the current context, then
    redirects to the 'migration finished' view.
    """
    def __call__(self):
        return self.render()
    def render(self):
        context = aq_inner(self.context)
        base_url = context.absolute_url()
        authenticator = getMultiAdapter((context, self.request),
                                        name=u"authenticator")
        # CSRF-protected redirect target shown after the migration ran.
        next_url = '{0}/@@migration-finished?_authenticator={1}'.format(
            base_url, authenticator.token())
        self._migrate_blog_posts()
        modified(context)
        context.reindexObject(idxs='modified')
        return self.request.response.redirect(next_url)
    def _migrate_blog_posts(self):
        """Create a published blog post for every existing blog entry.

        Returns the number of migrated entries.
        """
        context = aq_inner(self.context)
        migrated = []
        not_migrated = []
        results = api.content.find(
            context=api.portal.get(),
            object_provides=IBlogEntry
        )
        for brain in results:
            obj = brain.getObject()
            html_body = obj.text.raw
            xhtml = lxml.html.document_fromstring(html_body)
            images = xhtml.xpath('//img')
            # Collect UIDs of images referenced via resolveuid links.
            img_list = list()
            if images:
                for i in images:
                    img_src = i.attrib['src']
                    if img_src.startswith('resolve'):
                        uuid = img_src.split('/')[1]
                        img_list.append(uuid)
            new_item = api.content.create(
                type='meetshaus.blog.blogpost',
                title=obj.Title(),
                description=obj.Description(),
                container=context
            )
            setattr(new_item, 'Subject', obj.Subject())
            setattr(new_item, 'text', obj.text)
            api.content.transition(obj=new_item, transition='publish')
            # Preserve the original publication date.
            effective = obj.EffectiveDate()
            new_item.setEffectiveDate(effective)
            modified(new_item)
            new_item.reindexObject(idxs='modified')
            # for img_uid in img_list:
            #     img_obj = api.content.get(UID=img_uid)
            #     api.content.move(source=img_obj, target=new_item)
            migrated.append(obj.UID())
        info_message_template = 'There are {0} objects migrated.'
        warn_message_template = 'There are {0} objects not migrated.'
        # NOTE(review): not_migrated is never appended to, so the warning
        # branch is currently dead code; and if both lists were empty,
        # ``msg`` would be unbound below -- confirm intended behaviour.
        if migrated:
            msg = info_message_template.format(len(migrated))
        if not_migrated:
            msg = warn_message_template.format(len(not_migrated))
        api.portal.show_message(
            message=msg,
            request=self.request
        )
        return len(migrated)
class BlogMigrationFinishedView(BrowserView):
    """ Migration done

    Simple confirmation page rendered after the migration runner finishes.
    """
    def __call__(self):
        return self.render()
    def render(self):
        return self.index()
class GatherAssetsView(BrowserView):
    """Gather image assets and move them into the current context.

    Makes a blog post self-contained by moving every image its body
    references via ``resolveuid`` links into the post itself.
    """
    def __call__(self):
        return self.render()
    def render(self):
        """Move the assets, update modification metadata, redirect back."""
        context = aq_inner(self.context)
        base_url = context.absolute_url()
        authenticator = getMultiAdapter((context, self.request),
                                        name=u"authenticator")
        # CSRF-protected redirect back to the context default view.
        next_url = '{0}?_authenticator={1}'.format(
            base_url, authenticator.token())
        self._gather_assets()
        modified(context)
        context.reindexObject(idxs='modified')
        return self.request.response.redirect(next_url)
    def _collect_assets(self):
        """Return the UIDs of all resolveuid-referenced images in the body."""
        context = aq_inner(self.context)
        html_body = context.text.raw
        xhtml = lxml.html.document_fromstring(html_body)
        images = xhtml.xpath('//img')
        img_list = list()
        if images:
            for i in images:
                img_src = i.attrib['src']
                if img_src.startswith('resolve'):
                    uuid = img_src.split('/')[1]
                    img_list.append(uuid)
        return img_list
    def _gather_assets(self):
        """Move each referenced image into this context.

        Returns the number of images successfully moved; a failing move
        skips that asset instead of aborting the whole run.
        """
        context = aq_inner(self.context)
        migrated = 0
        contained_images = self._collect_assets()
        for uuid in contained_images:
            image = api.content.get(UID=uuid)
            try:
                api.content.move(source=image, target=context)
                migrated += 1
            except Exception:
                # Bug fix: a bare ``except`` with a leftover
                # ``pdb.set_trace()`` debugger call lived here. Keep the
                # best-effort behaviour (skip the failing asset) without
                # dropping into a debugger or swallowing SystemExit /
                # KeyboardInterrupt.
                continue
        modified(context)
        context.reindexObject(idxs='modified')
        return migrated
class CollectAssets(BrowserView):
    """ Collect all assigned images and assets and move to current context

    Batch view: runs ``@@gather-assets`` on every blog post in the site.
    """
    def __call__(self):
        return self.render()
    def render(self):
        context = aq_inner(self.context)
        base_url = context.absolute_url()
        authenticator = getMultiAdapter((context, self.request),
                                        name=u"authenticator")
        next_url = '{0}?_authenticator={1}'.format(
            base_url, authenticator.token())
        self._collect_assets()
        return self.request.response.redirect(next_url)
    @staticmethod
    def _collect_assets():
        # Invoke the per-post gather view on every blog post found in the
        # portal catalog.
        results = api.content.find(
            context=api.portal.get(),
            object_provides=IBlogPost
        )
        for brain in results:
            context = brain.getObject()
            context.restrictedTraverse('@@gather-assets')()
        return
class RemovePortletAssignments(BrowserView):
    """Remove every local portlet assignment from all content objects.

    (Docstring fixed: a copy of the GatherAssetsView docstring previously
    appeared here.)
    """
    def __call__(self):
        return self.render()
    def render(self):
        context = aq_inner(self.context)
        base_url = context.absolute_url()
        authenticator = getMultiAdapter((context, self.request),
                                        name=u"authenticator")
        next_url = '{0}?_authenticator={1}'.format(
            base_url, authenticator.token())
        self._cleanup_assignments()
        return self.request.response.redirect(next_url)
    @staticmethod
    def _cleanup_assignments():
        # Walk the whole catalog and delete every left/right column
        # portlet assignment on objects that support local portlets.
        catalog = api.portal.get_tool('portal_catalog')
        all_brains = catalog.searchResults()
        for i in all_brains:
            obj = i.getObject()
            if not ILocalPortletAssignable.providedBy(obj):
                continue
            for manager_name in ('plone.leftcolumn', 'plone.rightcolumn'):
                manager = getUtility(IPortletManager, name=manager_name)
                assignment_mapping = getMultiAdapter((obj, manager),
                                                     IPortletAssignmentMapping)
                # Iterate over a list copy since we mutate the mapping.
                for item in list(assignment_mapping.keys()):
                    del assignment_mapping[item]
|
tghw/trello-py | trello/search.py | Python | bsd-2-clause | 1,739 | 0.00345 | from .base import ApiBase
import requests
class Search(ApiBase):
    """Thin wrapper around Trello's ``/1/search`` REST endpoints."""
    __module__ = 'trello'
    def __init__(self, apikey, token=None):
        self._apikey = apikey
        self._token = token
    def get(self, query, idBoards=None, idOrganizations=None, idCards=None, modelTypes=None, board_fields=None, boards_limit=None, card_fields=None, cards_limit=None, cards_page=None, card_board=None, card_list=None, card_members=None, card_stickers=None, card_attachments=None, organization_fields=None, organizations_limit=None, member_fields=None, members_limit=None, partial=None):
        """Search boards, cards, members and organizations.

        ``requests`` drops query parameters whose value is None, so the
        optional arguments can be passed straight through.
        """
        params = {
            "key": self._apikey,
            "token": self._token,
            "query": query,
            "idBoards": idBoards,
            "idOrganizations": idOrganizations,
            "idCards": idCards,
            "modelTypes": modelTypes,
            "board_fields": board_fields,
            "boards_limit": boards_limit,
            "card_fields": card_fields,
            "cards_limit": cards_limit,
            "cards_page": cards_page,
            "card_board": card_board,
            "card_list": card_list,
            "card_members": card_members,
            "card_stickers": card_stickers,
            "card_attachments": card_attachments,
            "organization_fields": organization_fields,
            "organizations_limit": organizations_limit,
            "member_fields": member_fields,
            "members_limit": members_limit,
            "partial": partial,
        }
        resp = requests.get("https://trello.com/1/search", params=params, data=None)
        return self.raise_or_json(resp)
    def get_member(self, query, limit=None, idBoard=None, idOrganization=None, onlyOrgMembers=None):
        """Search members only."""
        params = {
            "key": self._apikey,
            "token": self._token,
            "query": query,
            "limit": limit,
            "idBoard": idBoard,
            "idOrganization": idOrganization,
            "onlyOrgMembers": onlyOrgMembers,
        }
        resp = requests.get("https://trello.com/1/search/members", params=params, data=None)
        return self.raise_or_json(resp)
|
vlegoff/tsunami | src/secondaires/crafting/editeurs/gldedit/atelier.py | Python | bsd-3-clause | 2,784 | 0.00036 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright no | tice,
# this list of condition | s and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module contenant l'éditeur d'ateliers de guilde."""
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.uniligne import Uniligne
class GldAtelierEdit(Presentation):
    """Editor for guild workshops ("ateliers")."""
    nom = "gldedit:atelier"
    def __init__(self, personnage, atelier, attribut=None):
        """Editor constructor."""
        if personnage:
            instance_connexion = personnage.instance_connexion
        else:
            instance_connexion = None
        Presentation.__init__(self, instance_connexion, atelier, None, False)
        if personnage and atelier:
            self.construire(atelier)
    def __getnewargs__(self):
        # Support unpickling: recreate the editor with empty arguments.
        return (None, None)
    def construire(self, atelier):
        """Build the editor choices."""
        # Name field (user-facing prompt text is intentionally French).
        nom = self.ajouter_choix("nom", "n", Uniligne, atelier, "nom")
        nom.parent = self
        nom.prompt = "Nom de la atelier : "
        nom.apercu = "{valeur}"
        nom.aide_courte = \
            "Entrez le |ent|nom|ff| de l'atelier ou |cmd|/|ff| pour " \
            "revenir à la fenêtre parente.\n\nNom actuel : " \
            "|bc|{valeur}|ff|"
|
google-research/federated | reconstruction/reconstruction_model.py | Python | apache-2.0 | 5,300 | 0.003585 | # Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstractions for Federated Reconstruction Models."""
import abc
import attr
@attr.s(eq=False, frozen=True, slots=True)
class BatchOutput(object):
  """A structure that holds the output of a `ReconstructionModel`.

  Note: All fields are optional (may be None).

  -   `predictions`: Tensor of predictions on the examples.
  -   `labels`: Tensor of labels for the examples.
  -   `num_examples`: tf.int32 scalar number of examples seen in the batch.
  """
  # Tensor of model predictions for the batch (may be None).
  predictions = attr.ib()
  # Tensor of ground-truth labels for the batch (may be None).
  labels = attr.ib()
  # tf.int32 scalar count of examples seen in the batch (may be None).
  num_examples = attr.ib()
class ReconstructionModel(object, metaclass=abc.ABCMeta):
  """Represents a reconstruction model for use in Tensorflow Federated.

  `ReconstructionModel`s are used to train models that reconstruct a set of
  their variables on device, never sharing those variables with the server.

  Each `ReconstructionModel` will work on a set of `tf.Variables`, and each
  method should be a computation that can be implemented as a `tf.function`;
  this implies the class should essentially be stateless from a Python
  perspective, as each method will generally only be traced once (per set of
  arguments) to create the corresponding TensorFlow graph functions. Thus,
  `ReconstructionModel` instances should behave as expected in both eager and
  graph (TF 1.0) usage.

  In general, `tf.Variables` may be either:
    * Weights, the variables needed to make predictions with the model.
    * Local variables, e.g. to accumulate aggregated metrics across
      calls to forward_pass.

  The weights can be broken down into:
    * Global variables: Variables that are allowed to be aggregated on the
      server.
    * Local variables: Variables that cannot leave the device.

  Furthermore, both of these types of variables can be:
    * Trainable variables: These can and should be trained using gradient-based
      methods.
    * Non-trainable variables: Could include fixed pre-trained layers or static
      model data.

  These variables are provided via:
    * `global_trainable_variables`
    * `global_non_trainable_variables`
    * `local_trainable_variables`
    * `local_non_trainable_variables`

  properties, and must be initialized by the user of the `ReconstructionModel`.

  While training a reconstruction model, global trainable variables will
  generally be provided by the server. Local trainable variables will then be
  reconstructed locally. Updates to the global trainable variables will be sent
  back to the server. Local variables are not transmitted.

  All `tf.Variables` should be introduced in `__init__`; this could move to a
  `build` method more inline with Keras (see
  https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) in
  the future.
  """

  # Note: `abc.abstractproperty` is deprecated since Python 3.3; the
  # equivalent modern spelling is `@property` stacked over
  # `@abc.abstractmethod`, which behaves identically for subclasses.
  @property
  @abc.abstractmethod
  def global_trainable_variables(self):
    """An iterable of `tf.Variable` objects, see class comment for details."""
    pass

  @property
  @abc.abstractmethod
  def global_non_trainable_variables(self):
    """An iterable of `tf.Variable` objects, see class comment for details."""
    pass

  @property
  @abc.abstractmethod
  def local_trainable_variables(self):
    """An iterable of `tf.Variable` objects, see class comment for details."""
    pass

  @property
  @abc.abstractmethod
  def local_non_trainable_variables(self):
    """An iterable of `tf.Variable` objects, see class comment for details."""
    pass

  @property
  @abc.abstractmethod
  def input_spec(self):
    """The type specification of the `batch_input` parameter for `forward_pass`.

    A nested structure of `tf.TensorSpec` objects, that matches the structure of
    arguments that will be passed as the `batch_input` argument of
    `forward_pass`. The tensors must include a batch dimension as the first
    dimension, but the batch dimension may be undefined.
    """
    pass

  @abc.abstractmethod
  def forward_pass(self, batch_input, training=True):
    """Runs the forward pass and returns results.

    This method should not modify any variables that are part of the model
    parameters, that is, variables that influence the predictions. Rather, this
    is done by the training loop.

    Args:
      batch_input: a nested structure that matches the structure of
        `ReconstructionModel.input_spec` and each tensor in `batch_input`
        satisfies `tf.TensorSpec.is_compatible_with()` for the corresponding
        `tf.TensorSpec` in `ReconstructionModel.input_spec`.
      training: If `True`, run the training forward pass, otherwise, run in
        evaluation mode. The semantics are generally the same as the `training`
        argument to `keras.Model.__call__`; this might e.g. influence how
        dropout or batch normalization is handled.

    Returns:
      A `BatchOutput` object.
    """
    pass
|
timblakely/bigbrain | bigbrain/test.py | Python | apache-2.0 | 1,684 | 0.024347 | import time
from network.cluster_managers import google_compute_cluster_manager as gccm
# Manual smoke-test script (Python 2) for GoogleComputeClusterManager:
# exercises instance listing, status queries and instance startup against
# a live cluster. Not an automated unit test -- it performs real remote
# calls and sleeps while polling.
cm = gccm.GoogleComputeClusterManager()
cm.Initialize('bats','12345','32123')
cm.Heartbeat()
cm.ListInstanceNames()
last = ''
print 'NumInstances: %i' % cm.NumInstances()
# Remember the last listed instance for the "specific" queries below.
for instance in cm.ListInstanceNames():
  print ' > %s' % instance
  last = instance
print 'Started:'
for a,b in cm.IsStarted().iteritems():
  print ' > %s -> %r' % (a,b)
print 'Started - specific:'
for a,b in cm.IsStarted([last]).iteritems():
  print ' + %s -> %r' % (a,b)
print 'Failed:'
for a,b in cm.IsFailed().iteritems():
  print ' > %s -> %r' % (a,b)
print 'Failed - specific:'
for a,b in cm.IsFailed([last]).iteritems():
  print ' + %s -> %r' % (a,b)
print 'Running:'
for a,b in cm.IsRunning().iteritems():
  print ' > %s -> %r' % (a,b)
print 'Running - specific:'
for a,b in cm.IsRunning([last]).iteritems():
  print ' + %s -> %r' % (a,b)
print 'Status:'
for a,b in cm.GetStatus().iteritems():
  print ' > %s -> %s' % (a,b)
print 'Status - specific:'
for a,b in cm.GetStatus([last]).iteritems():
  print ' + %s -> %s' % (a,b)
# Start a fresh instance and poll (with heartbeats) until it is either
# running or has failed, printing progress every ~3 seconds.
name = cm.StartNewInstance()
waited = 0
while not cm.IsRunning(name) and not cm.IsFailed(name):
  print """Time %3i:
    %s
    > started: %r
    > running: %r
    > failed: %r
    > status: %s""" % (waited, name, cm.IsStarted(name), cm.IsRunning(name), cm.IsFailed(name), cm.GetStatus(name))
  time.sleep(1.5)
  cm.Heartbeat()
  time.sleep(1.5)
  waited += 3
print """Time %3i:
    %s
    > started: %r
    > running: %r
    > failed: %r
    > status: %s""" % (waited, name, cm.IsStarted(name), cm.IsRunning(name), cm.IsFailed(name), cm.GetStatus(name))
|
ericl1u/eece7398 | eventBasedAnimationClass.py | Python | gpl-3.0 | 1,836 | 0.003268 | # Taken directly from CMU 15112 Course notes:
# http://www.cs.cmu.edu/~112/notes/eventBasedAnimationClass.py
# eventBasedAnimationClass.py
from Tkinter import *
class EventBasedAnimationClass(object):
    """Minimal Tkinter animation framework (from the CMU 15-112 notes).

    Subclasses override the on* hooks, initAnimation and redrawAll; run()
    builds the window, wires up the events and enters the Tk main loop.
    """
    def onMousePressed(self, event): pass
    def onKeyPressed(self, event): pass
    def onTimerFired(self): pass
    def redrawAll(self): pass
    def initAnimation(self): pass
    def __init__(self, width=300, height=300):
        self.width = width
        self.height = height
        self.timerDelay = 250 # in milliseconds (set to None to turn off timer)
    def onMousePressedWrapper(self, event):
        # Dispatch to the subclass hook, then repaint.
        self.onMousePressed(event)
        self.redrawAll()
    def onKeyPressedWrapper(self, event):
        # Dispatch to the subclass hook, then repaint.
        self.onKeyPressed(event)
        self.redrawAll()
    def onTimerFiredWrapper(self):
        if (self.timerDelay == None):
            return # turns off timer
        self.onTimerFired()
        self.redrawAll()
        # Re-schedule this wrapper so the timer keeps firing.
        self.canvas.after(self.timerDelay, self.onTimerFiredWrapper)
    def run(self):
        # create the root and the canvas
        self.root = Tk()
        self.canvas = Canvas(self.root, width=self.width, height=self.height)
        self.canvas.pack()
        self.initAnimation()
        # set up events
        # DK: You can use a local function with a closure
        # to store the canvas binding, like this:
        def f(event): self.onMousePressedWrapper(event)
        self.root.bind("<Button-1>", f)
        # DK: Or you can just use an anonymous lamdba function, like this:
        self.root.bind("<Key>", lambda event: self.onKeyPressedWrapper(event))
        self.onTimerFiredWrapper()
        # and launch the app (This call BLOCKS, so your program waits
        # until you close the window!)
        self.root.mainloop()
# EventBasedAnimationClass(300,300).run()
|
ngageoint/scale | scale/job/execution/container.py | Python | apache-2.0 | 2,514 | 0.003182 | """Defines the methods for handling file systems in the job execution's local container volume"""
from __future__ import unicode_literals
import os
from storage.container import SCALE_ROOT_PATH
# Canonical in-container mount points for a job execution's input and
# output data, rooted under the Scale root path.
SCALE_JOB_EXE_INPUT_PATH = os.path.join(SCALE_ROOT_PATH, 'input_data')
SCALE_JOB_EXE_OUTPUT_PATH = os.path.join(SCALE_ROOT_PATH, 'output_data')
# JSON metadata file describing the input data, placed inside the input dir.
SCALE_INPUT_METADATA_PATH = os.path.join(SCALE_ROOT_PATH, 'input_data', 'scale-input-metadata.json')
def get_job_exe_input_vol_name(job_exe):
    """Return the container input volume name for the given job execution.

    :param job_exe: The job execution model (must not be queued) with related job and job_type fields
    :type job_exe: :class:`job.models.JobExecution`
    :returns: The container input volume name
    :rtype: string
    :raises Exception: If the job execution is still queued
    """

    return '{0}_input_data'.format(job_exe.get_cluster_id())
def get_job_exe_output_vol_name(job_exe):
    """Return the container output volume name for the given job execution.

    :param job_exe: The job execution model (must not be queued) with related job and job_type fields
    :type job_exe: :class:`job.models.JobExecution`
    :returns: The container output volume name
    :rtype: string
    :raises Exception: If the job execution is still queued
    """

    return '{0}_output_data'.format(job_exe.get_cluster_id())
def get_mount_volume_name(job_exe, mount_name):
    """Return the name of the mount's container volume for the given job execution.

    :param job_exe: The job execution model (must not be queued) with related job and job_type fields
    :type job_exe: :class:`job.models.JobExecution`
    :param mount_name: The name of the mount
    :type mount_name: string
    :returns: The mount's container volume name
    :rtype: string
    :raises Exception: If the job execution is still queued
    """

    return '{0}_mount_{1}'.format(job_exe.get_cluster_id(), mount_name)
def get_workspace_volume_name(job_exe, workspace):
    """Return the name of the workspace's container volume for the given job execution.

    :param job_exe: The job execution model (must not be queued) with related job and job_type fields
    :type job_exe: :class:`job.models.JobExecution`
    :param workspace: The name of the workspace
    :type workspace: string
    :returns: The workspace's container volume name
    :rtype: string
    :raises Exception: If the job execution is still queued
    """

    return '{0}_wksp_{1}'.format(job_exe.get_cluster_id(), workspace)
|
quake0day/oj | Simplify Path.py | Python | mit | 708 | 0.055085 | class Solution:
# @param {string} path the original path
# @return {string} the simplified path
def simplifyPath(self, path):
# Write your code here
stack = []
i = 0
res = ""
while i < len(path):
end = i + 1
| while end < len(path) and path[end] != "/":
end += 1
sub = path[i+1 : end]
if len(sub) > 0:
if sub == "..":
if stack != []:
stack.pop()
elif sub != ".":
stack.append(sub)
i = end
if stack == []:
return "/"
for i in stack:
res += "/"+i
return re | s
# Ad-hoc manual check (note: Python 2 ``print`` statement).
a = Solution()
print a.simplifyPath("/...")
alefnula/samovar | src/samovar/parsing/__init__.py | Python | bsd-3-clause | 355 | 0 | __author__ = 'Viktor Kerkez <alefnula@gmail.com>'
__date__ = '19 January 2013'
__copyright__ = 'C | opyright (c) 2013 Viktor Kerkez'
from .token import *
from .style import *
from .lexer import *
from .formatter import *
__all__ = ['Token', 'Lexer', 'RegexLexer', 'Formatte | r', 'ConsoleFormatter',
'Style', 'StyleAdapter', 'ConsoleStyleAdapter']
|
renalreg/radar | fabfile.py | Python | agpl-3.0 | 4,141 | 0.002415 | from contextlib import contextmanager
import binascii
import os
from pathlib import Path
import tempfile
from fabric import task
from pkg_resources import parse_version
# Local directory where built platter archives are collected.
DEFAULT_DIST_DIR = "dist"
# run with
# fab -H root@some_host --prompt-for-login-password deploy|build
@contextmanager
def temp(c):
    """Create a unique scratch directory on the remote host, cd into it
    for the duration of the ``with`` block, and always remove it again.

    Bug fix: the cleanup ``rm -rf`` previously ran only on the success
    path, leaking the remote directory whenever the body raised; it is
    now guarded by try/finally.
    """
    randomstr = binascii.hexlify(os.urandom(20)).decode("utf-8")
    tmp = "/tmp/radar-{0}".format(randomstr)
    c.run("mkdir {0}".format(tmp))
    try:
        with c.cd(tmp):
            yield tmp
    finally:
        c.run("rm -rf {0}".format(tmp))
@task
def build(c, rev="HEAD"):
    """Build a platter distribution of git revision *rev* on the remote
    host and download the resulting archive into DEFAULT_DIST_DIR.

    Bug fix: uses ``tempfile.mkstemp`` instead of the race-prone,
    long-deprecated ``tempfile.mktemp`` -- the file now exists before its
    name is handed to the shell; the ``gzip > file`` redirection below
    simply truncates it.
    """
    fd, archive = tempfile.mkstemp(suffix=".tar.gz")
    os.close(fd)
    # Produce a gzipped source archive of the requested revision locally.
    c.local(
        'git archive "{rev}" | gzip > "{archive}"'.format(rev=rev, archive=archive),
        env=os.environ
    )
    with temp(c) as cwd:
        c.put(archive, cwd + "/src.tar.gz")
        c.run("tar -xzf src.tar.gz")
        # Build a relocatable virtualenv-based distribution with platter.
        c.run(
            "PATH=/usr/pgsql-9.4/bin:$PATH "
            "platter build "
            "--virtualenv-version 15.1.0 "
            "-p python3 -r requirements.txt ."
        )
        if not os.path.exists(DEFAULT_DIST_DIR):
            os.makedirs(DEFAULT_DIST_DIR)
        # The dist directory contains exactly one archive; fetch it.
        result = c.run("ls " + cwd + "/dist/")
        fname = result.stdout.strip()
        c.get(cwd + "/dist/" + fname, os.path.join(DEFAULT_DIST_DIR, fname))
    # Remove the local temporary source archive.
    path = Path(archive)
    if path.exists():
        path.unlink()
@task
def deploy(c, archive=None, name="radar"):
if archive is None:
archive = os.path.join("dist", sorted(os.listdir("dist"), key=parse_version)[-1])
with temp(c) as cwd:
c.put(archive, cwd + "/radar.tar.gz")
c.run("tar --strip-components=1 -xzf radar.tar.gz")
version = c.run("cat VERSION").stdout.strip()
current_version = "/srv/{name}/current".format(name=name)
new_version = "/srv/{name}/{version}".format(name=name, version=version)
c.run("rm -rf {0}".format(new_version))
c.run(cwd + "/install.sh {0}".format(new_version))
c.run("ln -sfn {0} {1}".format(new_version, current_version))
services = [
"radar-admin",
"radar-api",
"radar-ukrdc-exporter-celery",
"radar-ukrdc-importer-api",
"radar-ukrdc-importer-celery",
]
# Restart services
# TODO replace with try-reload-or-restart when available in our version of systemd
cmd = "if systemctl is-active {0} >/dev/null; then systemctl reload-or-restart {0}; fi"
for service in services:
print("Running: ", cmd.format(service))
c.run(cmd.format(service))
# @task
# def dump():
# with temp():
# run_db('dump radar.sql')
# get('radar.sql', '.')
# def run_db(args):
# run(
# "RADAR_SETTINGS=/etc/radar-api/settings.py /srv/radar/current/bin/radar-db {0}".format(
# args
# )
# )
# def run_fixtures(args):
# run(
# "RADAR_SETTINGS=/etc/radar-api/settings.py /srv/radar/current/bin/radar-fixtures {0}".format(
# args
# )
# )
# @task
# def staging():
# answer = prompt(
# 'Are you sure you want to DELETE ALL DATA on "{0}" '
# 'and replace it with test data? (type "I am sure" to continue):'.format(
# env.host_string
# )
# )
# if answer != "I am sure":
# abort("Aborted!")
# run_fixtures("all")
# run_fixtures('all')
# @task
# def demo():
# answer = prompt(
# 'Are you sure you want to DELETE ALL DATA on "{0}" '
# 'and replace it with demo data? (type "I am sure" to continue):'.format(
# env.host_string
# )
# )
# if answer != "I am sure":
# abort("Aborted!")
# password = None
# while not password:
# password = prompt("Choose a password:")
# with temp():
# put("radar.sql", "radar.sql")
# run_db("drop")
# run_db("create")
# run_db("restore radar.sql") # Note: user must be a PostgreSQL superuser to run this
# run_fixtures("users --password {0}".format(password))
# run_fixtures("patients --patients 95 --no-data")
# run_fixtures("patients --patients 5 --data")
|
plotly/python-api | packages/python/plotly/plotly/validators/funnel/hoverlabel/font/_colorsrc.py | Python | mit | 470 | 0.002128 | import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="colorsrc", parent_ | name="funnel.hoverlabel.font", **kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name= | parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/google/appengine/datastore/datastore_v3_pb.py | Python | bsd-3-clause | 282,355 | 0.021179 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.api.api_base_pb import *
import google.appengine.api.api_base_pb
from google.appengine.datastore.action_pb import *
import google.appengine.datastore.action_pb
from google.appengine.datastore.entity_pb import *
import google.appengine.datastore.entity_pb
from google.appengine.datastore.snapshot_pb import *
import google.appengine.datastore.snapshot_pb
class InternalHeader(ProtocolBuffer.ProtocolMessage):
has_requesting_app_id_ = 0
requesting_app_id_ = ""
has_requesting_project_id_ = 0
requesting_project_id_ = ""
has_requesting_version_id_ = 0
requesting_version_id_ = ""
has_api_settings_ = 0
api_settings_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def requesting_app_id(self): return self.requesting_app_id_
def set_requesting_app_id(self, x):
self.has_requesting_app_id_ = 1
self.requesting_app_id_ = x
def clear_requesting_app_id(self):
if self.has_requesting_app_id_:
self.has_requesting_app_id_ = 0
self.requesting_app_id_ = ""
def has_requesting_app_id(self): return self.has_requesting_app_id_
def requesting_project_id(self): return self.requesting_project_id_
def set_requesting_project_id(self, x):
self.has_requesting_project_id_ = 1
self.requesting_project_id_ = x
def clear_requesting_project_id(self):
if self.has_requesting_project_id_:
self.has_requesting_project_id_ = 0
self.requesting_project_id_ = ""
def has_requesting_project_id(self): return self.has_requesting_project_id_
def requesting_version_id(self): return self.requesting_version_id_
def set_requesting_version_id(self, x):
self.has_requesting_version_id_ = 1
self.requesting_version_id_ = x
def clear_requesting_version_id(self):
if self.has_requesting_version_id_:
self.has_requesting_version_id_ = 0
self.requesting_version_id_ = ""
def has_requesting_version_id(self): return self.has_requesting_version_id_
def api_settings(self): return self.api_settings_
| def set_api_settings(self, x):
self.has_api_settings_ = 1
self.api_settings_ = x
def clear_api_settings(self):
if self.has_api_settings_:
self.has_api_settings_ = 0
self.api_settings_ = ""
def ha | s_api_settings(self): return self.has_api_settings_
def MergeFrom(self, x):
assert x is not self
if (x.has_requesting_app_id()): self.set_requesting_app_id(x.requesting_app_id())
if (x.has_requesting_project_id()): self.set_requesting_project_id(x.requesting_project_id())
if (x.has_requesting_version_id()): self.set_requesting_version_id(x.requesting_version_id())
if (x.has_api_settings()): self.set_api_settings(x.api_settings())
def Equals(self, x):
if x is self: return 1
if self.has_requesting_app_id_ != x.has_requesting_app_id_: return 0
if self.has_requesting_app_id_ and self.requesting_app_id_ != x.requesting_app_id_: return 0
if self.has_requesting_project_id_ != x.has_requesting_project_id_: return 0
if self.has_requesting_project_id_ and self.requesting_project_id_ != x.requesting_project_id_: return 0
if self.has_requesting_version_id_ != x.has_requesting_version_id_: return 0
if self.has_requesting_version_id_ and self.requesting_version_id_ != x.requesting_version_id_: return 0
if self.has_api_settings_ != x.has_api_settings_: return 0
if self.has_api_settings_ and self.api_settings_ != x.api_settings_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_requesting_app_id_): n += 1 + self.lengthString(len(self.requesting_app_id_))
if (self.has_requesting_project_id_): n += 1 + self.lengthString(len(self.requesting_project_id_))
if (self.has_requesting_version_id_): n += 1 + self.lengthString(len(self.requesting_version_id_))
if (self.has_api_settings_): n += 1 + self.lengthString(len(self.api_settings_))
return n
def ByteSizePartial(self):
n = 0
if (self.has_requesting_app_id_): n += 1 + self.lengthString(len(self.requesting_app_id_))
if (self.has_requesting_project_id_): n += 1 + self.lengthString(len(self.requesting_project_id_))
if (self.has_requesting_version_id_): n += 1 + self.lengthString(len(self.requesting_version_id_))
if (self.has_api_settings_): n += 1 + self.lengthString(len(self.api_settings_))
return n
def Clear(self):
self.clear_requesting_app_id()
self.clear_requesting_project_id()
self.clear_requesting_version_id()
self.clear_api_settings()
def OutputUnchecked(self, out):
if (self.has_requesting_app_id_):
out.putVarInt32(18)
out.putPrefixedString(self.requesting_app_id_)
if (self.has_api_settings_):
out.putVarInt32(26)
out.putPrefixedString(self.api_settings_)
if (self.has_requesting_project_id_):
out.putVarInt32(34)
out.putPrefixedString(self.requesting_project_id_)
if (self.has_requesting_version_id_):
out.putVarInt32(42)
out.putPrefixedString(self.requesting_version_id_)
def OutputPartial(self, out):
if (self.has_requesting_app_id_):
out.putVarInt32(18)
out.putPrefixedString(self.requesting_app_id_)
if (self.has_api_settings_):
out.putVarInt32(26)
out.putPrefixedString(self.api_settings_)
if (self.has_requesting_project_id_):
out.putVarInt32(34)
out.putPrefixedString(self.requesting_project_id_)
if (self.has_requesting_version_id_):
out.putVarInt32(42)
out.putPrefixedString(self.requesting_version_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 18:
self.set_requesting_app_id(d.getPrefixedString())
continue
if tt == 26:
self.set_api_settings(d.getPrefixedString())
continue
if tt == 34:
self.set_requesting_project_id(d.getPrefixedString())
continue
if tt == 42:
self.set_requesting_version_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_requesting_app_id_: res+=prefix+("requesting_app_id: %s\n" % self.DebugFormatString(self.requesting_app_id_))
if self.has_requesting_project_id_: res+=prefix+("requesting_project_id: %s\n" % self.DebugFormatString(self.requesting_project_id_))
if self.has_requesting_version_id_: res+=prefix+("requesting_version_id: %s\n" % self.DebugFormatString(self.requesting_version_id_))
if self.has_api_settings_: res+=prefix+("api_settings: %s\n" % self.DebugFormatString(self.api_settings_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
krequesting_app_id = 2
krequesting_project_id = 4
krequesting_version_id = 5
kapi_settings = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
2: "requesting_app_id",
|
kk47/Python | django/td2.0/app/models.py | Python | lgpl-3.0 | 4,820 | 0.042087 | # coding: utf-8
from django.db import models
class Room(models.Model):
U_NUMBER = (
(u'世纪互联','世纪互联'),
(u'鹏博士IDC','鹏博士IDC'),
(u'兆维','兆维')
)
jifang = models.CharField(verbose_name="机房名称",max_length=10,choices=U_NUMBER,default=0)
jigui = models.CharField(verbose_name="机柜号",max_length=20,unique=True)
start_time = models.DateTimeField(verbose_name="起始时间")
end_time = models.DateTimeField(verbose_name="截止时间")
class Meta:
verbose_name="机房"
verbose_name_plural="机房"
def __unicode__(self):
return self.jigui
class Switch(models.Model):
U_device = (
(u'route','route'),
(u'switch','switch'),
(u'firewall','firewall')
)
ip = models.IPAddressField(verbose_name="IP",max_length=50,unique=True)
device = models.CharField(verbose_name="设备类型",max_length=10,choices=U_device,default=0)
devices = models.CharField(verbose_name="设备型号",max_length=60)
port = models.IntegerField(max_length=2,verbose_name='交换机总口数')
paihao = models.IntegerField(max_length=2, verbose_name='交换机位置')
idroom = models.ForeignKey(Room,on_delete=models.PROTECT)
def __unicode__(self):
return self.ip
class Mac(models.Model):
U_NUMBER = (
(u'1U','1U'),
(u'2U','2U'),
(u'4U','4U'),
(u'6U','6U'),
(u'刀片机','刀片机')
)
DISK = (
(u'146G*6','146G*6'),
(u'146G*8','146G*8'),
(u'300G','300G'),
(u'300G*6','300G*6'),
(u'300G*8','300G*8'),
(u'500G*6','500G*6'),
(u'500G*8','500G*8'),
(u'1TB*6','1TB*6'),
(u'1TB*8','1TB*8'),
(u'2TB*6','2TB*6'),
(u'2TB*8','2TB*8')
)
MEM = (
(u'4G*2','4G*2'),
(u'4G*3','4G*3'),
(u'4G*4','4G*4'),
(u'4G*5','4G*5'),
(u'4G*6','4G*6'),
(u'4G*7','4G*7'),
(u'4G*8','4G*8'),
(u'4G*9','4G*9'),
(u'4G*10','4G*10'),
(u'4G*12','4G*12'),
(u'4G*14','4G*14'),
(u'4G*16','4G*16'),
(u'4G*18','4G*18')
)
eth0 = models.CharField(verbose_name="eth0 MAC",max_length=50,unique=True)
eth1 = models.CharField(verbose_name="eth1 MAC",max_length=50,unique=True)
eth2 = models.CharField(verbose_name="eth2 MAC",max_length=50,unique=True)
eth3 = models.CharField(verbose_name="eth3 MAC",max_length=50,unique=True)
qcode = models.CharField(verbose_name="快速服务代码",max_length=50,unique=True)
cpu = models.CharField(verbose_name="CPU",max_length=30)
mem = models.CharField(verbose_name="内存",max_length=30,choices=MEM,default=0)
disk = models.CharField(verbose_name="硬盘",max_length=30,choices=DISK,default=0)
uname = models.CharField(verbose_name="U数",max_length=10,choices=U_NUMBER,default=0)
paihao = models.IntegerField(max_length=2, verbose_name='服务器位置')
idroom = models.ForeignKey(Room,on_delete=models.PROTECT)
def __unicode__(self):
return self.eth0,self.eth1,self.eth2,self.eth3
class Server(models.Model):
U_device = (
(u'server','server'),
(u'vm','vm')
)
ip = models.IPAddressField(verbose_name="IP",max_length=50,unique=True)
device = models.CharField(verbose_name="设备类型",max_length=10,choices=U_device,default=0)
devices = models.CharField(verbose_name="设备型号",max_length=60)
mouth = models.IntegerField(verbose_name="对联交换口",max_length=2)
fuwu = models.CharField(verbose_name="运行服务",max_length=30)
version = models.CharField(verbose_name="服务版本",max_length=30)
ports = models.IntegerField(verbose_name="服务端口" | ,max_length=2)
configid = models.CharField(verbose_name="Configld",max_length=30)
whoandyou = models.CharField(verbose_name="被谁依赖",max_length=30)
youandwho = models.CharField(verbose_name="依赖于谁",max_length=30)
start_time = models.DateTimeField(verbose_name="起始时间")
end_time = models.DateTimeField(verbose_name="截至时间")
is_avlie = models.IntegerField(max_length=1,verbose_name="机器是否存活",default=1)
idroom = models.ForeignKey(Room,on_delete=models.PROTEC | T)
idmac = models.ForeignKey(Mac,on_delete=models.PROTECT,related_name='Mac__paihao')
idswitch = models.ForeignKey(Switch,on_delete=models.PROTECT)
def __unicode__(self):
return self.ip
class Repair(models.Model):
repair = models.TextField(verbose_name="维修记录",max_length=100)
idmac = models.ForeignKey(Mac,on_delete=models.PROTECT)
def __unicode__(self):
return self.repair
|
bloomark/f13x | assign_public_addresses.py | Python | bsd-3-clause | 2,207 | 0.004078 | import hyperdex.client
import smtplib
from bitcoin.core import COIN, b2lx
import bitcoin.wallet
import bitcoin.rpc
try:
from bitcoin.case58 import CBitcoinAddress
except:
from bitcoin.wallet import CBitcoinAddress
from config import SERVER_DB_ADDRESS, MAIL_USERNAME, MAIL_PASSWORD, NAME
c = hyperdex.client.Client(SERVER_DB_ADDRESS, 1982)
EMAIL = MAIL_USERNAME
username = MAIL_USERNAME
password = MAIL_PASSWORD
server = smtplib.SMTP("smtp.gmail.com:587")
server.starttls()
server.login(username,password)
bitcoin.SelectParams(NAME)
rpc = bitcoin.rpc.Proxy(timeout=900)
uninitiated_users = list(c.search('users', {'bitcoin_address': ''}))
for user in uninitiated_users:
x = c.begin_transaction()
num = c.count('bitcoin_addresses', {})
if num == 0:
server.sendmail(EMAIL, EMAIL, "[BTC_KEY] Empty")
break
else:
pub_key = ''
bitcoin_addresses = c.sorted_search('bitcoin_addresses', {}, 'pub_key_foo', 1, 'min')
for bitcoin_key in bitcoin_addresses:
pub_key = bitcoin_key['pub_key_foo']
x.delete('bitcoin_addresses', pub_key)
x.put('users', user['email'], {'bitcoin_address': pub_key})
rpc.impo | rtaddress(pub_key, label=user['email'])
server.sendmail(EMAIL, EMAIL, "[BTC_KEY] %s assigned to %s" % (pub_key, user['email']))
x.commi | t()
uninitiated_users = list(c.search('users', {'dogecoin_address': ''}))
for user in uninitiated_users:
x = c.begin_transaction()
num = c.count('dogecoin_addresses', {})
if num == 0:
server.sendmail(EMAIL, EMAIL, "[DOGE_KEY] Empty")
break
else:
pub_key = ''
dogecoin_addresses = c.sorted_search('dogecoin_addresses', {}, 'pub_key_foo', 1, 'min')
for dogecoin_key in dogecoin_addresses:
pub_key = dogecoin_key['pub_key_foo']
x.delete('dogecoin_addresses', pub_key)
x.put('users', user['email'], {'dogecoin_address': pub_key})
# Dogecoin doesn't yet support import address :(
# dogecoin_rpc.importaddress(pub_key, label=user['email'])
server.sendmail(EMAIL, EMAIL, "[DOGE_KEY] %s assigned to %s" % (pub_key, user['email']))
x.commit()
server.quit()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.