repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
eduNEXT/edx-platform | openedx/core/djangoapps/session_inactivity_timeout/middleware.py | Python | agpl-3.0 | 2,044 | 0.003425 | """
Middleware to auto-expire inactive sessions after N seconds, which is configurable in
settings.
To enable this feature, set in a settings.py:
SESSION_INACTIVITY_TIMEOUT_IN_SECS = 300
This was taken from StackOverflow (http://stackoverflow.com/questions/14830669/how-to-expire-django-session-in-5minutes)
"""
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib import auth
from django.utils.deprecation import MiddlewareMixin
LAST_TOUCH_KEYNAME = 'SessionInactivityTimeout:last_touch'
class SessionInactivityTimeout(MiddlewareMixin):
"""
Middleware class to keep track of activity on a given session
"""
def process_request(self, request):
"""
Standard entry point for processing requests in Django
"""
if not hasattr(request, "user") or not request.user.is_authenticated:
#Can't log out if not logged in
return
timeout_in_seconds = getattr(settings, "SESSION_INACTIVITY_TIMEOUT_IN_SECONDS", None)
# Do we have this feature enabled?
if timeout_in_seconds:
# what time is it now?
utc_now = datetime.utcnow()
# Get the last ti | me user made a request to server, which is stored in session data
last_touch = request.session.get(LAST_TOUCH_KEYNAME)
# have we stored a 'last visited' in session? NOTE: first time access after login
# this ke | y will not be present in the session data
if last_touch:
# compute the delta since last time user came to the server
time_since_last_activity = utc_now - last_touch
# did we exceed the timeout limit?
if time_since_last_activity > timedelta(seconds=timeout_in_seconds):
# yes? Then log the user out
del request.session[LAST_TOUCH_KEYNAME]
auth.logout(request)
return
request.session[LAST_TOUCH_KEYNAME] = utc_now
|
jarped/QGIS | python/plugins/db_manager/dlg_sql_window.py | Python | gpl-2.0 | 12,633 | 0.001662 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
The content of this file is based on
- PG_Manager by Martin Dobias (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import Qt, QObject, QSettings, QByteArray, SIGNAL, pyqtSignal
from PyQt4.QtGui import QDialog, QWidget, QAction, QKeySequence, \
QDialogButtonBox, QApplication, QCursor, QMessageBox, QClipboard, QInputDialog, QIcon
from PyQt4.Qsci import QsciAPIs
from qgis.core import QgsProject
from .db_plugins.plugin import BaseError
from .dlg_db_error import DlgDbError
from .dlg_query_builder import QueryBuilderDlg
try:
from qgis.gui import QgsCodeEditorSQL
except:
from .sqledit import SqlEdit
from qgis import gui
gui.QgsCodeEditorSQL = SqlEdit
from .ui.ui_DlgSqlWindow import Ui_DbManagerDlgSqlWindow as Ui_Dialog
import re
class DlgSqlWindow(QWidget, Ui_Dialog):
nameChanged = pyqtSignal(str)
def __init__(self, iface, db, parent=None):
QWidget.__init__(self, parent)
self.iface = iface
self.db = db
self.setupUi(self)
self.setWindowTitle(
u"%s - %s [%s]" % (self.windowTitle(), db.connection().connectionName(), db.connection().typeNameString()))
self.defaultLayerName = 'QueryLayer'
self.editSql.setFocus()
self.editSql.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.initCompleter()
# allow to copy results
copyAction = QAction("copy", self)
self.viewResult.addAction(copyAction)
copyAction.setShortcuts(QKeySequence.Copy)
copyAction.triggered.connect(self.copySelectedResu | lts)
self.btnExecute.clicked.connect(self.executeSql)
self.btnClear.clicked.connect(self.clearSql)
self.presetStore.clicked.connect(self.storePreset)
self.presetDelete.clicked.connect(self | .deletePreset)
self.presetCombo.activated[str].connect(self.loadPreset)
self.presetCombo.activated[str].connect(self.presetName.setText)
self.updatePresetsCombobox()
# hide the load query as layer if feature is not supported
self._loadAsLayerAvailable = self.db.connector.hasCustomQuerySupport()
self.loadAsLayerGroup.setVisible(self._loadAsLayerAvailable)
if self._loadAsLayerAvailable:
self.layerTypeWidget.hide() # show if load as raster is supported
self.loadLayerBtn.clicked.connect(self.loadSqlLayer)
self.getColumnsBtn.clicked.connect(self.fillColumnCombos)
self.loadAsLayerGroup.toggled.connect(self.loadAsLayerToggled)
self.loadAsLayerToggled(False)
self._createViewAvailable = self.db.connector.hasCreateSpatialViewSupport()
self.btnCreateView.setVisible(self._createViewAvailable)
if self._createViewAvailable:
self.btnCreateView.clicked.connect(self.createView)
self.queryBuilderFirst = True
self.queryBuilderBtn.setIcon(QIcon(":/db_manager/icons/sql.gif"))
self.queryBuilderBtn.clicked.connect(self.displayQueryBuilder)
self.presetName.textChanged.connect(self.nameChanged)
def updatePresetsCombobox(self):
self.presetCombo.clear()
names = []
entries = QgsProject.instance().subkeyList('DBManager', 'savedQueries')
for entry in entries:
name = QgsProject.instance().readEntry('DBManager', 'savedQueries/' + entry + '/name')[0]
names.append(name)
for name in sorted(names):
self.presetCombo.addItem(name)
self.presetCombo.setCurrentIndex(-1)
def storePreset(self):
query = self._getSqlQuery()
if query == "":
return
name = self.presetName.text()
QgsProject.instance().writeEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()) + '/name', name)
QgsProject.instance().writeEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()) + '/query', query)
index = self.presetCombo.findText(name)
if index == -1:
self.presetCombo.addItem(name)
self.presetCombo.setCurrentIndex(self.presetCombo.count() - 1)
else:
self.presetCombo.setCurrentIndex(index)
def deletePreset(self):
name = self.presetCombo.currentText()
QgsProject.instance().removeEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()))
self.presetCombo.removeItem(self.presetCombo.findText(name))
self.presetCombo.setCurrentIndex(-1)
def loadPreset(self, name):
query = QgsProject.instance().readEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()) + '/query')[0]
name = QgsProject.instance().readEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()) + '/name')[0]
self.editSql.setText(query)
def loadAsLayerToggled(self, checked):
self.loadAsLayerGroup.setChecked(checked)
self.loadAsLayerWidget.setVisible(checked)
def clearSql(self):
self.editSql.clear()
self.editSql.setFocus()
def executeSql(self):
sql = self._getSqlQuery()
if sql == "":
return
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
# delete the old model
old_model = self.viewResult.model()
self.viewResult.setModel(None)
if old_model:
old_model.deleteLater()
self.uniqueCombo.clear()
self.geomCombo.clear()
try:
# set the new model
model = self.db.sqlResultModel(sql, self)
self.viewResult.setModel(model)
self.lblResult.setText(self.tr("%d rows, %.1f seconds") % (model.affectedRows(), model.secs()))
except BaseError as e:
QApplication.restoreOverrideCursor()
DlgDbError.showError(e, self)
return
cols = sorted(self.viewResult.model().columnNames())
self.uniqueCombo.addItems(cols)
self.geomCombo.addItems(cols)
self.update()
QApplication.restoreOverrideCursor()
def loadSqlLayer(self):
hasUniqueField = self.uniqueColumnCheck.checkState() == Qt.Checked
if hasUniqueField:
uniqueFieldName = self.uniqueCombo.currentText()
else:
uniqueFieldName = None
hasGeomCol = self.hasGeometryCol.checkState() == Qt.Checked
if hasGeomCol:
geomFieldName = self.geomCombo.currentText()
else:
geomFieldName = None
query = self._getSqlQuery()
if query == "":
return
# remove a trailing ';' from query if present
if query.strip().endswith(';'):
query = query.strip()[:-1]
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
from qgis.core import QgsMapLayer, QgsMapLayerRegistry
layerType = QgsMapLayer.VectorLayer if self.vectorRadio.isChecked() else QgsMapLayer.RasterLayer
# get a new layer name
names = []
for layer in QgsMapLayerRegistry.instance().mapLayers().values():
names.append(layer.name())
layerName = self.layerNameEdit.text()
if layerName == "":
layerName = self.defaultLayerName
newLa |
ity/pants | src/python/pants/engine/legacy/graph.py | Python | apache-2.0 | 11,698 | 0.008036 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from hashlib import sha1
from pants.base.exceptions import TargetDefinitionException
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.build_graph import BuildGraph
from pants.engine.fs import Files, FilesContent, PathGlobs
from pants.engine.legacy | .structs import BundleAdaptor, BundlesField, SourcesField, TargetAdaptor
from pants.engine.nodes import Return, SelectNode, State, Throw
from pants.engine.selectors import Select, SelectDependencies, SelectProjection
from pants | .source.wrapped_globs import EagerFilesetWithSpec
from pants.util.dirutil import fast_relpath
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class LegacyBuildGraph(BuildGraph):
"""A directed acyclic graph of Targets and dependencies. Not necessarily connected.
This implementation is backed by a Scheduler that is able to resolve LegacyTargets.
"""
class InvalidCommandLineSpecError(AddressLookupError):
"""Raised when command line spec is not a valid directory"""
def __init__(self, scheduler, engine, symbol_table_cls):
"""Construct a graph given a Scheduler, Engine, and a SymbolTable class.
:param scheduler: A Scheduler that is configured to be able to resolve LegacyTargets.
:param engine: An Engine subclass to execute calls to `inject`.
:param symbol_table_cls: A SymbolTable class used to instantiate Target objects. Must match
the symbol table installed in the scheduler (TODO: see comment in `_instantiate_target`).
"""
self._scheduler = scheduler
self._graph = scheduler.product_graph
self._target_types = symbol_table_cls.aliases().target_types
self._engine = engine
super(LegacyBuildGraph, self).__init__()
def _index(self, roots):
"""Index from the given roots into the storage provided by the base class.
This is an additive operation: any existing connections involving these nodes are preserved.
"""
all_addresses = set()
new_targets = list()
# Index the ProductGraph.
for node, state in self._graph.walk(roots=roots):
# Locate nodes that contain LegacyTarget values.
if type(state) is Throw:
raise AddressLookupError(
'Build graph construction failed for {}:\n {}'.format(node.subject, state.exc))
elif type(state) is not Return:
State.raise_unrecognized(state)
if node.product is not LegacyTarget:
continue
if type(node) is not SelectNode:
continue
# We have a successfully parsed LegacyTarget, which includes its declared dependencies.
address = state.value.adaptor.address
all_addresses.add(address)
if address not in self._target_by_address:
new_targets.append(self._index_target(state.value))
# Once the declared dependencies of all targets are indexed, inject their
# additional "traversable_(dependency_)?specs".
deps_to_inject = set()
addresses_to_inject = set()
def inject(target, dep_spec, is_dependency):
address = Address.parse(dep_spec, relative_to=target.address.spec_path)
if not any(address == t.address for t in target.dependencies):
addresses_to_inject.add(address)
if is_dependency:
deps_to_inject.add((target.address, address))
for target in new_targets:
for spec in target.traversable_dependency_specs:
inject(target, spec, is_dependency=True)
for spec in target.traversable_specs:
inject(target, spec, is_dependency=False)
# Inject all addresses, then declare injected dependencies.
self.inject_addresses_closure(addresses_to_inject)
for target_address, dep_address in deps_to_inject:
self.inject_dependency(dependent=target_address, dependency=dep_address)
return all_addresses
def _index_target(self, legacy_target):
"""Instantiate the given LegacyTarget, index it in the graph, and return a Target."""
# Instantiate the target.
address = legacy_target.adaptor.address
target = self._instantiate_target(legacy_target.adaptor)
self._target_by_address[address] = target
# Link its declared dependencies, which will be indexed independently.
self._target_dependencies_by_address[address].update(legacy_target.dependencies)
for dependency in legacy_target.dependencies:
self._target_dependees_by_address[dependency].add(address)
return target
def _instantiate_target(self, target_adaptor):
"""Given a TargetAdaptor struct previously parsed from a BUILD file, instantiate a Target.
TODO: This assumes that the SymbolTable used for parsing matches the SymbolTable passed
to this graph. Would be good to make that more explicit, but it might be better to nuke
the Target subclassing pattern instead, and lean further into the "configuration composition"
model explored in the `exp` package.
"""
target_cls = self._target_types[target_adaptor.type_alias]
try:
# Pop dependencies, which were already consumed during construction.
kwargs = target_adaptor.kwargs()
kwargs.pop('dependencies')
# Instantiate.
return target_cls(build_graph=self, **kwargs)
except TargetDefinitionException:
raise
except Exception as e:
raise TargetDefinitionException(
target_adaptor.address,
'Failed to instantiate Target with type {}: {}'.format(target_cls, e))
def inject_synthetic_target(self,
address,
target_type,
dependencies=None,
derived_from=None,
**kwargs):
target = target_type(name=address.target_name,
address=address,
build_graph=self,
**kwargs)
self.inject_target(target,
dependencies=dependencies,
derived_from=derived_from,
synthetic=True)
def inject_address_closure(self, address):
if address in self._target_by_address:
return
for _ in self._inject([address], expect_return_values=False):
pass
def inject_addresses_closure(self, addresses):
addresses = set(addresses) - set(self._target_by_address.keys())
if not addresses:
return
for _ in self._inject(addresses, expect_return_values=False):
pass
def inject_specs_closure(self, specs, fail_fast=None):
# Request loading of these specs.
for address in self._inject(specs):
yield address
def _inject(self, subjects, expect_return_values=True):
"""
Request LegacyTargets for each of the subjects.
If `expect_return_values` is True, yield resulting Addresses.
Otherwise no resulting Addresses will be computed.
"""
logger.debug('Injecting to {}: {}'.format(self, subjects))
request = self._scheduler.execution_request([LegacyTarget, Address], subjects)
legacy_target_roots = filter(lambda root: root.product is LegacyTarget, request.roots)
address_roots = filter(lambda root: root.product is Address, request.roots)
result = self._engine.execute(request)
if result.error:
raise result.error
# Update the base class indexes for this request.
self._index(legacy_target_roots)
if not expect_return_values:
return
existing_addresses = set()
for address_root in address_roots:
address_state = self._scheduler.root_entries(request)[address_root]
if not address_state.value:
raise self.InvalidCommandLineSpecError(
'Spec {} does not match any targets.'.format(address_root.subject))
for address in address_state.value:
if address not in existing_addresses:
existing_addresses.add(address)
yield addres |
130s/bloom | bloom/config.py | Python | bsd-3-clause | 12,185 | 0.001067 | # Software License Agreement (BSD License)
#
# Copyright (c) 2013, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import os
import shutil
import string
import yaml
from tempfile import mkdtemp
from bloom.git import branch_exists
from bloom.git import create_branch
from bloom.git import has_changes
from bloom.git import get_root
from bloom.git import inbranch
from bloom.git import show
from bloom.git import track_branches
from bloom.logging import error
from bloom.logging import fmt
from bloom.logging import info
from bloom.logging import sanitize
from bloom.util import execute_command
from bloom.util import my_copytree
BLOOM_CONFIG_BRANCH = 'master'
PLACEHOLDER_FILE = 'CONTENT_MOVED_TO_{0}_BRANCH'.format(BLOOM_CONFIG_BRANCH.upper())
config_spec = {
'name': {
'<name>': 'Name of the repository (used in the archive name)',
'upstream': 'Default value, leave this as upstream if you are unsure'
},
'vcs_uri': {
'<uri>': '''\
Any valid URI. This variable can be templated, for example an svn url
can be templated as such: "https://svn.foo.com/foo/tags/foo-:{version}"
where the :{version} token will be replaced with the version for this release.\
'''
},
'vcs_type': {
'git': 'Upstream URI is a git repository',
'hg': 'Upstream URI is a hg repository',
'svn': 'Upstream URI is a svn repository',
'tar': 'Upstream URI is a tarball'
},
'version': {
':{auto}': '''\
This means the version will be guessed from the devel branch.
This means that the devel branch must be set, the devel branch must exist,
and there must be a valid package.xml in the upstream devel branch.''',
':{ask}': '''\
This means that the user will be prompted for the version each release.
This also means that the upstream devel will be ignored.''',
'<version>': '''\
This will be the version used.
It must be updated for each new upstream version.'''
},
'release_tag': {
':{version}': '''\
This means that the release tag will match the :{version} tag.
This can be further templated, for example: "foo-:{version}" or "v:{version}"
This can describe any vcs reference. For git that means {tag, branch, hash},
for hg that means {tag, branch, hash}, for svn that means a revision number.
For tar this value doubles as the sub directory (if the repository is
in foo/ of the tar ball, putting foo here will cause the contents of
foo/ to be imported to upstream instead of foo itself).
''',
':{ask}': '''\
This means the user will be prompted for the release tag on each release.
''',
':{none}': '''\
For svn and tar only you can set the release tag to :{none}, so that
it is ignored. For svn this means no revision number is used.
'''
},
'devel_branch': {
'<vcs reference>': '''\
Branch in upstream repository on which to search for the version.
This is used only when version is set to ':{auto}'.
''',
},
'ros_distro': {
'<ROS distro>': 'This can be any valid ROS distro, e.g. groovy, hydro'
},
'patches': {
'<path in bloom branch>': '''\
This can be any valid relative path in the bloom branch. The contents
of this folder will be overlaid onto the upstream branch after each
import-upstream. Additionally, any package.xml files found in the
overlay will have the :{version} string replaced with the current
version being released.''',
':{none}': '''\
Use this if you want to disable overlaying of files.'''
},
'release_repo_url': {
'<url>': '''\
(optional) Used when pushing to remote release repositories. This is only
needed when the release uri which is in the rosdistro file is not writable.
This is useful, for example, when a releaser would like to use a ssh url
to push rather than a https:// url.
''',
':{none}': '''\
This indicates that the default release url should be used.
'''
}
}
class PromptEntry(object):
def __init__(self, name, default=None, values=None, prompt='', spec=None):
self.values = values
self.name = name
self.default = default
self.prompt = prompt
self.spec = spec
def __setattr__(sel | f, key, value):
if key == 'default' and self.values:
if value not in self.values:
error(
"Invalid input '{0}' for '{1}', acceptable values: {2}."
| .format(value, self.name, self.values),
exit=True
)
object.__setattr__(self, key, value)
def __str__(self):
msg = fmt('@_' + sanitize(self.name) + ':@|')
if self.spec is not None:
for key, val in self.spec.iteritems():
msg += '\n ' + key
for line in val.splitlines():
msg += '\n ' + line
else:
msg += '\n ' + self.prompt
msg += '\n '
if self.default is None:
msg += fmt(" @![@{yf}None@|@!]@|: ")
else:
msg += fmt(" @!['@{yf}" + sanitize(self.default) + "@|@!']@|: ")
return msg
DEFAULT_TEMPLATE = {
'name': PromptEntry('Repository Name', spec=config_spec['name'], default='upstream'),
'vcs_uri': PromptEntry('Upstream Repository URI', spec=config_spec['vcs_uri']),
'vcs_type': PromptEntry(
'Upstream VCS Type', default='git', spec=config_spec['vcs_type'],
values=['git', 'hg', 'svn', 'tar']),
'version': PromptEntry('Version', default=':{auto}', spec=config_spec['version']),
'release_tag': PromptEntry('Release Tag', default=':{version}', spec=config_spec['release_tag']),
'devel_branch': PromptEntry('Upstream Devel Branch', spec=config_spec['devel_branch']),
'patches': PromptEntry('Patches Directory', spec=config_spec['patches']),
'ros_distro': PromptEntry('ROS Distro', default='groovy', spec=config_spec['ros_distro']),
'release_repo_url': PromptEntry('Release Repository Push URL', spec=config_spec['release_repo_url']),
'release_inc': -1,
'actions': [
'bloom-export-upstream :{vcs_local_uri} :{vcs_type}'
' --tag :{release_tag} --display-uri :{vcs_uri}'
' --name :{name} --output-dir :{archive_dir_path}',
'git-bloom-import-upstream :{archive_path} :{patches}'
' --release-version :{version} --replace',
'git-bloom-generate -y rosrelease :{ros_distro}'
' --source upstream -i :{release_inc}',
'git-bloom-generate -y rosdebian --prefix release/:{ros_distro}'
' :{ros_distro} -i :{release_inc}'
]
}
CUSTOM_TEMPLATE = {
'reference': ':{ask}',
'patches': ':{name}'
}
config_template = {
't |
Tuxemon/Tuxemon | tuxemon/event/actions/print.py | Python | gpl-3.0 | 1,942 | 0 | #
# Tuxemon
# Copyright (c) 2014-2017 William Edwards <shadowapex@gmail.com>,
# Benjamin Bean <superman2k5@gmail.com>
#
# This file is part of Tuxemon
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
import logging
from tuxemon.event.eventaction import EventAction
from typing import NamedTuple, Optional, final
logger = logging.getLogger(__name__)
class PrintActionParameters(NamedTuple) | :
variable: Optional[str]
@final
class PrintAction(EventAction[PrintActionParameters]):
"""
Print the current value of a game variable to the console.
If no variable is specified, print out values of all game variables.
Script usage:
.. | code-block::
print
print <variable>
Script parameters:
variable: Optional, prints out the value of this variable.
"""
name = "print"
param_class = PrintActionParameters
def start(self) -> None:
player = self.session.player
variable = self.parameters.variable
if variable:
if variable in player.game_variables:
print(f"{variable}: {player.game_variables[variable]}")
else:
print(f"'{variable}' has not been set yet by map actions.")
else:
print(player.game_variables)
|
borg-project/borg | borg/regression.py | Python | mit | 4,929 | 0.005478 | """@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import numpy
import sklearn.svm
import sklearn.pipeline
import sklearn.linear_model
import sklearn.decomposition
import sklearn.kernel_approximation
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
class MultiClassifier(object):
def __init__(self, model_class, **model_kwargs):
self._model_class = model_class
self._model_kwargs = model_kwargs
def fit(self, X, Y):
(N, D) = Y.shape
(_, self._F) = X.shape
logger.info("fitting %i models to %i examples", D, N)
self._models = [None] * D
for d in xrange(D):
if d % 250 == 0:
logger.info("fit %i models so far", d)
if numpy.any(Y[:, d] > 0):
model = self._model_class(**self._model_kwargs)
model.fit(X, Y[:, d], class_weight = {0: 1.0, 1: 10.0})
else:
model = None
self._models[d] = model
return self
def predict_log_proba(self, X):
(M, F) = X.shape
D = len(self._models)
z = numpy.empty((M, D))
for (d, model) in enumerate(self._models):
if model is None:
z[:, d] = 0.0
else:
# TODO use predict_log_proba when it stops tossing warnings
z[:, d] = numpy.log(model.predict_proba(X)[:, 1] + 1e-64)
return z
def get_feature_weights(self):
coefs_list = []
for model in self._models:
if model is None:
coefs_list.append([0.0] * self._F)
else:
assert model.coef_.shape == (1, self._F)
coefs_list.append(model.coef_[0])
coefs = numpy.array(coefs_list)
weights = numpy.mean(numpy.abs(coefs), axis = 0)
return weights
def mapify_model_survivals(model):
"""Compute per-instance MAP survival functions."""
(P, S, D) = model.log_masses.shape
(_, F) = model.features.shape
unique_names = numpy.unique(model.names)
(N,) = unique_names.shape
masks = numpy.empty((N, P), bool)
map_survivals = numpy.empty((N, S, D))
features = numpy.empty((N, F))
logger.info("computing MAP RTDs over %i samples", P)
for (n, name) in enumerate(unique_names):
masks[n] = mask = model.names == name
features[n] = model.features[mask][0]
log_survivals = model.log_survival[mask]
log_weights = model.log_weights[mask]
log_weights -= numpy.logaddexp.reduce(log_weights)
map_survivals[n, :, :] = numpy.logaddexp.reduce(log_survivals + log_weights[:, None, None])
return (unique_names, masks, features, numpy.exp(map_survivals))
class NearestRTDRegression(object):
"""Predict nearest RTDs."""
def __init__(self, model):
self._model = model
(names, self._masks, features, survivals) = mapify_model_survivals(model)
(N, _) = features.shape
logger.info("computing %i^2 == %i inter-RTD distances", N, N * N)
distances = borg.bregman.survival_distances_all(survivals)
nearest = numpy.zeros((N, N), dtype = numpy.intc)
nearest_count = min(32, N / 4)
for n in xrange(N):
nearest[n, numpy.argsort(distances[n])[:nearest_count]] = 1
logger.info("fitting classifier to nearest RTDs")
classifier = MultiClassifier(sklearn.linear_model.LogisticRegression)
#classifier = MultiClassifier(sklearn.svm.SVC, scale_C = True, probability = True)
#classifier = MultiClassifier(sklearn.linear_model.LogisticRegression, penalty = "l1", C = 1e-1)
#classifier = MultiClassifier(sklearn.linear_model.LogisticRegression, penalty = "l2", C = 1e-2)
self._regression = \
sklearn.pipeline.Pipeline([
#("pca", sklearn.decomposition.PCA(whiten = True)),
#("kernel", sklearn.kernel_approximation.RBFSampler(n_components = 1000)),
("scaler", sklearn.preprocessing.Scaler()),
("classifier", classifier),
]) \
.fit(features, neares | t)
def predict(self, tasks, features):
"""Predict RTD probabilities."""
features = numpy.asarray(features)
(P,) = self._model.log_weights.shape
(N, _) = self._masks.shape
(M, F) = features.shape
predictions = self._regression.predict_log_proba(features)
weights = numpy.empty((M, P))
weights[:, :] = self._model.log_weights[None, :]
for n in xrange(N):
| weights[:, self._masks[n]] += predictions[:, n, None]
weights = numpy.exp(weights)
weights += 1e-64
weights /= numpy.sum(weights, axis = -1)[..., None]
return weights
@property
def classifier(self):
(_, classifier) = self._regression.steps[-1]
return classifier
|
Branlala/docker-sickbeardfr | sickbeard/lib/requests/packages/chardet2/escprober.py | Python | mit | 3,094 | 0.003878 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozill | a.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C | ) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .escsm import HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel, ISO2022KRSMModel
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
class EscCharSetProber(CharSetProber):
    """Prober for escape-sequence based encodings (HZ-GB-2312 and the
    ISO-2022 family).  A bank of coding state machines is fed byte by
    byte; the first machine to reach its "its me" state wins, and a
    machine that hits an error state is ruled out.
    """
    def __init__(self):
        CharSetProber.__init__(self)
        # One state machine per candidate escape-based encoding.
        self._mCodingSM = [
            CodingStateMachine(model)
            for model in (HZSMModel, ISO2022CNSMModel,
                          ISO2022JPSMModel, ISO2022KRSMModel)
        ]
        self.reset()
    def reset(self):
        """Re-arm every state machine and clear any previous detection."""
        CharSetProber.reset(self)
        for machine in self._mCodingSM:
            if not machine:
                continue
            machine.active = True
            machine.reset()
        self._mActiveSM = len(self._mCodingSM)
        self._mDetectedCharset = None
    def get_charset_name(self):
        """Name of the detected charset, or None while undecided."""
        return self._mDetectedCharset
    def get_confidence(self):
        """0.99 once a machine has matched, otherwise 0.0."""
        return 0.99 if self._mDetectedCharset else 0.00
    def feed(self, aBuf):
        """Run each byte of *aBuf* through all still-active machines."""
        for cur in aBuf:
            # PY3K: aBuf is a byte array, so cur is an int, not a byte
            for machine in self._mCodingSM:
                if not machine or not machine.active:
                    continue
                coding_state = machine.next_state(cur)
                if coding_state == constants.eError:
                    # This candidate encoding is ruled out.
                    machine.active = False
                    self._mActiveSM -= 1
                    if self._mActiveSM <= 0:
                        self._mState = constants.eNotMe
                        return self.get_state()
                elif coding_state == constants.eItsMe:
                    self._mState = constants.eFoundIt
                    self._mDetectedCharset = machine.get_coding_state_machine()
                    return self.get_state()
        return self.get_state()
|
lucidbard/NewsBlur | apps/profile/migrations/0005_view_settings_to_preferences.py | Python | mit | 4,350 | 0.007816 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.contrib.auth.models import User
class Migration(DataMigration):
    """Move each user's legacy ``view_settings`` JSON into the new
    ``preferences`` field, then blank out ``view_settings``.
    """

    def forwards(self, orm):
        """Copy view_settings -> preferences for every user.

        One query+save per user is acceptable for a one-off data migration.
        """
        users = User.objects.all()
        for user in users:
            user.profile.preferences = user.profile.view_settings
            user.profile.view_settings = '{}'
            user.profile.save()

    def backwards(self, orm):
        "Write your backwards methods here."

    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'profile.profile': {
            'Meta': {'object_name': 'Profile'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_seen_ip': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'last_seen_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'preferences': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
            'view_settings': ('django.db.models.fields.TextField', [], {'default': "'{}'"})
        }
    }

    complete_apps = ['profile']
|
benelot/bullet-gym | pybulletgym/envs/gym_manipulators.py | Python | mit | 10,183 | 0.03005 | from pybulletgym.envs.scene_abstract import SingleRobotEmptyScene
from gym_mujoco_xml_env import PybulletMujocoXmlEnv
import gym, gym.spaces, gym.utils, gym.utils.seeding
import numpy as np
import os, sys
class PybulletReacher(PybulletMujocoXmlEnv):
    """Planar two-joint reacher: drive the fingertip onto a random target."""
    def __init__(self):
        PybulletMujocoXmlEnv.__init__(self, 'reacher.xml', 'body0', action_dim=2, obs_dim=9)
    def create_single_player_scene(self):
        return SingleRobotEmptyScene(gravity=9.81, timestep=0.0020, frame_skip=8)
    # Target coordinates are sampled uniformly from [-TARG_LIMIT, TARG_LIMIT].
    TARG_LIMIT = 0.27
    def robot_specific_reset(self):
        # Randomise the target position and both joint angles per episode.
        self.jdict["target_x"].reset_current_position(self.np_random.uniform( low=-self.TARG_LIMIT, high=self.TARG_LIMIT ), 0)
        self.jdict["target_y"].reset_current_position(self.np_random.uniform( low=-self.TARG_LIMIT, high=self.TARG_LIMIT ), 0)
        self.fingertip = self.parts["fingertip"]
        self.target = self.parts["target"]
        self.central_joint = self.jdict["joint0"]
        self.elbow_joint = self.jdict["joint1"]
        # Initial joint angles drawn over roughly the full circle (radians).
        self.central_joint.reset_current_position(self.np_random.uniform( low=-3.14, high=3.14 ), 0)
        self.elbow_joint.reset_current_position(self.np_random.uniform( low=-3.14, high=3.14 ), 0)
    def apply_action(self, a):
        # Torque control on both joints; raw actions are scaled by 0.05.
        assert( np.isfinite(a).all() )
        self.central_joint.set_motor_torque( 0.05 * a[0]) #0.05*float(np.clip(a[0], -1, +1)) )
        self.elbow_joint.set_motor_torque( 0.05 * a[1]) # 0.05*float(np.clip(a[1], -1, +1)) )
    def calc_state(self):
        # Builds the 9-dim observation; also caches self.to_target_vec,
        # self.theta_dot, self.gamma and self.gamma_dot for the reward terms.
        theta, self.theta_dot = self.central_joint.current_relative_position()
        self.gamma, self.gamma_dot = self.elbow_joint.current_relative_position()
        target_x, _ = self.jdict["target_x"].current_position()
        target_y, _ = self.jdict["target_y"].current_position()
        self.to_target_vec = np.array(self.fingertip.pose().xyz()) - np.array(self.target.pose().xyz())
        return np.array([
            target_x,
            target_y,
            self.to_target_vec[0],
            self.to_target_vec[1],
            np.cos(theta),
            np.sin(theta),
            self.theta_dot,
            self.gamma,
            self.gamma_dot,
            ])
    def calc_potential(self):
        # Negative scaled distance to target; progress appears as reward.
        return -100 * np.linalg.norm(self.to_target_vec)
    def _step(self, a):
        assert(not self.scene.multiplayer)
        self.apply_action(a)
        self.scene.global_step()
        state = self.calc_state()  # sets self.to_target_vec
        potential_old = self.potential
        self.potential = self.calc_potential()
        electricity_cost = (
            -0.10*(np.abs(a[0]*self.theta_dot) + np.abs(a[1]*self.gamma_dot))  # work torque*angular_velocity
            -0.01*(np.abs(a[0]) + np.abs(a[1]))  # stall torque require some energy
            )
        # Penalise the elbow sitting at the edge of its relative range.
        stuck_joint_cost = -0.1 if np.abs(np.abs(self.gamma)-1) < 0.01 else 0.0
        self.rewards = [float(self.potential - potential_old), float(electricity_cost), float(stuck_joint_cost)]
        self.HUD(state, a, False)
        return state, sum(self.rewards), False, {}
    def camera_adjust(self):
        x, y, z = self.fingertip.pose().xyz()
        x *= 0.5
        y *= 0.5
        self.camera.move_and_look_at(0.3, 0.3, 0.3, x, y, z)
class PybulletPusher(PybulletMujocoXmlEnv):
    """Seven-DOF arm env that pushes a cylinder toward a goal on the plane."""
    def __init__(self):
        PybulletMujocoXmlEnv.__init__(self, 'pusher.xml', 'body0', action_dim=7, obs_dim=5)
    def create_single_player_scene(self):
        return SingleRobotEmptyScene(gravity=9.81, timestep=0.0020, frame_skip=5)
    def robot_specific_reset(self):
        self.fingertip = self.parts["fingertip"]
        # qpos = self.init_qpos
        self.goal_pos = np.asarray([0, 0])
        # Re-sample the cylinder start until it is far enough from the goal.
        while True:
            self.cylinder_pos = np.concatenate([
                    self.np_random.uniform(low=-0.3, high=0, size=1),
                    self.np_random.uniform(low=-0.2, high=0.2, size=1)])
            if np.linalg.norm(self.cylinder_pos - self.goal_pos) > 0.17:
                break
        # NOTE(review): the mujoco-style state setting below has not been
        # ported to pybullet yet; kept for reference.
        # This is probably position setting
        # qpos[-4:-2] = self.cylinder_pos
        # qpos[-2:] = self.goal_pos
        # qvel = self.init_qvel + self.np_random.uniform(low=-0.005,
        #         high=0.005, size=self.model.nv)
        # qvel[-4:] = 0
        # self.set_state(qpos, qvel)
    def apply_action(self, a):
        # Only validates the action; actuation is not implemented yet.
        assert( np.isfinite(a).all() )
    def calc_state(self):
        # Joint positions/speeds plus fingertip, object and goal coordinates.
        return np.concatenate([
            np.array([j.current_position() for j in self.ordered_joints]).flatten(),  # position
            np.array([j.current_relative_position() for j in self.ordered_joints]).flatten(),  # speed
            self.parts["fingertip"].pose().xyz(),
            self.parts["object"].pose().xyz(),
            self.parts["goal"].pose().xyz(),
        ])
    def _step(self, a):
        self.apply_action(a)
        self.scene.global_step()
        state = self.calc_state()
        # Reward: reach the object, push it to the goal, keep actions small.
        reward_near_vec = self.parts["object"].pose().xyz() - self.parts["fingertip"].pose().xyz()
        reward_dist_vec = self.parts["object"].pose().xyz() - self.parts["goal"].pose().xyz()
        reward_near = - np.linalg.norm(reward_near_vec)
        reward_dist = - np.linalg.norm(reward_dist_vec)
        reward_ctrl = - np.square(a).sum()
        reward = reward_dist + 0.1 * reward_ctrl + 0.5 * reward_near
        done = False
        return state, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)
    def camera_adjust(self):
        x, y, z = self.fingertip.pose().xyz()
        x *= 0.5
        y *= 0.5
        self.camera.move_and_look_at(0.3, 0.3, 0.3, x, y, z)
class PybulletStriker(PybulletMujocoXmlEnv):
    """Seven-DOF arm env that strikes a ball toward a random goal."""
    def __init__(self):
        PybulletMujocoXmlEnv.__init__(self, 'striker.xml', 'body0', action_dim=7, obs_dim=5)
        self._striked = False
        self._min_strike_dist = np.inf
        # Fingertip-to-object distance below which a strike is registered.
        self.strike_threshold = 0.1
    def create_single_player_scene(self):
        return SingleRobotEmptyScene(gravity=9.81, timestep=0.0020, frame_skip=5)
    def robot_specific_reset(self):
        self.fingertip = self.parts["fingertip"]
        self._min_strike_dist = np.inf
        self._striked = False
        self._strike_pos = None
        # reset position of manipulator
        for j in self.ordered_joints:
            j.reset_current_position(self.np_random.uniform( low=-0.1, high=0.1 ))
        # reset speed of manipulator
        # reset ball position
        # qpos = self.init_qpos
        self.ball = np.array([0.5, -0.175])
        # Re-sample the goal until it is far enough from the ball start.
        while True:
            self.goal = np.concatenate([
                    self.np_random.uniform(low=0.15, high=0.7, size=1),
                    self.np_random.uniform(low=0.1, high=1.0, size=1)])
            if np.linalg.norm(self.ball - self.goal) > 0.17:
                break
        # NOTE(review): mujoco-style state setting below is unported; kept
        # for reference.
        # This is probably position setting
        # qpos[-9:-7] = [self.ball[1], self.ball[0]]
        # qpos[-7:-5] = self.goal
        # diff = self.ball - self.goal
        # angle = -np.arctan(diff[0] / (diff[1] + 1e-8))
        # qpos[-1] = angle / 3.14
        # qvel = self.init_qvel + self.np_random.uniform(low=-.1, high=.1,
        #         size=self.model.nv)
        # qvel[7:] = 0
        # self.set_state(qpos, qvel)
    def apply_action(self, a):
        # Only validates the action; actuation is not implemented yet.
        assert( np.isfinite(a).all() )
    def calc_state(self):
        # Joint positions/speeds plus fingertip, object and goal coordinates.
        return np.concatenate([
            np.array([j.current_position() for j in self.ordered_joints]).flatten(),  # position
            np.array([j.current_relative_position() for j in self.ordered_joints]).flatten(),  # speed
            self.parts["fingertip"].pose().xyz(),
            self.parts["object"].pose().xyz(),
            self.parts["goal"].pose().xyz(),
        ])
    def _step(self, a):
        self.apply_action(a)
        self.scene.global_step()
        state = self.calc_state()
        dist_object_finger = self.parts["object"].pose().xyz() - self.parts["fingertip"].pose().xyz()
        reward_dist_vec = self.parts["object"].pose().xyz() - self.parts["goal"].pose().xyz()
        # Track the closest the object has come to the goal this episode.
        self._min_strike_dist = min(self._min_strike_dist, np.linalg.norm(reward_dist_vec))
        if np.linalg.norm(dist_object_finger) < self.strike_threshold:
            self._striked = True
            self._strike_pos = self.parts["fingertip"].pose().xyz()
        # After the strike, measure "nearness" from the frozen strike point.
        if self._striked:
            reward_near_vec = self.parts["object"].pose().xyz() - self._strike_pos
        else:
            reward_near_vec = self.parts["object"].pose().xyz() - self.parts["fingertip"].pose().xyz()
        reward_near = - np.linalg.norm(reward_near_vec)
        reward_dist = - np.linalg.norm(self._min_strike_dist)
        reward_ctrl = - np.square(a).sum()
        reward = 3 * reward_dist + 0.1 * reward_ctrl + 0.5 * reward_near
        done = False
        return state, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)
    def camera_adjust(self):
        x, y, z = self.fingertip.pose().xyz()
        x *= 0.5
        y *= 0.5
        self.camera.move_and_look_at(0.3, 0.3, 0.3, x, y, z)
class PybulletThrower(PybulletMujocoXmlEnv):
def __init__(self):
PybulletMujocoXmlEnv.__init__(self, 'thrower.xml', 'body0', action_dim=7, obs_dim=5)
self._ball_hit_ground = False
self._ball_hit_location = None
def create_single_player_scene(self):
return SingleRobotEmptyScene(gravity=0.0, timeste |
msabramo/requests | requests/models.py | Python | isc | 27,078 | 0.001883 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import os
from datetime import datetime
from .hooks import dispatch_hook, HOOKS
from .structures import CaseInsensitiveDict
from .status_codes import codes
from .auth import HTTPBasicAuth, HTTPProxyAuth
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.exceptions import MaxRetryError, LocationParseError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3 import connectionpool, poolmanager
from .packages.urllib3.filepost import encode_multipart_formdata
from .defaults import SCHEMAS
from .exceptions import (
ConnectionError, HTTPError, RequestException, Timeout, TooManyRedirects,
URLRequired, SSLError, MissingSchema, InvalidSchema, InvalidURL)
from .utils import (
get_encoding_from_headers, stream_untransfer, guess_filename, requote_uri,
dict_from_string, stream_decode_response_unicode, get_netrc_auth,
DEFAULT_CA_BUNDLE_PATH)
from .compat import (
urlparse, urlunparse, urljoin, urlsplit, urlencode, str, bytes,
SimpleCookie, is_py2)
# Import chardet if it is available.
try:
import chardet
except ImportError:
pass
REDIRECT_STATI = (codes.moved, codes.found, codes.other, codes.temporary_moved)
class Request(object):
"""The :class:`Request <Request>` object. It carries out all f | unctionality of
Requests. Recommended interface is with the Requests functions.
"""
def __init__(self,
url=None,
headers=dict(),
files=Non | e,
method=None,
data=dict(),
params=dict(),
auth=None,
cookies=None,
timeout=None,
redirect=False,
allow_redirects=False,
proxies=None,
hooks=None,
config=None,
prefetch=False,
_poolmanager=None,
verify=None,
session=None,
cert=None):
#: Dictionary of configurations for this request.
self.config = dict(config or [])
#: Float describes the timeout of the request.
# (Use socket.setdefaulttimeout() as fallback)
self.timeout = timeout
#: Request URL.
self.url = url
#: Dictionary of HTTP Headers to attach to the :class:`Request <Request>`.
self.headers = dict(headers or [])
#: Dictionary of files to multipart upload (``{filename: content}``).
self.files = None
#: HTTP Method to use.
self.method = method
#: Dictionary or byte of request body data to attach to the
#: :class:`Request <Request>`.
self.data = None
#: Dictionary or byte of querystring data to attach to the
#: :class:`Request <Request>`. The dictionary values can be lists for representing
#: multivalued query parameters.
self.params = None
#: True if :class:`Request <Request>` is part of a redirect chain (disables history
#: and HTTPError storage).
self.redirect = redirect
#: Set to True if full redirects are allowed (e.g. re-POST-ing of data at new ``Location``)
self.allow_redirects = allow_redirects
# Dictionary mapping protocol to the URL of the proxy (e.g. {'http': 'foo.bar:3128'})
self.proxies = dict(proxies or [])
# If no proxies are given, allow configuration by environment variables
# HTTP_PROXY and HTTPS_PROXY.
if not self.proxies and self.config.get('trust_env'):
if 'HTTP_PROXY' in os.environ:
self.proxies['http'] = os.environ['HTTP_PROXY']
if 'HTTPS_PROXY' in os.environ:
self.proxies['https'] = os.environ['HTTPS_PROXY']
self.data, self._enc_data = self._encode_params(data)
self.params, self._enc_params = self._encode_params(params)
self.files, self._enc_files = self._encode_files(files)
#: :class:`Response <Response>` instance, containing
#: content and metadata of HTTP Response, once :attr:`sent <send>`.
self.response = Response()
#: Authentication tuple or object to attach to :class:`Request <Request>`.
self.auth = auth
#: CookieJar to attach to :class:`Request <Request>`.
self.cookies = dict(cookies or [])
#: True if Request has been sent.
self.sent = False
#: Event-handling hooks.
self.hooks = {}
for event in HOOKS:
self.hooks[event] = []
hooks = hooks or {}
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
#: Session.
self.session = session
#: SSL Verification.
self.verify = verify
#: SSL Certificate
self.cert = cert
#: Prefetch response content
self.prefetch = prefetch
if headers:
headers = CaseInsensitiveDict(self.headers)
else:
headers = CaseInsensitiveDict()
# Add configured base headers.
for (k, v) in list(self.config.get('base_headers', {}).items()):
if k not in headers:
headers[k] = v
self.headers = headers
self._poolmanager = _poolmanager
def __repr__(self):
return '<Request [%s]>' % (self.method)
def _build_response(self, resp):
"""Build internal :class:`Response <Response>` object
from given response.
"""
def build(resp):
response = Response()
# Pass settings over.
response.config = self.config
if resp:
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', None))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
# Start off with our local cookies.
cookies = self.cookies or dict()
# Add new cookies from the server.
if 'set-cookie' in response.headers:
cookie_header = response.headers['set-cookie']
cookies = dict_from_string(cookie_header)
# Save cookies in Response.
response.cookies = cookies
# No exceptions were harmed in the making of this request.
response.error = getattr(resp, 'error', None)
# Save original response for later.
response.raw = resp
if isinstance(self.full_url, bytes):
response.url = self.full_url.decode('utf-8')
else:
response.url = self.full_url
return response
history = []
r = build(resp)
self.cookies.update(r.cookies)
if r.status_code in REDIRECT_STATI and not self.redirect:
while (('location' in r.headers) and
((r.status_code is codes.see_other) or (self.allow_redirects))):
r.content # Consume socket so it can be released
if not len(history) < self.config.get('max_redirects'):
raise TooManyRedirects()
# Release the connection back into the pool.
r.raw.release_conn()
history.append(r)
url = r.headers['location']
data = self.data
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(r.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
# Facilitate non-RFC2616-compliant 'location' headers
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
if not urlparse(url).netloc:
url = urljoin(r.url,
# Compliant with RFC3986, we percent |
mlskit/astromlskit | REGRESSION/linear.py | Python | gpl-3.0 | 2,059 | 0.000971 | from numpy import *
import matplotlib.pyplot as plt
def file2matrix(filename):
    """Read one numeric value per line into an (n, 1) float array.

    filename: path to a text file with a single number on each line.
    Returns a numpy array of shape (number_of_lines, 1).
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename) as data_file:
        data_lines = data_file.readlines()
    data_matrix = zeros((len(data_lines), 1))
    for line_index, line in enumerate(data_lines):
        # numpy converts the stripped string to float on assignment.
        data_matrix[line_index, :] = line.strip()
    return data_matrix
def load_data(x_filename, y_filename):
    """Load training data, prepending a 1.0 intercept term to each x value.

    Returns (x_array, y_array) where x_array is [[1.0, x], ...] and
    y_array is a flat list of floats.
    """
    x_array = []
    y_array = []
    # 'with' guarantees both handles are closed (the original leaked them).
    with open(x_filename) as x_file:
        for x_line in x_file:
            x_array.append([1.0, float(x_line.strip())])
    with open(y_filename) as y_file:
        for y_line in y_file:
            y_array.append(float(y_line.strip()))
    return x_array, y_array
def regression(x_array, y_array):
    """Fit linear-regression weights by batch gradient descent.

    x_array: m rows of n features (including an intercept column);
    y_array: m target values.  Returns the learned (n, 1) weight vector.
    """
    x_matrix = mat(x_array)
    y_matrix = mat(y_array).T
    m, n = shape(x_matrix)
    max_cycles = 1500
    alpha = 0.07  # learning rate
    weights = ones((n, 1))
    theta = ones((n, 1))
    for cycle_index in range(max_cycles):
        for j in range(n):
            theta[j] = weights[j] - alpha / m * partial_derivative_of_h(weights, x_matrix, y_matrix, j)
        # Snapshot theta for the next batch update.  The original
        # "[weight for weight in theta]" kept live views into theta, so
        # later in-place writes leaked into the "previous" weights.
        weights = theta.copy()
    return theta

def partial_derivative_of_h(weights, x_matrix, y_matrix, j):
    """d/d(theta_j) of the squared-error cost: sum_i (h_i - y_i) * x_ij."""
    error = hypothesis(weights, x_matrix) - y_matrix
    # Element-wise product then sum replaces the original mutate-in-place
    # loop; same value, no side effects.
    return sum(multiply(error, x_matrix[:, j]))

def hypothesis(weights, x_matrix):
    """Linear model prediction h(x) = X . theta."""
    return x_matrix * weights
def plot_best_fit(weights, x_matrix, y_matrix):
    """Scatter the raw (x, y) data and overlay the fitted regression line.

    weights: (2, 1) vector [intercept, slope];
    x_matrix / y_matrix: (m, 1) raw data columns.
    Blocks until the matplotlib window is closed.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(x_matrix[:], y_matrix[:])
    plt.xlabel('Age')
    plt.ylabel('Height')
    # Line drawn over a fixed 0-10 range -- presumably the age span of the
    # dataset; TODO confirm against ex2x.dat.
    x = arange(0.0, 10.0, 0.1)
    y = weights[0] + weights[1] * x
    ax.plot(x, y)
    plt.show()
if __name__ == '__main__':
    # Fit the model, report the learned weights, then visualise the fit.
    x_array, y_array = load_data('ex2x.dat', 'ex2y.dat')
    weights = regression(x_array, y_array)
    print(weights)  # parenthesised: valid under both Python 2 and 3
    x_matrix = file2matrix('ex2x.dat')
    y_matrix = file2matrix('ex2y.dat')
    plot_best_fit(weights, x_matrix, y_matrix)
|
brainstorm/bcbio-nextgen | scripts/bcbio_fastq_umi_prep.py | Python | mit | 4,259 | 0.004226 | #!/usr/bin/env python
"""Convert 3 fastq inputs (read 1, read 2, UMI) into paired inputs with UMIs in read names
Usage:
bcbio_fastq_umi_prep.py single <out basename> <read 1 fastq> <read 2 fastq> <umi fastq>
or:
bcbio_fastq_umi_prep.py autopair [<list> <of> <fastq> <files>]
Creates two fastq files with embedded UMIs: <out_basename>_R1.fq.gz <out_basename>_R2.fq.gz
or a directory of fastq files with UMIs added to the names.
"""
from __future__ import print_function
import argparse
import os
import sys
from bcbio import utils
from bcbio.bam import fastq
from bcbio.provenance import do
from bcbio.distributed.multi import run_multicore, zeromq_aware_logging
transform_json = r"""{
"read1": "(?P<name>@.*)\\n(?P<seq>.*)\\n\\+(.*)\\n(?P<qual>.*)\\n",
"read2": "(?P<name>@.*)\\n(?P<seq>.*)\\n\\+(.*)\\n(?P<qual>.*)\\n",
"read3": "(@.*)\\n(?P<MB>.*)\\n\\+(.*)\\n(.*)\\n"
}
"""
def run_single(args):
    """CLI entry for the 'single' sub-command: prep one R1/R2/UMI trio."""
    add_umis_to_fastq(args.out_base, args.read1_fq, args.read2_fq, args.umi_fq)
@utils.map_wrap
@zeromq_aware_logging
def add_umis_to_fastq_parallel(out_base, read1_fq, read2_fq, umi_fq, config):
    """Multicore-safe wrapper around add_umis_to_fastq (config is unused)."""
    add_umis_to_fastq(out_base, read1_fq, read2_fq, umi_fq)
def add_umis_to_fastq(out_base, read1_fq, read2_fq, umi_fq):
    """Embed UMIs from umi_fq into the read names of a fastq pair.

    Writes <out_base>_R1.fq.gz and <out_base>_R2.fq.gz using
    `umis fastqtransform`, compressing through pbgzip process substitution.
    """
    print("Processing", read1_fq, read2_fq, umi_fq)
    cores = 8
    out1_fq = out_base + "_R1.fq.gz"
    out2_fq = out_base + "_R2.fq.gz"
    transform_json_file = out_base + "-transform.json"
    with open(transform_json_file, "w") as out_handle:
        out_handle.write(transform_json)
    # Peek at the first read name: a second whitespace-separated token
    # ("@name tag") means umis must be told to keep the existing fastq tags.
    with utils.open_gzipsafe(read1_fq) as in_handle:
        ex_name = in_handle.readline().split(" ")
        if len(ex_name) == 2:
            fastq_tags_arg = "--keep_fastq_tags"
        else:
            fastq_tags_arg = ""
    cmd = ("umis fastqtransform {fastq_tags_arg} "
           "--fastq1out >(pbgzip -n {cores} -c > {out1_fq}) "
           "--fastq2out >(pbgzip -n {cores} -c > {out2_fq}) "
           "{transform_json_file} {read1_fq} "
           "{read2_fq} {umi_fq}")
    do.run(cmd.format(**locals()), "Add UMIs to paired fastq files")
    os.remove(transform_json_file)
def run_autopair(args):
    """Pair R1/R2/R3 fastq files by shared name prefix and prep each trio.

    Files that pair as two are matched with a third (UMI) file sharing the
    same prefix; each completed trio is dispatched to add_umis_to_fastq.
    """
    outdir = utils.safe_makedir(args.outdir)
    to_run = []
    extras = []
    for fnames in fastq.combine_pairs(sorted(args.files)):
        if len(fnames) == 2:
            to_run.append(fnames)
        elif len(fnames) == 3:
            r1, r2, r3 = sorted(fnames)
            to_run.append([r1, r2])
            extras.append(r3)
        else:
            assert len(fnames) == 1, fnames
            extras.append(fnames[0])
    ready_to_run = []
    for r1, r2 in to_run:
        target = os.path.commonprefix([r1, r2])
        r3 = None
        for test_r3 in extras:
            if (os.path.commonprefix([r1, test_r3]) == target and
                os.path.commonprefix([r2, test_r3]) == target):
                r3 = test_r3
                break
        assert r3, (r1, r2, extras)
        prefix = os.path.commonprefix([r1, r2, r3])
        # Trim only a literal "_R" (or lone "_") suffix.  The original
        # rstrip("_R") stripped ANY trailing '_'/'R' characters and could
        # eat letters belonging to the sample name (e.g. "SAMPLER").
        if prefix.endswith("_R"):
            prefix = prefix[:-2]
        elif prefix.endswith("_"):
            prefix = prefix[:-1]
        base_name = os.path.join(outdir, prefix)
        ready_to_run.append([base_name, r1, r3, r2, {"algorithm": {}, "resources": {}}])
    parallel = {"type": "local", "cores": len(ready_to_run), "progs": []}
    run_multicore(add_umis_to_fastq_parallel, ready_to_run, {"algorithm": {}}, parallel)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Add UMIs to fastq read names")
sp = parser.add_subparsers(title="[sub-commands]")
p = sp.add_parser("autopair", help="Automatically pair R1/R2/R3 fastq inputs")
p.add_argument("--outdir", default="with_umis", help="Output directory to write UMI prepped fastqs")
p.add_argument("files", nargs="*", help="All fastq files to pair and process")
p.set_defaults(func=run_autopair)
p = sp.add_parser("single", help="Run single set of fastq files with separate UMIs")
p.add_argument("out_base", help="Base name for output files -- you get <base_name>_R1.fq.gz")
p.add_argument("read1_fq", help="Input fastq, read 1")
p.add_argument("read2_fq", help="Input fastq, read 2")
p.add_argument("umi_fq", help="Input fastq, UMIs")
p.set_defaults(func=run_single)
if len(sys.argv) == 1:
parser.print_help()
args = parser.parse_args()
args.func(args)
|
meskio/keymanager | src/leap/keymanager/_version.py | Python | gpl-3.0 | 8,362 | 0.000598 |
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
    """Run *args* as a subprocess and return its stripped stdout.

    Returns None when the executable cannot be launched or exits non-zero;
    diagnostics are printed only when *verbose* is true.
    """
    # shell=False throughout, so on Windows callers must pass git.cmd, not git.
    try:
        proc = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
    except EnvironmentError:
        err = sys.exc_info()[1]
        if verbose:
            print("unable to run %s" % args[0])
            print(err)
        return None
    output = proc.communicate()[0].strip()
    if sys.version >= '3':
        output = output.decode()
    if proc.returncode == 0:
        return output
    if verbose:
        print("unable to run %s (error)" % args[0])
    return None
import re
import os.path
def get_expanded_variables(versionfile_source):
    """Scrape the git_refnames/git_full string values out of a _version.py.

    The code embedded in _version.py can just read these variables directly;
    setup.py must not import _version.py, so the values are extracted with a
    regexp instead.  Returns a dict containing whichever of the keys
    "refnames"/"full" were found; an unreadable file yields an empty dict.
    This function is not used from _version.py.
    """
    variables = {}
    try:
        # 'with' closes the handle even if reading raises (the original
        # leaked it on any error during the read loop).
        with open(versionfile_source, "r") as f:
            for line in f:
                stripped = line.strip()
                if stripped.startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        variables["refnames"] = mo.group(1)
                if stripped.startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        variables["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
    """Derive {"version", "full"} from expanded git-archive substitutions."""
    refnames = variables["refnames"].strip()
    if refnames.startswith("$Format"):
        # Substitutions were never expanded: not an unpacked git-archive tarball.
        if verbose:
            print("variables are unexpanded, not using")
        return {}
    refs = set(r.strip() for r in refnames.strip("()").split(","))
    # git >= 1.8.3 labels tags as "tag: foo-1.0"; prefer those when present.
    TAG = "tag: "
    tags = set(r[len(TAG):] for r in refs if r.startswith(TAG))
    if not tags:
        # Older git (or genuinely no tags): the %d expansion mixes branches
        # and tags together, so keep only refs containing a digit -- this
        # filters out common branch names like "release", "HEAD", "master".
        tags = set(r for r in refs if re.search(r'\d', r))
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    full = variables["full"].strip()
    for ref in sorted(tags):  # sorting prefers e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        version = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % version)
        return {"version": version,
                "full": full}
    # No suitable tag: fall back to the full revision id.
    if verbose:
        print("no suitable tags, using full revision id")
    return {"version": full,
            "full": full}
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
    """Compute {"version", "full"} by running git in the checked-out tree.

    Returns {} when __file__ is unavailable, there is no .git directory,
    git cannot run, or the described tag lacks *tag_prefix*.
    """
    # this runs 'git' from the root of the source tree. That either means
    # someone ran a setup.py command (and this code is in versioneer.py, so
    # IN_LONG_VERSION_PY=False, thus the containing directory is the root of
    # the source tree), or someone ran a project-specific entry point (and
    # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
    # containing directory is somewhere deeper in the source tree). This only
    # gets called if the git-archive 'subst' variables were *not* expanded,
    # and _version.py hasn't already been rewritten with a short version
    # string, meaning we're inside a checked out source tree.
    try:
        here = os.path.abspath(__file__)
    except NameError:
        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
        return {}  # not always correct
    # versionfile_source is the relative path from the top of the source tree
    # (where the .git directory might live) to this file. Invert this to find
    # the root from __file__.
    root = here
    if IN_LONG_VERSION_PY:
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        root = os.path.dirname(here)
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}
    GIT = "git"
    if sys.platform == "win32":
        GIT = "git.cmd"
    # "describe --dirty" gives e.g. "v1.2-dirty"; strip the tag prefix below.
    stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" %
                  (stdout, tag_prefix))
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    # Propagate the dirty marker onto the full revision id as well.
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source,
                            verbose=False):
    """Infer the version from the name of the source tree's root directory.

    Source tarballs conventionally unpack into '<parentdir_prefix><version>/'.
    Returns {"version": ..., "full": ""} on a match.
    NOTE(review): returns {} when __file__ is missing but None when the
    directory name does not match -- callers must handle both falsy values.
    """
    if IN_LONG_VERSION_PY:
        # We're running from _version.py. If it's from a source tree
        # (execute-in-place), we can work upwards to find the root of the
        # tree, and then check the parent directory for a version string. If
        # it's in an installed application, there's no hope.
        try:
            here = os.path.abspath(__file__)
        except NameError:
            # py2exe/bbfreeze/non-CPython don't have __file__
            return {}  # without __file__, we have no hope
        # versionfile_source is the relative path from the top of the source
        # tree to _version.py. Invert this to find the root from __file__.
        root = here
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        # we're running from versioneer.py, which means we're running from
        # the setup.py in a source tree. sys.argv[0] is setup.py in the root.
        here = os.path.abspath(sys.argv[0])
        root = os.path.dirname(here)
    # Source tarballs conventionally unpack into a directory that includes
    # both the project name and a version string.
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start "
                  "with prefix '%s'" %
                  (root, dirname, parentdir_prefix))
        return None
    return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = ""
parentdir_prefix = "leap.keymanager-"
versionfile_source = "src/leap/keymanager/_version.py"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = {"refnames": git_refnames, "full": git_full}
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not |
vyrus/wubi | src/bittorrent/CurrentRateMeasure.py | Python | gpl-2.0 | 1,016 | 0.004921 | # Written by Bram Cohen
# see LICENSE.txt for license information
from time import time
class Measure:
    """Windowed transfer-rate meter.

    Tracks a running byte total plus a rate averaged over at most
    *max_rate_period* seconds of recent activity.
    """
    def __init__(self, max_rate_period, fudge = 1):
        # max_rate_period: longest span (seconds) the rate is averaged over.
        # fudge: pretend measurement started this many seconds ago so the
        # first update does not divide by a near-zero interval.
        self.max_rate_period = max_rate_period
        self.ratesince = time() - fudge
        self.last = self.ratesince
        self.rate = 0.0
        # Plain int (was the Python-2-only long literal "0l", a SyntaxError
        # on Python 3); ints auto-promote to arbitrary precision anyway.
        self.total = 0
    def update_rate(self, amount):
        """Fold *amount* new bytes into the time-weighted average rate."""
        self.total += amount
        t = time()
        self.rate = (self.rate * (self.last - self.ratesince) +
                     amount) / (t - self.ratesince)
        self.last = t
        # Slide the window forward so old traffic stops dominating.
        if self.ratesince < t - self.max_rate_period:
            self.ratesince = t - self.max_rate_period
    def get_rate(self):
        """Current rate, decayed to account for time since the last update."""
        self.update_rate(0)
        return self.rate
    def get_rate_noupdate(self):
        """Rate as of the last update (no decay applied)."""
        return self.rate
    def time_until_rate(self, newrate):
        """Seconds of silence needed for the average to decay to *newrate*."""
        if self.rate <= newrate:
            return 0
        t = time() - self.ratesince
        return ((self.rate * t) / newrate) - t
    def get_total(self):
        """Total bytes recorded so far."""
        return self.total
|
OnShift/page_object | features/steps/label_steps.py | Python | apache-2.0 | 288 | 0 | @when(u'I get t | he text from the label')
def step_impl(context):
context.expected_text = context.page.label_id()
@when(u'I search for the label by "{how}"')
def step_impl(context, how):
method = 'label_{0}'.format(how)
co | ntext.expected_text = getattr(context.page, method)()
|
gophronesis/smlib | smlib/keras_vectorizer.py | Python | apache-2.0 | 2,229 | 0.005832 | import sys
import numpy as np
from normalization import tokenize
from helpers import ahash
class KerasVectorizer():
'''
Convert list of documents to numpy array for input into Keras model
'''
def __init__(self, n_features=100000, maxlen=None, maxper=100, hash_function=ahash):
self.maxlen = maxlen
self.maxper = maxper
self.n_features = n_features
self.hash_function = hash_function
def _exact_hash(self, word, n_features):
return self.token_lookup.get(word, 0)
def fit_transform(self, raw_documents, y=None, suffix='', verbose=True):
if verbose:
print >> sys.stderr, 'splitting raw documents'
# Some way to print progress?
tokens = map(self._split_function, raw_documents)
if self.maxlen:
maxlen = self.maxlen
else:
maxlen = int(np.percentile(map(len, tokens), self.maxper))
self.maxlen = maxlen
X = np.zeros((len(tokens), maxlen))
for i,t in enumerate(tokens):
if verbose:
if not i % 10000:
print >> sys.stderr, 'processed %d tokens' % i
if len(t) > 0:
X[i,-len(t):] = map(lambda x: self.hash_function(x + suffix, self.n_features), t[:maxlen])
return X
class KerasCharacterVectorizer(KerasVectorizer):
'''
Split a string into characters
'''
def _split_function(self, doc):
return list(doc)
class KerasTokenVectorizer(KerasVectorizer):
'''
Split a string into words,
'''
def _split_function(self, doc):
return tokenize(doc, keep_punctuation=True)
class KerasPretokenizedVectorizer(KerasVectorizer):
def _split_function(self, doc):
return doc
'''
from keras_vectorizer import KerasTokenVectorizer, KerasCharacterVectorizer
ktv = KerasTokenVectorizer()
ktv.fit_transform(['this is a test'])
ktv.fit_transform( | ['this is a test', 'this is a another test'])
ktv = KerasTokenVectorizer(maxlen=2)
ktv.fit_transform(['this is a test', 'this is a another test'])
kcv = KerasCharacterVectorizer()
kcv.fit_transform(['something', 'else' | ])
'''
|
sixohsix/xychan | tests/test_user.py | Python | gpl-3.0 | 315 | 0.003175 | from test_basics import setUp, app
def test_visit_login():
r = app.get('/mod/login')
assert "<form na | me=\"login\"" in r
def test_login():
r = app.get('/mod/login')
r.form['username'] = 'admin'
r.form['password'] = 'adminadmin1'
r = r.form.submit()
assert "You are now logged | in" in r
|
shobhitmishra/CodingProblems | epi_judge_python/insert_operators_in_string.py | Python | mit | 426 | 0 | from typing import List
from test_framework import generic_test
def expressio | n_synthesis(digits: List[int], target: int) -> bool:
# TODO - you fill in here.
return True
if __name__ | == '__main__':
exit(
generic_test.generic_test_main('insert_operators_in_string.py',
'insert_operators_in_string.tsv',
expression_synthesis))
|
HERA-Team/pyuvdata | pyuvdata/uvdata/tests/test_mir.py | Python | bsd-2-clause | 19,246 | 0.001871 | # -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2020 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for Mir class.
Performs a series of test for the Mir class, which inherits from UVData. Note that
there is a separate test module for the MirParser class (mir_parser.py), which is
what is used to read the raw binary data into something that the Mir class can
manipulate into a UVData object.
"""
import os
import pytest
import numpy as np
from ... import tests as uvtest
from ... import UVData
from ...data import DATA_PATH
from ...uvdata.mir_parser import MirParser
from ...uvdata.mir import Mir
@pytest.fixture(scope="session")
def sma_mir_filt_main():
# read in test file for the resampling in time functions
uv_object = UVData()
testfile = os.path.join(DATA_PATH, "sma_test.mir")
uv_object.read(testfile, pseudo_cont=True, corrchunk=0)
uv_object.flag_array[:, :, : uv_object.Nfreqs // 2, 0] = True
uv_object.flag_array[:, :, uv_object.Nfreqs // 2 :, 1] = True
uv_object.set_lsts_from_time_array()
uv_object._set_app_coords_helper()
yield uv_object
@pytest.fixture(scope="function")
def sma_mir_filt(sma_mir_filt_main):
# read in test file for the resampling in time functions
uv_object = sma_mir_filt_main.copy()
yield uv_object
@pytest.mark.filterwarnings("ignore:LST values stored in this file are not ")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_read_mir_write_uvfits(sma_mir, tmp_path, future_shapes):
"""
Mir to uvfits loopback test.
Read in Mir files, write out as uvfits, read back in and check for
object equality.
"""
testfile = os.path.join(tmp_path, "outtest_mir.uvfits")
uvfits_uv = UVData()
if future_shapes:
sma_mir.use_future_array_shapes()
sma_mir.write_uvfits(testfile, spoof_nonessential=True)
uvfits_uv.read_uvfits(testfile)
if future_shapes:
uvfits_uv.use_future_array_shapes()
# UVFITS doesn't allow for numbering of spectral windows like MIR does, so
# we need an extra bit of handling here
assert len(np.unique(sma_mir.spw_array)) == len(np.unique(uvfits_uv.spw_array))
spw_dict = {idx: jdx for idx, jdx in zip(uvfits_uv.spw_array, sma_mir.spw_array)}
assert np.all(
[
idx == spw_dict[jdx]
for idx, jdx in zip(sma_mir.flex_spw_id_array, uvfits_uv.flex_spw_id_array,)
]
)
# Now that we've checked, set this things as equivalent
uvfits_uv.spw_array = sma_mir.spw_array
uvfits_uv.flex_spw_id_array = sma_mir.flex_spw_id_array
# Check the history first via find
assert 0 == uvfits_uv.history.find(
sma_mir.history + " Read/written with pyuvdata version:"
)
sma_mir.history = uvfits_uv.history
# We have to do a bit of special handling for the phase_center_catalog, because
# _very_ small errors (like last bit in the mantissa) creep in when passing through
# the util function transform_sidereal_coords (for mutli-phase-ctr datasets). Verify
# the two match up in terms of their coordinates
for cat_name in sma_mir.phase_center_catalog.keys():
assert np.isclose(
sma_mir.phase_center_catalog[cat_name]["cat_lat"],
uvfits_uv.phase_center_catalog[cat_name]["cat_lat"],
)
assert np.isclose(
sma_mir.phase_center_catalog[cat_name]["cat_lon"],
uvfits_uv.phase_center_catalog[cat_name]["cat_lon"],
)
uvfits_uv.phase_center_catalog = sma_mir.phase_center_catalog
# There's a minor difference between what SMA calculates online for app coords
# and what pyuvdata calculates, to the tune of ~1 arcsec. Check those values here,
# then set them equal to one another.
assert np.all(
np.abs(sma_mir.phase_center_app_ra - uvfits_uv.phase_center_app_ra) < 1e-5
)
assert np.all(
np.abs(sma_mir.phase_center_app_dec - uvfits_uv.phase_center_app_dec) < 1e-5
)
sma_mir._set_app_coords_helper()
uvfits_uv._set_app_coords_helper()
# make sure filenames are what we expect
assert sma_mir.filename == ["sma_test.mir"]
assert uvfits_uv.filename == ["outtest_mir.uvfits"]
sma_mir.filename = uvfits_uv.filename
assert sma_mir == uvfits_uv
# Since mir is mutli-phase-ctr by default, this should effectively be a no-op
sma_mir._set_multi_phase_center()
assert sma_mir == uvfits_uv
@pytest.mark.filterwarnings("ignore:Writing in the MS file that the units of the data")
@pytest.mark.filterwarnings("ignore:LST values stored in this file are not ")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_read_mir_write_ms(sma_mir, tmp_path, future_shapes):
"""
Mir to uvfits loopback test.
Read in Mir files, write out as ms, read back in and check for
object equality.
"""
pytest.importorskip("casacore")
testfile = os.path.join(tmp_path, "outtest_mir.ms")
ms_uv = UVData()
if future_shapes:
sma_mir.use_future_array_shapes()
sma_mir.write_ms(testfile, clobber=True)
ms_uv.read(testfile)
# Single integration with 1 phase center = single scan number
# output in the MS
assert ms_uv.scan_number_array == np.array([1])
if future_shapes:
ms_uv.use_future_array_shapes()
# There are some minor differences between the values stored by MIR and that
# calculated by UVData. Since MS format requires these to be calculated on the fly,
# we calculate them here just to verify that everything is looking okay.
sma_mir.set_lsts_from_time_array()
sma_mir._set_app_coords_helper()
# These | reorderings just make sure that data from the two formats are lined up
# correctly.
sma_mir.reorder_freqs(spw_order="number")
ms_uv.reorder_blts()
# MS doesn't have the concept of an "instrument" name like FITS does, and instead
# de | faults to the telescope name. Make sure that checks out here.
assert sma_mir.instrument == "SWARM"
assert ms_uv.instrument == "SMA"
sma_mir.instrument = ms_uv.instrument
# Quick check for history here
assert ms_uv.history != sma_mir.history
ms_uv.history = sma_mir.history
# Only MS has extra keywords, verify those look as expected.
assert ms_uv.extra_keywords == {"DATA_COL": "DATA", "observer": "SMA"}
assert sma_mir.extra_keywords == {}
sma_mir.extra_keywords = ms_uv.extra_keywords
# Make sure the filenames line up as expected.
assert sma_mir.filename == ["sma_test.mir"]
assert ms_uv.filename == ["outtest_mir.ms"]
sma_mir.filename = ms_uv.filename = None
# Finally, with all exceptions handled, check for equality.
assert ms_uv.__eq__(sma_mir, allowed_failures=["filename"])
@pytest.mark.filterwarnings("ignore:LST values stored ")
def test_read_mir_write_uvh5(sma_mir, tmp_path):
"""
Mir to uvfits loopback test.
Read in Mir files, write out as uvfits, read back in and check for
object equality.
"""
testfile = os.path.join(tmp_path, "outtest_mir.uvh5")
uvh5_uv = UVData()
sma_mir.write_uvh5(testfile)
uvh5_uv.read_uvh5(testfile)
# Check the history first via find
assert 0 == uvh5_uv.history.find(
sma_mir.history + " Read/written with pyuvdata version:"
)
# test fails because of updated history, so this is our workaround for now.
sma_mir.history = uvh5_uv.history
# make sure filenames are what we expect
assert sma_mir.filename == ["sma_test.mir"]
assert uvh5_uv.filename == ["outtest_mir.uvh5"]
sma_mir.filename = uvh5_uv.filename
assert sma_mir == uvh5_uv
def test_write_mir(hera_uvh5, err_type=NotImplementedError):
"""
Mir writer test
Check and make sure that attempts to use the writer return a
'not implemented' error.
"""
# Check and see if the correct error is raised
with pytest.raises(err_type):
hera_uvh5.write_mir("dummy.mir")
def test_multi_nchan_spw_read(tmp_path):
"""
Mir to uvfits error test for spws of different sizes.
Read in Mir files, write out as uvfits, read back in and check for
obje |
itsjeyd/edx-platform | openedx/core/djangoapps/coursegraph/utils.py | Python | agpl-3.0 | 1,464 | 0 | """
Helpers for the CourseGraph app
"""
from django.core.cache import cache
from django.utils import timezone
class TimeRecordingCacheBase(object):
"""
A base class for caching the current time for some key.
"""
# cache_prefix should be defined in children classes
cache_prefix = None
_cache = cache
def _key(self, course_key):
"""
Make a cache key from the prefix and a course_key
:param course_key: CourseKey object
:ret | urn: a cache key
"""
return self.cache_prefix + unicode(course_key)
def get(self, course_key):
"""
Gets the time value associated with the CourseKey.
:param course_key: a CourseKey object.
:return: the time the key was last set.
"""
return self._cache.get(self._key(course_key)) |
def set(self, course_key):
"""
Sets the current time for a CourseKey key.
:param course_key: a CourseKey object.
"""
return self._cache.set(self._key(course_key), timezone.now())
class CourseLastPublishedCache(TimeRecordingCacheBase):
"""
Used to record the last time that a course had a publish event run on it.
"""
cache_prefix = u'course_last_published'
class CommandLastRunCache(TimeRecordingCacheBase):
"""
Used to record the last time that the dump_to_neo4j command was run on a
course.
"""
cache_prefix = u'dump_to_neo4j_command_last_run'
|
chichilalescu/bfps | tests/test_field_class.py | Python | gpl-3.0 | 6,948 | 0.012666 | import numpy as np
import h5py
import matplotlib.pyplot as plt
import pyfftw
import bfps
import bfps.tools
import os
from bfps._fluid_base import _fluid_particle_base
class TestField(_fluid_particle_base):
def __init__(
self,
name = 'TestField-v' + bfps.__version__,
work_dir = './',
simname = 'test',
fluid_precision = 'single',
use_fftw_wisdom = False):
_fluid_particle_base.__init__(
self,
name = name + '-' + fluid_precision,
work_dir = work_dir,
simname = simname,
dtype = fluid_precision,
use_fftw_wisdom = use_fftw_wisdom)
self.fill_up_fluid_code()
self.finalize_code()
return None
def fill_up_fluid_code(self):
self.fluid_includes += '#include <cstring>\n'
self.fluid_includes += '#include "fftw_tools.hpp"\n'
self.fluid_includes += '#include "field.hpp"\n'
self.fluid_variables += ('field<' + self.C_dtype + ', FFTW, ONE> *f;\n' +
'field<' + self.C_dtype + ', FFTW, THREE> *v;\n' +
'kspace<FFTW, SMOOTH> *kk;\n')
self.fluid_start += """
//begincpp
f = new field<{0}, FFTW, ONE>(
nx, ny, nz, MPI_COMM_WORLD);
v = new field<{0}, FFTW, THREE>(
nx, ny, nz, MPI_COMM_WORLD);
kk = new kspace<FFTW, SMOOTH>(
f->clayout, 1., 1., 1.);
// read rdata
f->real_space_representation = true;
f->io("field.h5", "scal", 0, true);
// go to fourier space, write into cdata_tmp
f->dft();
f->io("field.h5", "scal_tmp", 0, false);
f->ift();
f->io("field.h5", "scal", 0, false);
f->real_space_representation = false;
f->io("field.h5", "scal", 0, true);
hid_t gg;
if (f->myrank == 0)
gg = H5Fopen("field.h5", H5F_ACC_RDWR, H5P_DEFAULT);
kk->cospectrum<float, ONE>(
f->get_cdata(),
f->get_cdata(),
gg,
"scal",
0);
f->ift();
f->io("field.h5", "scal_tmp", 0, false);
std::vector<double> me;
me.resize(1);
me[0] = 30;
f->compute_rspace_stats(
gg, "scal",
0, me);
if (f->myrank == 0)
H5Fclose(gg);
v->real_space_representation = false;
v->io("field.h5", "vec", 0, true);
v->io("field.h5", "vec_tmp", 0, false);
//endcpp
""".format(self.C_dtype)
self.fluid_end += """
//begincpp
delete f;
delete v;
//endcpp
"""
return None
def specific_parser_arguments(
self,
parser):
_fluid_particle_base.specific_parser_arguments(self, parser)
return None
def launch(
self,
args = [],
**kwargs):
opt = self.prepare_launch(args)
self.parameters['niter_todo'] = 0
self.pars_from_namespace(opt)
self.set_host_info(bfps.host_info)
self.write_par()
self.run(ncpu = opt.ncpu)
return None
def main():
n = 32
kdata = pyfftw.n_byte_align_empty(
(n, n, n//2 + 1),
pyfftw.simd_alignment,
dtype = np.complex64)
rdata = pyfftw.n_byte_align_empty(
(n, n, n),
pyfftw.simd_alignment,
dtype = np.float32)
c2r = pyfftw.FFTW(
kdata.transpose((1, 0, 2)),
rdata,
axes = (0, 1, 2),
direction = 'FFTW_BACKWARD',
threads = 2)
kdata[:] = bfps.tools.generate_data_3D(n, n, n, dtype = np.complex64)
cdata = kdata.copy()
c2r.execute()
tf = TestField()
tf.parameters['nx'] = n
tf.parameters['ny'] = n
tf.parameters['nz'] = n
f = h5py.File('field.h5', 'w')
f['scal/complex/0'] = cdata
f['scal/real/0'] = rdata
f['vec/complex/0'] = np.array([cdata, cdata, cdata]).reshape(cdata.shape + (3,))
f['vec/real/0'] = np.array([rdata, rdata, rdata]).reshape(rdata.shape + (3,))
f['moments/scal'] = np.zeros(shape = (1, 10)).astype(np.float)
f['histograms/scal'] = np.zeros(shape = (1, 64)).astype(np.float)
kspace = tf.get_kspace()
nshells = kspace['nshell'].shape[0]
f['spectra/scal'] = np.zeros(shape = (1, nshells)).astype(np.float64)
f.close()
## run cpp code
tf.launch(
['-n', '{0}'.format(n),
'--ncpu', '2'])
f = h5py.File('field.h5', 'r')
#err0 = np.max(np.abs(f['scal_tmp/real/0'].value - rdata)) / np.mean(np.abs(rda | ta))
#err1 = np.max(np.abs(f['scal | /real/0'].value/(n**3) - rdata)) / np.mean(np.abs(rdata))
#err2 = np.max(np.abs(f['scal_tmp/complex/0'].value/(n**3) - cdata)) / np.mean(np.abs(cdata))
#print(err0, err1, err2)
#assert(err0 < 1e-5)
#assert(err1 < 1e-5)
#assert(err2 < 1e-4)
## compare
fig = plt.figure(figsize=(18, 6))
a = fig.add_subplot(131)
a.set_axis_off()
v0 = f['vec/complex/0'][:, :, 0, 0]
v1 = f['vec_tmp/complex/0'][:, :, 0, 0]
a.imshow(np.log(np.abs(v0 - v1)),
interpolation = 'none')
a = fig.add_subplot(132)
a.set_axis_off()
a.imshow(np.log(np.abs(v0)),
interpolation = 'none')
a = fig.add_subplot(133)
a.set_axis_off()
a.imshow(np.log(np.abs(v1)),
interpolation = 'none')
fig.tight_layout()
fig.savefig('tst_fields.pdf')
fig = plt.figure(figsize=(18, 6))
a = fig.add_subplot(131)
a.set_axis_off()
v0 = f['scal/complex/0'][:, :, 0]
v1 = f['scal_tmp/complex/0'][:, :, 0]
a.imshow(np.log(np.abs(v0 - v1)),
interpolation = 'none')
a = fig.add_subplot(132)
a.set_axis_off()
a.imshow(np.log(np.abs(v0)),
interpolation = 'none')
a = fig.add_subplot(133)
a.set_axis_off()
a.imshow(np.log(np.abs(v1)),
interpolation = 'none')
fig.tight_layout()
fig.savefig('tst_sfields.pdf')
# look at moments and histogram
#print('moments are ', f['moments/scal'][0])
#fig = plt.figure(figsize=(6,6))
#a = fig.add_subplot(211)
#a.plot(f['histograms/scal'][0])
#a.set_yscale('log')
#a = fig.add_subplot(212)
#a.plot(f['spectra/scal'][0])
#a.set_xscale('log')
#a.set_yscale('log')
#fig.tight_layout()
#fig.savefig('tst.pdf')
return None
if __name__ == '__main__':
main()
|
OCA/multi-company | account_invoice_consolidated/models/res_partner.py | Python | agpl-3.0 | 698 | 0.001433 | # Copyright (C) 2019 Open Source Integrators
# Copyright (C) 2019 Serpent Consulting Services Pvt. Ltd.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models
class ResPartner(models.Model):
_inherit = "res.partner"
def view_consolidated_invoice(self):
self.ensure_one()
cons_invoices_rec = self.env["account.invoice.consolidated"].search(
| [("partner_id", "=", self.id)]
)
| action = self.env.ref(
"account_invoice_consolidated." "account_invoice_consolidated_action"
)
action = action.read()[0]
action["domain"] = [("id", "in", cons_invoices_rec.ids)]
return action
|
MaximNevrov/neutron | neutron/cmd/sanity/checks.py | Python | apache-2.0 | 13,900 | 0.000144 | # Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import shutil
import tempfile
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from neutron._i18n import _LE
from neutron.agent.common import ovs_lib
from neutron.agent.l3 import ha_router
from neutron.agent.l3 import namespaces
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ip_link_support
from neutron.agent.linux import keepalived
from neutron.agent.linux import utils as agent_utils
from neutron.common import constants as n_consts
from neutron.plugins.common import constants as const
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants as ovs_const
from neutron.tests import base
LOG = logging.getLogger(__name__)
MINIMUM_DNSMASQ_VERSION = 2.67
MINIMUM_DIBBLER_VERSION = '1.0.1'
def ovs_vxlan_supported(from_ip='192.0.2.1', to_ip='192.0.2.2'):
name = base.get_rand_device_name(prefix='vxlantest-')
with ovs_lib.OVSBridge(name) as br:
port = br.add_tunnel_port(from_ip, to_ip, const.TYPE_VXLAN)
return port != ovs_lib.INVALID_OFPORT
def ovs_geneve_supported(from_ip='192.0.2.3', to_ip='192.0.2.4'):
name = base.get_rand_device_name(prefix='genevetest-')
with ovs_lib.OVSBridge(name) as br:
port = br.add_tunnel_port(from_ip, to_ip, const.TYPE_GENEVE)
return port != ovs_lib.INVALID_OFPORT
def iproute2_vxlan_supported():
ip = ip_lib.IPWrapper()
name = base.get_rand_device_name(prefix='vxlantest-')
port = ip.add_vxlan(name, 3000)
ip.del_veth(name)
return name == port.name
def patch_supported():
name, peer_name, patch_name = base.get_related_rand_device_names(
['patchtest-', 'peertest0-', 'peertest1-'])
with ovs_lib.OVSBridge(name) as br:
port = br.add_patch_port(patch_name, peer_name)
return port != ovs_lib.INVALID_OFPORT
def nova_notify_supported():
try:
import neutron.notifiers.nova # noqa since unused
return True
except ImportError:
return False
def ofctl_arg_supported(cmd, **kwargs):
"""Verify if ovs-ofctl binary supports cmd with **kwargs.
:param cmd: ovs-ofctl command to use for test.
:param **kwargs: arguments to test with the command.
:returns: a boolean if the supplied arguments are supported.
"""
br_name = base.get_rand_device_name(prefix='br-test-')
with ovs_lib.OVSBridge(br_name) as test_br:
full_args = ["ovs-ofctl", cmd, test_br.br_name,
ovs_lib._build_flow_expr_str(kwargs, cmd.split('-')[0])]
try:
agent_utils.execute(full_args, run_as_root=True)
except RuntimeError as e:
LOG.debug("Exception while checking supported feature via "
"command %s. Exception: %s", full_args, e)
return False
except Exception:
LOG.exception(_LE("Unexpected exception while checking supported"
" feature via command: %s"), full_args)
return False
else:
return True
def arp_responder_supported():
mac = netaddr.EUI('dead:1234:beef', dialect=netaddr.mac_unix)
ip = netaddr.IPAddress('240.0.0.1')
actions = ovs_const.ARP_RESPONDER_ACTIONS % {'mac': mac, 'ip': ip}
return ofctl_arg_supported(cmd='add-flow',
table=21,
| priority=1,
proto='arp',
dl_vlan=42,
nw_dst='%s' % ip,
actions=actions)
def arp_header_match_supported():
return ofctl_arg_supported(cmd='add-flow',
table=24,
priority=1,
proto='arp',
arp_op='0x2',
arp_spa='1.1.1.1',
actions="NORMAL")
def icmpv6_header_match_supported():
return ofctl_arg_supported(cmd='add-flow',
table=ovs_const.ARP_SPOOF_TABLE,
priority=1,
dl_type=n_consts.ETHERTYPE_IPV6,
nw_proto=n_consts.PROTO_NUM_IPV6_ICMP,
icmp_type=n_consts.ICMPV6_TYPE_NA,
nd_target='fdf8:f53b:82e4::10',
actions="NORMAL")
def vf_management_supported():
is_supported = True
required_caps = (
ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK,
ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE)
try:
vf_section = ip_link_support.IpLinkSupport.get_vf_mgmt_section()
for cap in required_caps:
if not ip_link_support.IpLinkSupport.vf_mgmt_capability_supported(
vf_section, cap):
is_supported = False
LOG.debug("ip link command does not support "
"vf capability '%(cap)s'", cap)
except ip_link_support.UnsupportedIpLinkCommand:
LOG.exception(_LE("Unexpected exception while checking supported "
"ip link command"))
return False
return is_supported
def netns_read_requires_helper():
ipw = ip_lib.IPWrapper()
nsname = "netnsreadtest-" + uuidutils.generate_uuid()
ipw.netns.add(nsname)
try:
# read without root_helper. if exists, not required.
ipw_nohelp = ip_lib.IPWrapper()
exists = ipw_nohelp.netns.exists(nsname)
finally:
ipw.netns.delete(nsname)
return not exists
def get_minimal_dnsmasq_version_supported():
return MINIMUM_DNSMASQ_VERSION
def dnsmasq_version_supported():
try:
cmd = ['dnsmasq', '--version']
env = {'LC_ALL': 'C'}
out = agent_utils.execute(cmd, addl_env=env)
m = re.search(r"version (\d+\.\d+)", out)
ver = float(m.group(1)) if m else 0
if ver < MINIMUM_DNSMASQ_VERSION:
return False
except (OSError, RuntimeError, IndexError, ValueError) as e:
LOG.debug("Exception while checking minimal dnsmasq version. "
"Exception: %s", e)
return False
return True
class KeepalivedIPv6Test(object):
def __init__(self, ha_port, gw_port, gw_vip, default_gw):
self.ha_port = ha_port
self.gw_port = gw_port
self.gw_vip = gw_vip
self.default_gw = default_gw
self.manager = None
self.config = None
self.config_path = None
self.nsname = "keepalivedtest-" + uuidutils.generate_uuid()
self.pm = external_process.ProcessMonitor(cfg.CONF, 'router')
self.orig_interval = cfg.CONF.AGENT.check_child_processes_interval
def configure(self):
config = keepalived.KeepalivedConf()
instance1 = keepalived.KeepalivedInstance('MASTER', self.ha_port, 1,
['169.254.192.0/18'],
advert_int=5)
instance1.track_interfaces.append(self.ha_port)
# Configure keepalived with an IPv6 address (gw_vip) on gw_port.
vip_addr1 = keepalived.KeepalivedVipAddress(self.gw_vip, self.gw_port)
instance1.vips.append(vip_addr1)
# Configure keepalived with an IPv6 default route on gw_port.
gateway_ro | |
pdorrell/aptrow | aptrow_server.py | Python | gpl-3.0 | 1,772 | 0.011851 | """ Copyright 2009 Philip Dorrell http://www.1729.com/ (email: http://www.1729.com/email.html)
This file is part of Aptrow ("Advance Programming Technology Read-Only Webification": http://www.1729.com/aptrow/)
Aptrow is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
Aptrow is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with Aptrow (as license-gplv3.txt).
If not, see <http://www.gnu.org/licenses/>."""
import aptrow
from aptrow import addModule, runAptrowServer
# define resource modules
# format: addModule (<prefix>, <python module>)
addModule ("base", "aptrow")
addModule ("files", "files_module")
addModule ("strings", "strings_module")
addModule ("zip", "zip_module")
addModule ("aptrow", "aptrow_module")
addModule ("sqlite", "sqlite_module")
# Run the application as a web server on localhost:8000 (preventing external | IP access)
# SECURITY NOTE: This demo application gives r | ead-only access to all files and directories
# on the local filesystem which can be accessed by the user running the application. So beware.
#
# (Also, this application may create temporary files which it does not delete, which are copies
# of the contents of 'file-like' objects which are not themselves files.)
runAptrowServer('localhost', 8000)
# suggested starting URL: http://localhost:8000/files/dir?path=c:\
|
lmazuel/azure-sdk-for-python | azure-mgmt-containerregistry/tests/test_mgmt_containerregistry_2017_03_01.py | Python | mit | 4,129 | 0.002665 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
from azure.mgmt.containerregistry.v2017_03_01.models import (
RegistryCreateParameters,
RegistryUpdateParameters,
StorageAccountParameters,
Sku,
SkuTier,
ProvisioningState,
PasswordName
)
import azure.mgmt.storage
from devtools_testutils import (
AzureMgmtTestCase, FakeStorageAccount,
ResourceGroupPreparer, StorageAccountPreparer
)
FAKE_STORAGE = FakeStorageAccount(
name='pyacr',
id=''
)
DEFAULT_LOCATION = 'westcentralus'
DEFAULT_SKU_NAME = 'Basic'
DEFAULT_KEY_VALUE_PAIR = {
'key': 'value'
}
class MgmtACRTest20170301(AzureMgmtTestCase):
def setUp(self):
super(MgmtACRTest20170301, self).setUp()
self.client = self.create_mgmt_client(
azure.mgmt.containerregistry.ContainerRegistryManagementClient,
api_version='2017-03-01'
)
@ResourceGroupPreparer(location=DEFAULT_LOCATION)
@StorageAccountPreparer(name_prefix='pyacr', location=DEFAULT_LOCATION, playback_fake_resource=FAKE_STORAGE)
def test_basic_registry(self, resource_group, location, storage_account, storage_account_key):
registry_name = self.get_resource_name('pyacr')
name_status = self.client.registries.check_name_availability(registry_name)
self.assertTrue(name_status.name_available)
# Create a Basic registry
registry = self.client.registries.create(
resource_group_name=resource_group.name,
registry_name=registry_name,
registry_create_parameters=RegistryCreateParameters(
location=location,
sku=Sku(
name=DEFAULT_SKU_NAME
),
storage_account=StorageAccountParameters(
name=storage_account.name,
access_key=storage_account_key
)
)
).result()
self.assertEqual(registry.name, registry_name)
self.assertEqual(registry.location, location)
self.assertEqual(registry.sku.name, DEF | AULT_SKU_NAME)
self.assertEqual(registry.sku.tier, SkuTier.basic.value)
self.assertEqual(registry.provisioning_state.value, ProvisioningState.succeeded.value)
self.assertEqual(registry.admin_user_enabled, False)
registries = list(self.client.registries.list_by_resource_group(resour | ce_group.name))
self.assertEqual(len(registries), 1)
# Update the registry with new tags and enable admin user
registry = self.client.registries.update(
resource_group_name=resource_group.name,
registry_name=registry_name,
registry_update_parameters=RegistryUpdateParameters(
tags=DEFAULT_KEY_VALUE_PAIR,
admin_user_enabled=True
)
)
self.assertEqual(registry.name, registry_name)
self.assertEqual(registry.tags, DEFAULT_KEY_VALUE_PAIR)
self.assertEqual(registry.admin_user_enabled, True)
registry = self.client.registries.get(resource_group.name, registry_name)
self.assertEqual(registry.name, registry_name)
self.assertEqual(registry.tags, DEFAULT_KEY_VALUE_PAIR)
self.assertEqual(registry.admin_user_enabled, True)
credentials = self.client.registries.list_credentials(resource_group.name, registry_name)
self.assertEqual(len(credentials.passwords), 2)
credentials = self.client.registries.regenerate_credential(
resource_group.name, registry_name, PasswordName.password)
self.assertEqual(len(credentials.passwords), 2)
self.client.registries.delete(resource_group.name, registry_name)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
eduNEXT/edx-platform | openedx/core/djangoapps/coursegraph/management/commands/tests/test_dump_to_neo4j.py | Python | agpl-3.0 | 20,829 | 0.00221 | """
Tests for the dump_to_neo4j management command.
"""
from datetime import datetime
from unittest import mock
import ddt
from django.core.management import call_command
from edx_toggles.toggles.testutils import override_waffle_switch
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
import openedx.core.djangoapps.content.block_structure.config as block_structure_config
from openedx.core.djangoapps.content.block_structure.signals import update_block_structure_on_course_publish
from openedx.core.djangoapps.coursegraph.management.commands.dump_to_neo4j import ModuleStoreSerializer
from openedx.core.djangoapps.coursegraph.management.commands.tests.utils import MockGraph, MockNodeMatcher
from openedx.core.djangoapps.coursegraph.tasks import (
coerce_types,
serialize_course,
serialize_item,
should_dump_course,
strip_branch_and_version
)
from openedx.core.djangolib.testing.utils import skip_unless_lms
class TestDumpToNeo4jCommandBase(SharedModuleStoreTestCase):
    """
    Base class for the test suites in this file. Sets up a couple courses.
    """
    @classmethod
    def setUpClass(cls):
        r"""
        Creates two courses; one that's just a course module, and one that
        looks like:
                        course
                          |
                       chapter
                          |
                     sequential
                          |
                      vertical
                     / |  \  \
                    /  |   \  ----------
                   /   |    \           \
                  /    |     ---         \
                 /     |        \         \
               html -> problem -> video -> video2

        The side-pointing arrows (->) are PRECEDES relationships; the more
        vertical lines are PARENT_OF relationships.

        The vertical in this course and the first video have the same
        display_name, so that their block_ids are the same. This is to
        test for a bug where xblocks with the same block_ids (but different
        locations) pointed to themselves erroneously.
        """
        super().setUpClass()
        cls.course = CourseFactory.create()
        cls.chapter = ItemFactory.create(parent=cls.course, category='chapter')
        cls.sequential = ItemFactory.create(parent=cls.chapter, category='sequential')
        # 'subject' is deliberately reused for the vertical and the first
        # video so both end up with identical block_ids (see docstring).
        cls.vertical = ItemFactory.create(parent=cls.sequential, category='vertical', display_name='subject')
        cls.html = ItemFactory.create(parent=cls.vertical, category='html')
        cls.problem = ItemFactory.create(parent=cls.vertical, category='problem')
        cls.video = ItemFactory.create(parent=cls.vertical, category='video', display_name='subject')
        cls.video2 = ItemFactory.create(parent=cls.vertical, category='video')

        cls.course2 = CourseFactory.create()

        cls.course_strings = [str(cls.course.id), str(cls.course2.id)]

    @staticmethod
    def setup_mock_graph(mock_matcher_class, mock_graph_class, transaction_errors=False):
        """
        Replaces the py2neo Graph object with a MockGraph; similarly replaces
        NodeMatcher with MockNodeMatcher.

        Arguments:
            mock_matcher_class: a mocked NodeMatcher class
            mock_graph_class: a mocked Graph class
            transaction_errors: a bool for whether we should get errors
                when transactions try to commit

        Returns: an instance of MockGraph
        """
        mock_graph = MockGraph(transaction_errors=transaction_errors)
        mock_graph_class.return_value = mock_graph

        mock_node_matcher = MockNodeMatcher(mock_graph)
        mock_matcher_class.return_value = mock_node_matcher
        return mock_graph

    def assertCourseDump(self, mock_graph, number_of_courses, number_commits, number_rollbacks):
        """
        Asserts that we have the expected number of courses, commits, and
        rollbacks after we dump the modulestore to neo4j.

        Arguments:
            mock_graph: a MockGraph backend
            number_of_courses: number of courses we expect to find
            number_commits: number of commits we expect against the graph
            number_rollbacks: number of commit rollbacks we expect
        """
        courses = {node['course_key'] for node in mock_graph.nodes}
        assert len(courses) == number_of_courses
        assert mock_graph.number_commits == number_commits
        assert mock_graph.number_rollbacks == number_rollbacks
@ddt.ddt
class TestDumpToNeo4jCommand(TestDumpToNeo4jCommandBase):
    """
    Tests for the dump to neo4j management command.

    NOTE: with stacked @mock.patch decorators the bottom-most patch is
    applied first, so mocks arrive as (mock_graph_class, mock_matcher_class)
    after any ddt-supplied data arguments.
    """
    @mock.patch('openedx.core.djangoapps.coursegraph.tasks.NodeMatcher')
    @mock.patch('openedx.core.djangoapps.coursegraph.tasks.Graph')
    @ddt.data(1, 2)
    def test_dump_specific_courses(self, number_of_courses, mock_graph_class, mock_matcher_class):
        """
        Test that you can specify which courses you want to dump.
        """
        mock_graph = self.setup_mock_graph(mock_matcher_class, mock_graph_class)

        call_command(
            'dump_to_neo4j',
            courses=self.course_strings[:number_of_courses],
            host='mock_host',
            port=7687,
            user='mock_user',
            password='mock_password',
        )

        self.assertCourseDump(
            mock_graph,
            number_of_courses=number_of_courses,
            number_commits=number_of_courses,
            number_rollbacks=0
        )

    @mock.patch('openedx.core.djangoapps.coursegraph.tasks.NodeMatcher')
    @mock.patch('openedx.core.djangoapps.coursegraph.tasks.Graph')
    def test_dump_skip_course(self, mock_graph_class, mock_matcher_class):
        """
        Test that you can skip courses.
        """
        mock_graph = self.setup_mock_graph(
            mock_matcher_class, mock_graph_class
        )
        call_command(
            'dump_to_neo4j',
            skip=self.course_strings[:1],
            host='mock_host',
            port=7687,
            user='mock_user',
            password='mock_password',
        )

        self.assertCourseDump(
            mock_graph,
            number_of_courses=1,
            number_commits=1,
            number_rollbacks=0,
        )

    @mock.patch('openedx.core.djangoapps.coursegraph.tasks.NodeMatcher')
    @mock.patch('openedx.core.djangoapps.coursegraph.tasks.Graph')
    def test_dump_skip_beats_specifying(self, mock_graph_class, mock_matcher_class):
        """
        Test that if you skip and specify the same course, you'll skip it.
        """
        mock_graph = self.setup_mock_graph(
            mock_matcher_class, mock_graph_class
        )
        call_command(
            'dump_to_neo4j',
            skip=self.course_strings[:1],
            courses=self.course_strings[:1],
            host='mock_host',
            port=7687,
            user='mock_user',
            password='mock_password',
        )

        self.assertCourseDump(
            mock_graph,
            number_of_courses=0,
            number_commits=0,
            number_rollbacks=0,
        )

    @mock.patch('openedx.core.djangoapps.coursegraph.tasks.NodeMatcher')
    @mock.patch('openedx.core.djangoapps.coursegraph.tasks.Graph')
    def test_dump_all_courses(self, mock_graph_class, mock_matcher_class):
        """
        Test if you don't specify which courses to dump, then you'll dump
        all of them.
        """
        mock_graph = self.setup_mock_graph(
            mock_matcher_class, mock_graph_class
        )
        call_command(
            'dump_to_neo4j',
            host='mock_host',
            port=7687,
            user='mock_user',
            password='mock_password'
        )
        self.assertCourseDump(
            mock_graph,
            number_of_courses=2,
            number_commits=2,
            number_rollbacks=0,
        )
class SomeThing:
    """Just to test the stringification of an object."""
    def __str__(self):
        # Fixed marker so tests can recognize this object in serialized output.
        return "<SomeThing>"
@skip_unless_lms
@dd |
akavlie/SMSr | sms/tw_send.py | Python | bsd-3-clause | 1,031 | 0.007759 | from sms import app
import twilio
def twilio_send(phone_number, message):
    """Sends an SMS via the Twilio service.

    Arguments:
        phone_number: destination number for the 'To' field.
        message: text body of the SMS.

    Returns the raw Twilio API response object.
    """
    data = {'From': app.config['CALLER_ID'],
            'To': phone_number,
            'Body': message}
    account = twilio.Account(app.config['ACCOUNT_SID'],
                             app.config['ACCOUNT_TOKEN'])
    tw_response = account.request('/%s/Accounts/%s/SMS/Messages.json' %
                                  (app.config['API_VERSION'], app.config['ACCOUNT_SID']),
                                  'POST', data)
    return tw_response
def twilio_update(sid):
    """Update status for a given SMS.

    Arguments:
        sid: the Twilio message SID to look up.

    Returns the raw Twilio API response object.
    """
    account = twilio.Account(app.config['ACCOUNT_SID'],
                             app.config['ACCOUNT_TOKEN'])
    tw_response = account.request('/%s/Accounts/%s/SMS/Messages/%s.json' %
                                  (app.config['API_VERSION'],
                                   app.config['ACCOUNT_SID'], sid),
                                  'GET')
    return tw_response
|
eesatfan/openpli-enigma2 | lib/python/Screens/EpgSelection.py | Python | gpl-2.0 | 15,315 | 0.030167 | from Screen import Screen
from Components.config import config, ConfigClock
from Components.Button import Button
from Components.Pixmap import Pixmap
from Components.Label import Label
from Components.EpgList import EPGList, EPG_TYPE_SINGLE, EPG_TYPE_SIMILAR, EPG_TYPE_MULTI
from Components.ActionMap import ActionMap
from Components.TimerSanityCheck import TimerSanityCheck
from Components.UsageConfig import preferredTimerPath
from Components.Sources.ServiceEvent import ServiceEvent
from Components.Sources.Event import Event
from Screens.ChoiceBox import ChoiceBox
from Screens.TimerEdit import TimerSanityConflict, TimerEditList
from Screens.EventView import EventViewSimple
from Screens.MessageBox import MessageBox
from TimeDateInput import TimeDateInput
from enigma import eServiceReference
from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT
from TimerEntry import TimerEntry
from ServiceReference import ServiceReference
from time import localtime, time
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
from Tools.BoundFunction import boundFunction
mepg_config_initialized = False
class EPGSelection(Screen):
EMPTY = 0
ADD_TIMER = 1
REMOVE_TIMER = 2
ZAP = 1
def __init__(self, session, service, zapFunc=None, eventid=None, bouquetChangeCB=None, serviceChangeCB=None):
    """Build the EPG screen; the mode depends on the *service* argument.

    - str service together with an eventid -> similar-events list
    - eServiceReference or str             -> single-service EPG
    - anything else (a service list)       -> multi-service EPG
    """
    Screen.__init__(self, session)
    self.bouquetChangeCB = bouquetChangeCB
    self.serviceChangeCB = serviceChangeCB
    self.ask_time = -1  # now
    self["key_red"] = Button("")
    self.closeRecursive = False
    self.saved_title = None
    self["Service"] = ServiceEvent()
    self["Event"] = Event()
    self.session = session
    if isinstance(service, str) and eventid != None:
        # Similar-events mode: service is a reference string, eventid given.
        self.type = EPG_TYPE_SIMILAR
        self["key_yellow"] = Button()
        self["key_blue"] = Button()
        self["key_red"] = Button()
        self.currentService = service
        self.eventid = eventid
        self.zapFunc = None
    elif isinstance(service, eServiceReference) or isinstance(service, str):
        # Single-service EPG.
        self.type = EPG_TYPE_SINGLE
        self["key_yellow"] = Button()
        self["key_blue"] = Button()
        self.currentService = ServiceReference(service)
        self.zapFunc = zapFunc
        self.sort_type = 0
        self.setSortDescription()
    else:
        # Multi-service EPG: 'service' is a list of services.
        self.skinName = "EPGSelectionMulti"
        self.type = EPG_TYPE_MULTI
        self["key_yellow"] = Button(pgettext("button label, 'previous screen'", "Prev"))
        self["key_blue"] = Button(pgettext("button label, 'next screen'", "Next"))
        self["now_button"] = Pixmap()
        self["next_button"] = Pixmap()
        self["more_button"] = Pixmap()
        self["now_button_sel"] = Pixmap()
        self["next_button_sel"] = Pixmap()
        self["more_button_sel"] = Pixmap()
        self["now_text"] = Label()
        self["next_text"] = Label()
        self["more_text"] = Label()
        self["date"] = Label()
        self.services = service
        self.zapFunc = zapFunc
    self["key_green"] = Button(_("Add timer"))
    self.key_green_choice = self.ADD_TIMER
    self.key_red_choice = self.EMPTY
    self["list"] = EPGList(type = self.type, selChangedCB = self.onSelectionChanged, timer = session.nav.RecordTimer)
    self["actions"] = ActionMap(["EPGSelectActions", "OkCancelActions"],
        {
            "cancel": self.closeScreen,
            "ok": self.eventSelected,
            "timerAdd": self.timerAdd,
            "yellow": self.yellowButtonPressed,
            "blue": self.blueButtonPressed,
            "info": self.infoKeyPressed,
            "red": self.zapTo,
            "menu": self.furtherOptions,
            "nextBouquet": self.nextBouquet,  # just used in multi epg yet
            "prevBouquet": self.prevBouquet,  # just used in multi epg yet
            "nextService": self.nextService,  # just used in single epg yet
            "prevService": self.prevService,  # just used in single epg yet
            "preview": self.eventPreview,
        })
    self["actions"].csel = self
    self.onLayoutFinish.append(self.onCreate)
def nextBouquet(self):
    """Step forward one bouquet (multi-EPG only)."""
    if self.bouquetChangeCB:
        self.bouquetChangeCB(1, self)
def prevBouquet(self):
    """Step back one bouquet (multi-EPG only)."""
    if self.bouquetChangeCB:
        self.bouquetChangeCB(-1, self)
def nextService(self):
    """Step forward one service (single EPG only)."""
    if self.serviceChangeCB:
        self.serviceChangeCB(1, self)
def prevService(self):
    """Step back one service (single EPG only)."""
    if self.serviceChangeCB:
        self.serviceChangeCB(-1, self)
def enterDateTime(self):
    """Open a date/time input to jump the multi-EPG to a specific time."""
    if self.type == EPG_TYPE_MULTI:
        global mepg_config_initialized
        # Lazily create the shared ConfigClock the first time it is needed.
        if not mepg_config_initialized:
            config.misc.prev_mepg_time = ConfigClock(default = time())
            mepg_config_initialized = True
        self.session.openWithCallback(self.onDateTimeInputClosed, TimeDateInput, config.misc.prev_mepg_time)
def furtherOptions(self):
    """Show a context menu with extra actions for the current event."""
    menu = []
    text = _("Select action")
    event = self["list"].getCurrent()[0]
    if event:
        # Only offer EVENTINFO plugins that accept a 'selectedevent' argument.
        menu = [(p.name, boundFunction(self.runPlugin, p)) for p in plugins.getPlugins(where = PluginDescriptor.WHERE_EVENTINFO) \
                if 'selectedevent' in p.__call__.func_code.co_varnames]
        if menu:
            text += _(": %s") % event.getEventName()
    if self.type == EPG_TYPE_MULTI:
        menu.append((_("Goto specific date/time"), self.enterDateTime))
    menu.append((_("Timer Overview"), self.openTimerOverview))
    if len(menu) == 1:
        # A single entry is executed directly instead of showing a menu.
        menu and menu[0][1]()
    elif len(menu) > 1:
        def boxAction(choice):
            if choice:
                choice[1]()
        self.session.openWithCallback(boxAction, ChoiceBox, title=text, list=menu)
def runPlugin(self, plugin):
    """Invoke an EVENTINFO plugin with the currently selected event."""
    event = self["list"].getCurrent()
    plugin(session=self.session, selectedevent=event)
def openTimerOverview(self):
    """Open the timer list screen."""
    self.session.open(TimerEditList)
def onDateTimeInputClosed(self, ret):
    """Callback from TimeDateInput: refill the multi-EPG at the chosen time."""
    if len(ret) > 1:
        if ret[0]:
            self.ask_time = ret[1]
            self["list"].fillMultiEPG(self.services, ret[1])
def closeScreen(self):
    """Close the EPG, zapping back to the original service if we previewed."""
    if self.zapFunc:
        self.zapFunc(None, zapback = True)
    self.close(self.closeRecursive)
def infoKeyPressed(self):
    """Open the event detail view for the current selection."""
    cur = self["list"].getCurrent()
    event = cur[0]
    service = cur[1]
    if event is not None:
        if self.type != EPG_TYPE_SIMILAR:
            # Offer the 'similar events' jump unless we are already in it.
            self.session.open(EventViewSimple, event, service, self.eventViewCallback, self.openSimilarList)
        else:
            self.session.open(EventViewSimple, event, service, self.eventViewCallback)
def openSimilarList(self, eventid, refstr):
    """Open a new EPG screen listing events similar to the given one."""
    self.session.open(EPGSelection, refstr, None, eventid)
def setServices(self, services):
    """Replace the multi-EPG service list and rebuild the screen."""
    self.services = services
    self.onCreate()
def setService(self, service):
    """Replace the current service (single EPG) and rebuild the screen."""
    self.currentService = service
    self.onCreate()
# just used in multi epg
def onCreate(self):
    """Fill the list widget according to the screen's mode after layout."""
    l = self["list"]
    l.recalcEntrySize()
    if self.type == EPG_TYPE_MULTI:
        l.fillMultiEPG(self.services, self.ask_time)
        l.moveToService(self.session.nav.getCurrentlyPlayingServiceOrGroup())
    elif self.type == EPG_TYPE_SINGLE:
        service = self.currentService
        self["Service"].newService(service.ref)
        # Append the service name to the original window title once.
        if self.saved_title is None:
            self.saved_title = self.instance.getTitle()
        title = self.saved_title + ' - ' + service.getServiceName()
        self.instance.setTitle(title)
        l.fillSingleEPG(service)
    else:
        l.fillSimilarList(self.currentService, self.eventid)
def eventViewCallback(self, setEvent, setService, val):
    """Move the selection up/down from the event view and push the new event.

    In multi-EPG mode, service separator rows (event None on a new service)
    are skipped by recursing one more step.
    """
    l = self["list"]
    old = l.getCurrent()
    if val == -1:
        self.moveUp()
    elif val == +1:
        self.moveDown()
    cur = l.getCurrent()
    if self.type == EPG_TYPE_MULTI and cur[0] is None and cur[1].ref != old[1].ref:
        self.eventViewCallback(setEvent, setService, val)
    else:
        setService(cur[1])
        setEvent(cur[0])
def zapTo(self):
    """Red button: zap to the selected service and close the EPG.

    With more than one infobar only a preview zap is done (screen stays
    open); otherwise we zap for real and close.
    """
    if self.key_red_choice == self.ZAP and self.zapFunc:
        self.closeRecursive = True
        from Components.ServiceEventTracker import InfoBarCount
        if InfoBarCount > 1:
            self.eventPreview()
        else:
            self.zapSelectedService()
            # NOTE(review): indentation was lost in extraction; this close()
            # is placed on the else-branch — confirm against upstream source.
            self.close(self.closeRecursive)
def zapSelectedService(self, prev=False):
    """Zap to the currently selected service; prev=True means preview only."""
    lst = self["list"]
    count = lst.getCurrentChangeCount()
    # Only zap when the selection has not changed since the last refresh.
    if count == 0:
        ref = lst.getCurrent()[1]
        if ref is not None:
            self.zapFunc(ref.ref, preview = prev)
def eventPreview(self):
    """Preview-zap to the selected service without closing the EPG."""
    if self.zapFunc:
        # if enabled, then closed whole EPG with EXIT:
        # self.closeRecursive = True
        self.zapSelectedService(True)
def eventSelected(self):
    """OK pressed: in multi-EPG open the single EPG for the selected service,
    otherwise show the event details."""
    if self.skinName == "EPGSelectionMulti":
        cur = self["list"].getCurrent()
        event = cur[0]
        ref = cur[1] and cur[1].ref.toString()
        if ref and event:
            self.session.open(EPGSelection, ref)
    else:
        self.infoKeyPressed()
def |
chennan47/osf.io | api_tests/nodes/views/test_node_detail.py | Python | apache-2.0 | 75,427 | 0.000849 | # -*- coding: utf-8 -*-
import mock
import pytest
from urlparse import urlparse
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import NodeLog
from osf.models.licenses import NodeLicense
from osf.utils.sanitize import strip_html
from osf.utils import permissions
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
CollectionFactory,
CommentFactory,
NodeLicenseRecordFactory,
PrivateLinkFactory,
PreprintFactory,
IdentifierFactory,
InstitutionFactory,
)
from rest_framework import exceptions
from tests.base import fake
from tests.utils import assert_items_equal, assert_latest_log, assert_latest_log_not
from website.views import find_bookmark_collection
@pytest.fixture()
def user():
    """A fresh authenticated user."""
    return AuthUserFactory()
@pytest.mark.django_db
class TestNodeDetail:
@pytest.fixture()
def user_two(self):
    """A second user, not a contributor on the test projects."""
    return AuthUserFactory()
@pytest.fixture()
def project_public(self, user):
    """A public project created by ``user``."""
    return ProjectFactory(
        title='Project One',
        is_public=True,
        creator=user)
@pytest.fixture()
def project_private(self, user):
    """A private project created by ``user``."""
    return ProjectFactory(
        title='Project Two',
        is_public=False,
        creator=user)
@pytest.fixture()
def component_public(self, user, project_public):
    """A public component nested under the public project."""
    return NodeFactory(parent=project_public, creator=user, is_public=True)
@pytest.fixture()
def url_public(self, project_public):
    """API detail URL for the public project."""
    return '/{}nodes/{}/'.format(API_BASE, project_public._id)
@pytest.fixture()
def url_private(self, project_private):
    """API detail URL for the private project."""
    return '/{}nodes/{}/'.format(API_BASE, project_private._id)
@pytest.fixture()
def url_component_public(self, component_public):
    """API detail URL for the public component."""
    return '/{}nodes/{}/'.format(API_BASE, component_public._id)
@pytest.fixture()
def permissions_read(self):
    """Permission list expected for a read-only viewer."""
    return ['read']
@pytest.fixture()
def permissions_write(self):
    """Permission list expected for a write contributor."""
    return ['read', 'write']
@pytest.fixture()
def permissions_admin(self):
    """Permission list expected for an admin contributor."""
    return ['read', 'admin', 'write']
def test_return_project_details(
        self, app, user, user_two, project_public,
        project_private, url_public, url_private,
        permissions_read, permissions_admin):
    """GET project detail for every combination of visibility and auth."""
    # test_return_public_project_details_logged_out
    res = app.get(url_public)
    assert res.status_code == 200
    assert res.content_type == 'application/vnd.api+json'
    assert res.json['data']['attributes']['title'] == project_public.title
    assert res.json['data']['attributes']['description'] == project_public.description
    assert res.json['data']['attributes']['category'] == project_public.category
    assert_items_equal(
        res.json['data']['attributes']['current_user_permissions'],
        permissions_read)

    # test_return_public_project_details_contributor_logged_in
    res = app.get(url_public, auth=user.auth)
    assert res.status_code == 200
    assert res.content_type == 'application/vnd.api+json'
    assert res.json['data']['attributes']['title'] == project_public.title
    assert res.json['data']['attributes']['description'] == project_public.description
    assert res.json['data']['attributes']['category'] == project_public.category
    assert_items_equal(
        res.json['data']['attributes']['current_user_permissions'],
        permissions_admin)

    # test_return_public_project_details_non_contributor_logged_in
    res = app.get(url_public, auth=user_two.auth)
    assert res.status_code == 200
    assert res.content_type == 'application/vnd.api+json'
    assert res.json['data']['attributes']['title'] == project_public.title
    assert res.json['data']['attributes']['description'] == project_public.description
    assert res.json['data']['attributes']['category'] == project_public.category
    assert_items_equal(
        res.json['data']['attributes']['current_user_permissions'],
        permissions_read)

    # test_return_private_project_details_logged_in_admin_contributor
    res = app.get(url_private, auth=user.auth)
    assert res.status_code == 200
    assert res.content_type == 'application/vnd.api+json'
    assert res.json['data']['attributes']['title'] == project_private.title
    assert res.json['data']['attributes']['description'] == project_private.description
    assert res.json['data']['attributes']['category'] == project_private.category
    assert_items_equal(
        res.json['data']['attributes']['current_user_permissions'],
        permissions_admin)

    # test_return_private_project_details_logged_out
    res = app.get(url_private, expect_errors=True)
    assert res.status_code == 401
    assert 'detail' in res.json['errors'][0]

    # test_return_private_project_details_logged_in_non_contributor
    res = app.get(url_private, auth=user_two.auth, expect_errors=True)
    assert res.status_code == 403
    assert 'detail' in res.json['errors'][0]
def test_return_private_project_details_logged_in_write_contributor(
        self, app, user, user_two, project_private, url_private, permissions_write):
    """A write contributor sees the private project with read+write perms."""
    project_private.add_contributor(
        contributor=user_two, auth=Auth(user), save=True)
    res = app.get(url_private, auth=user_two.auth)
    assert res.status_code == 200
    assert res.content_type == 'application/vnd.api+json'
    assert res.json['data']['attributes']['title'] == project_private.title
    assert res.json['data']['attributes']['description'] == project_private.description
    assert res.json['data']['attributes']['category'] == project_private.category
    assert_items_equal(
        res.json['data']['attributes']['current_user_permissions'],
        permissions_write)
def test_top_level_project_has_no_parent(self, app, url_public):
    """A top-level project exposes no 'parent' relationship."""
    res = app.get(url_public)
    assert res.status_code == 200
    assert 'parent' not in res.json['data']['relationships']
    assert 'id' in res.json['data']
    assert res.content_type == 'application/vnd.api+json'
def test_child_project_has_parent(
        self, app, user, project_public, url_public):
    """A component's detail payload links back to its parent project."""
    public_component = NodeFactory(
        parent=project_public, creator=user, is_public=True)
    public_component_url = '/{}nodes/{}/'.format(
        API_BASE, public_component._id)
    res = app.get(public_component_url)
    assert res.status_code == 200
    url = res.json['data']['relationships']['parent']['links']['related']['href']
    assert urlparse(url).path == url_public
def test_node_has(self, app, url_public):
# test_node_has_children_link
res = app.get(url_public)
url = res.json['data']['relationships']['children']['links']['related']['href']
expected_url = '{}children/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_contributors_link
res = app.get(url_public)
url = res.json['data']['relationships']['contributors']['links']['related']['href']
expected_url = '{}contributors/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_node_links_link
res = app.get(url_public)
url = res.json['data']['relationships']['node_links']['links']['related']['href']
expected_url = '{}node_links/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_registrations_link
res = app.get(url_public)
url = res.json['data']['relationships']['registrations']['links']['related']['href']
expected_url = '{}registrations/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_files_link
res = app.get(url_public)
url = res.json['data']['relationships']['files']['links']['related']['href']
expected_url = '{}files/' |
lukas-hetzenecker/home-assistant | tests/components/plaato/__init__.py | Python | apache-2.0 | 40 | 0 | """Te | sts for the Plaato integration." | ""
|
google-research/google-research | snerg/train.py | Python | apache-2.0 | 11,308 | 0.006986 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Training script for Nerf."""
import functools
import gc
import time
from absl import app
from absl import flags
import flax
from flax.metrics import tensorboard
from flax.training import checkpoints
import jax
from jax import config
from jax import random
import jax.numpy as jnp
import numpy as np
from snerg.nerf import datasets
from snerg.nerf import models
from snerg.nerf import utils
FLAGS = flags.FLAGS
utils.define_flags()
config.parse_flags_with_absl()
def train_step(model, rng, state, batch, lr):
  """One optimization step.

  Args:
    model: The linen model.
    rng: jnp.ndarray, random number generator.
    state: utils.TrainState, state of the model/optimizer.
    batch: dict, a mini-batch of data for training.
    lr: float, real-time learning rate.

  Returns:
    new_state: utils.TrainState, new training state.
    stats: list. [(loss, psnr), (loss_coarse, psnr_coarse)].
    rng: jnp.ndarray, updated random number generator.
  """
  rng, key_0, key_1 = random.split(rng, 3)

  def loss_fn(variables):
    rays = batch["rays"]
    ret = model.apply(variables, key_0, key_1, rays, FLAGS.randomized)
    if len(ret) not in (1, 2):
      raise ValueError(
          "ret should contain either 1 set of output (coarse only), or 2 sets "
          "of output (coarse as ret[0] and fine as ret[1]).")
    # The main prediction is always at the end of the ret list.
    rgb, _, _, sigma, _, _ = ret[-1]
    loss = ((rgb - batch["pixels"][Ellipsis, :3])**2).mean()
    psnr = utils.compute_psnr(loss)
    if len(ret) > 1:
      # If there are both coarse and fine predictions, we compute the loss for
      # the coarse prediction (ret[0]) as well.
      rgb_c, _, _, sigma_c, _, _ = ret[0]
      loss_c = ((rgb_c - batch["pixels"][Ellipsis, :3])**2).mean()
      psnr_c = utils.compute_psnr(loss_c)
      sparsity_c = FLAGS.sparsity_strength * jax.numpy.log(1.0 + sigma_c**2 /
                                                           0.5).mean()
    else:
      loss_c = 0.
      psnr_c = 0.
      sparsity_c = 0.0

    def tree_sum_fn(fn):
      # Sum fn(leaf) over every leaf in the variables pytree.
      return jax.tree_util.tree_reduce(
          lambda x, y: x + fn(y), variables, initializer=0)

    # Mean squared weight magnitude, used for weight decay.
    weight_l2 = (
        tree_sum_fn(lambda z: jnp.sum(z**2)) /
        tree_sum_fn(lambda z: jnp.prod(jnp.array(z.shape))))

    # Cauchy sparsity prior on the predicted densities.
    sparsity = FLAGS.sparsity_strength * jax.numpy.log(1.0 +
                                                       sigma**2 / 0.5).mean()

    stats = utils.Stats(
        loss=loss,
        psnr=psnr,
        loss_c=loss_c,
        psnr_c=psnr_c,
        weight_l2=weight_l2,
        sparsity=sparsity,
        sparsity_c=sparsity_c)
    return (loss + loss_c + FLAGS.weight_decay_mult * weight_l2 + sparsity +
            sparsity_c), stats

  (_, stats), grad = (
      jax.value_and_grad(loss_fn, has_aux=True)(state.optimizer.target))
  # Average gradients and stats across devices before applying the update.
  grad = jax.lax.pmean(grad, axis_name="batch")
  stats = jax.lax.pmean(stats, axis_name="batch")

  # Clip the gradient by value.
  if FLAGS.grad_max_val > 0:
    clip_fn = lambda z: jnp.clip(z, -FLAGS.grad_max_val, FLAGS.grad_max_val)
    grad = jax.tree_util.tree_map(clip_fn, grad)

  # Clip the (possibly value-clipped) gradient by norm.
  if FLAGS.grad_max_norm > 0:
    grad_norm = jnp.sqrt(
        jax.tree_util.tree_reduce(
            lambda x, y: x + jnp.sum(y**2), grad, initializer=0))
    mult = jnp.minimum(1, FLAGS.grad_max_norm / (1e-7 + grad_norm))
    grad = jax.tree_util.tree_map(lambda z: mult * z, grad)

  new_optimizer = state.optimizer.apply_gradient(grad, learning_rate=lr)
  new_state = state.replace(optimizer=new_optimizer)
  return new_state, stats, rng
def main(unused_argv):
rng = random.PRNGKey(20200823)
# Shift the numpy random seed by host_id() to shuffle data loaded by different
# hosts.
np.random.seed(20201473 + jax.host_id())
if FLAGS.config is not None:
utils.update_flags(FLAGS)
if FLAGS.batch_size % jax.device_count() != 0:
raise ValueError("Batch size must be divisible by the number of devices.")
if FLAGS.train_dir is None:
raise ValueError("train_dir must be set. None set now.")
if FLAGS.data_dir is None:
raise ValueError("data_dir must be set. None set now.")
dataset = datasets.get_dataset("train", FLAGS)
test_dataset = datasets.get_dataset("test", FLAGS)
rng, key = random.split(rng)
model, variables = models.get_model(key, dataset.peek(), FLAGS)
optimizer = flax.optim.Adam(FLAGS.lr_init).create(variables)
state = utils.TrainState(optimizer=optimizer)
del optimizer, variables
learning_rate_fn = functools.partial(
utils.learning_rate_decay,
lr_init=FLAGS.lr_init,
lr_final=FLAGS.lr_final,
max_steps=FLAGS.max_steps,
lr_delay_steps=FLAGS.lr_delay_steps,
lr_delay_mult=FLAGS.lr_delay_mult)
train_pstep = jax.pmap(
functools.partial(train_step, model),
axis_name="batch",
in_axes=(0, 0, 0, None),
donate_argnums=(2,))
def render_fn(variables, key_0, key_1, rays):
return jax.lax.all_gather(
model.apply(variables, key_0, key_1, rays, FLAGS.randomized),
axis_name="batch")
render_pfn = jax.pmap(
render_fn,
in_axes=(None, None, None, 0), # Only distribute the data input.
donate_argnums=(3,),
axis_name="batch",
)
# Compiling to the CPU because it's faster and more accurate.
ssim_fn = jax.jit(
functools.partial(utils.compute_ssim, max_val=1.), backend="cpu")
if not utils.isdir(FLAGS.train_dir):
utils.makedirs(FLAGS.train_dir)
state = checkpoints.restore_checkpoint(FLAGS.train_dir, state)
# Resume training a the step of the last checkpoint.
init_step = state.optimizer.state.step + 1
state = flax.jax_utils.replicate(state)
if jax.host_id() == 0:
summary_writer = tensorboard.SummaryWriter(FLAGS.train_dir)
# Prefetch_buffer_size = 3 x batch_size
pdataset = flax.jax_utils.prefetch_to_device(dataset, 3)
n_local_devices = jax.local_device_count()
rng = rng + jax.host_id() # Make random seed separate across hosts.
keys = random.split(rng, n_local_devices) # For pmapping RNG keys.
gc.disable() # Disable automatic garbage collection for efficiency.
stats_trace = []
reset_timer = True
for step, batch in zip(range(init_step, FLAGS.max_steps + 1), pdataset):
if reset_timer:
t_loop_start = time.time()
reset_timer = False
lr = learning_rate_fn(step)
state, stats, keys = train_pstep(keys, state, batch, lr)
if jax.host_id() == 0:
stats_trace.append(stats)
if step % FLAGS.gc_every == 0:
gc.collect()
# Log training summaries. This is put behind a host_id check because in
# multi-host evaluation, all hosts need to run inference even though we
# only use host 0 to record results.
if jax.host_id() == 0:
if step % FLAGS.print_every == 0:
summary_writer.scalar("train_loss", stats.loss[0], step)
summary_writer.scalar("train_psnr", stats.psnr[0], step)
summary_writer.scalar("train_sparsity", stats.sparsity[0], step)
summary_writer.scalar("train_loss_coarse", stats.loss_c[0], step)
summary_writer.scalar("train_psnr_coarse", stats.psnr_c[0], step)
summary_writer.scalar("train_sparsity_coarse", stats.sparsity_c[0],
step)
summary_writer.scalar("weight_l2", stats.weight_l2[0], step)
avg_loss = np.mean(np.concatenate([s.loss for s in stats_trace]))
avg_psnr = np.mean(np.concatenate([s.psnr for s in stats_trace]))
stats_trace = []
summary_wr |
viranch/exodus | resources/lib/sources_de/streamkiste.py | Python | gpl-3.0 | 5,119 | 0.009768 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Viper2k4
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urllib, urlparse, json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['streamkiste.tv']
self.base_link = 'http://streamkiste.tv'
self.search_link = '/livesearch.php?keyword=%s&nonce=%s'
self.drop_link = '/drop.php'
def movie(self, imdb, title, localtitle, year):
try:
url = self.__search(title, year)
if not url and title != localtitle: url = self.__search(localtitle, year)
return url
except:
return
# code is equal to 1kino
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if url == None:
return sources
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
r = client.parseDOM(r, 'div', attrs={'id': 'stream-container'})[0]
r = re.compile('<div id="stream-h">.*?</li>.*?</div>\s*</div>', re.IGNORECASE | re.DOTALL).findall(r)
r = [(client.parseDOM(i, 'div', attrs={'id': 'mirror-head'}),
client.parseDOM(i, 'div', attrs={'id': 'stream-links'})
) for i in r]
r = [(i[0][0], i[1]) for i in r if len(i[0]) > 0]
r = [(re.findall('.+\|(.+)', i[0]), i[1]) for i in r]
r = [(i[0][0].strip(), i[1]) for i in r if len(i[0]) > 0]
for name, links in r:
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in | fmt]
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i | in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt or any(i.endswith('3d') for i in fmt): info.append('3D')
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
links = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in links]
links = [(i[0][0], i[1][0].lower().strip()) for i in links if len(i[0]) > 0 and len(i[1]) > 0]
links = [(i[0], i[1]) for i in links if i[1] in hostDict]
info = ' | '.join(info)
for link, hoster in links:
sources.append({'source': hoster, 'quality': quality,
'language': 'de',
'url': link,
'info': info,
'direct': False,
'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
url = client.request(url, output='geturl')
if self.base_link not in url:
return url
    def __search(self, title, year):
        """Query the site's AJAX search endpoint for a title/year match.

        Fetches the WordPress nonce from the landing page, runs the
        search, and returns the site-relative URL of the first matching
        entry.  Returns None on any failure (bare except by convention).
        """
        try:
            r = client.request(self.base_link)
            # The search endpoint requires a nonce embedded in the page JS.
            r = re.findall('sL10n\s*=\s*({.*?});', r)[0]
            r = json.loads(r)['nonce']
            query = self.search_link % (urllib.quote_plus(cleantitle.query(title)), r)
            query = urlparse.urljoin(self.base_link, query)
            t = cleantitle.get(title)
            # Accept the requested year, the two adjacent years, or '0' (unknown).
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
            r = client.request(query)
            r = json.loads(r)
            r = [(i, r[i].get('url', ''), r[i].get('title', ''), r[i].get('extra', {}).get('names', ''),
                  r[i].get('extra', {}).get('date', '0')) for i in r]
            r = [(i[0], i[1], client.replaceHTMLCodes(i[2]), client.replaceHTMLCodes(i[3]), i[4]) for i in r]
            # NOTE(review): `and` binds tighter than `or`, so the year check
            # (i[4] in y) only constrains the alternate-names comparison --
            # possibly missing parentheses around the two title tests; confirm.
            r = [i[1] for i in r if t == cleantitle.get(i[2]) or t == cleantitle.get(i[3]) and i[4] in y][0]
            # Drop any scheme/host prefix, keep the site-relative path.
            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
|
jawilson/home-assistant | homeassistant/components/ubus/device_tracker.py | Python | apache-2.0 | 5,843 | 0.000513 | """Support for OpenWRT (ubus) routers."""
import logging
import re
from openwrt.ubus import Ubus
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Config key selecting which DHCP server provides MAC-to-hostname lookups.
CONF_DHCP_SOFTWARE = "dhcp_software"
DEFAULT_DHCP_SOFTWARE = "dnsmasq"
# Supported values; "none" disables hostname resolution entirely.
DHCP_SOFTWARES = ["dnsmasq", "odhcpd", "none"]
# Platform configuration: router host plus ubus credentials.
PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Required(CONF_USERNAME): cv.string,
        vol.Optional(CONF_DHCP_SOFTWARE, default=DEFAULT_DHCP_SOFTWARE): vol.In(
            DHCP_SOFTWARES
        ),
    }
)
def get_scanner(hass, config):
    """Validate the configuration and return an ubus scanner."""
    # Map DHCP software to the scanner subclass handling its hostname lookup;
    # anything else (i.e. "none") falls back to the plain scanner.
    scanner_by_dhcp = {
        "dnsmasq": DnsmasqUbusDeviceScanner,
        "odhcpd": OdhcpdUbusDeviceScanner,
    }
    conf = config[DOMAIN]
    scanner_cls = scanner_by_dhcp.get(conf[CONF_DHCP_SOFTWARE], UbusDeviceScanner)
    scanner = scanner_cls(conf)
    return scanner if scanner.success_init else None
def _refresh_on_access_denied(func):
"""If remove rebooted, it lost our session so rebuild one and try again."""
def decorator(self, *args, **kwargs):
"""Wrap the function to refresh session_id on PermissionError."""
try:
return func(self, *args, **kwargs)
except PermissionError:
_LOGGER.warning(
"Invalid session detected."
" Trying to refresh session_id and re-run RPC"
)
self.ubus.connect()
return func(self, *args, **kwargs)
return decorator
class UbusDeviceScanner(DeviceScanner):
    """
    This class queries a wireless router running OpenWrt firmware.
    Adapted from Tomato scanner.
    """
    def __init__(self, config):
        """Initialize the scanner and open the first ubus session."""
        host = config[CONF_HOST]
        self.username = config[CONF_USERNAME]
        self.password = config[CONF_PASSWORD]
        self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);")
        # MACs seen during the most recent scan.
        self.last_results = {}
        self.url = f"http://{host}/ubus"
        self.ubus = Ubus(self.url, self.username, self.password)
        # hostapd interface names, discovered lazily on first update.
        self.hostapd = []
        # MAC -> hostname map; None means "not generated yet".
        self.mac2name = None
        # connect() returning None means authentication failed.
        self.success_init = self.ubus.connect() is not None
    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        return self.last_results
    def _generate_mac2name(self):
        """Return empty MAC to name dict. Overridden if DHCP server is set."""
        self.mac2name = {}
    @_refresh_on_access_denied
    def get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        if self.mac2name is None:
            self._generate_mac2name()
        if self.mac2name is None:
            # Generation of mac2name dictionary failed
            return None
        name = self.mac2name.get(device.upper(), None)
        return name
    @_refresh_on_access_denied
    def _update_info(self):
        """Ensure the information from the router is up to date.
        Returns boolean if scanning successful.
        """
        if not self.success_init:
            return False
        _LOGGER.info("Checking hostapd")
        # Discover the access-point interfaces once and cache them.
        if not self.hostapd:
            hostapd = self.ubus.get_hostapd()
            self.hostapd.extend(hostapd.keys())
        self.last_results = []
        results = 0
        # for each access point
        for hostapd in self.hostapd:
            if result := self.ubus.get_hostapd_clients(hostapd):
                results = results + 1
                # Check for each device is authorized (valid wpa key)
                for key in result["clients"].keys():
                    device = result["clients"][key]
                    if device["authorized"]:
                        self.last_results.append(key)
        return bool(results)
class DnsmasqUbusDeviceScanner(UbusDeviceScanner):
    """Implement the Ubus device scanning for the dnsmasq DHCP server."""
    def __init__(self, config):
        """Initialize the scanner."""
        super().__init__(config)
        # Path of the dnsmasq lease file; resolved lazily from UCI config.
        self.leasefile = None
    def _generate_mac2name(self):
        """Build the MAC -> hostname map from the dnsmasq lease file."""
        if self.leasefile is None:
            if result := self.ubus.get_uci_config("dhcp", "dnsmasq"):
                values = result["values"].values()
                # assumes a single dnsmasq section -- take the first one.
                self.leasefile = next(iter(values))["leasefile"]
            else:
                return
        result = self.ubus.file_read(self.leasefile)
        if result:
            self.mac2name = {}
            # Lease line format: expiry MAC IP hostname client-id
            for line in result["data"].splitlines():
                hosts = line.split(" ")
                self.mac2name[hosts[1].upper()] = hosts[3]
        else:
            # Error, handled in the ubus.file_read()
            return
class OdhcpdUbusDeviceScanner(UbusDeviceScanner):
    """Implement the Ubus device scanning for the odhcp DHCP server."""
    def _generate_mac2name(self):
        """Build the MAC -> hostname map from odhcpd's ipv4 leases."""
        if result := self.ubus.get_dhcp_method("ipv4leases"):
            self.mac2name = {}
            for device in result["device"].values():
                for lease in device["leases"]:
                    mac = lease["mac"]  # mac = aabbccddeeff
                    # Convert it to expected format with colon
                    mac = ":".join(mac[i : i + 2] for i in range(0, len(mac), 2))
                    self.mac2name[mac.upper()] = lease["hostname"]
        else:
            # Error, handled in the ubus.get_dhcp_method()
            return
|
wisfern/vnpy | beta/api/korbit/korbit-python-master/korbit/private_api.py | Python | mit | 6,359 | 0.003931 | # -*- coding: utf-8 -*-
import time
from public_api import PublicAPI
class PrivateAPI(PublicAPI):
    """Authenticated Korbit API client.

    Extends PublicAPI with OAuth2 token management plus the private
    (user-scoped) exchange and wallet endpoints.
    See https://apidocs.korbit.co.kr/ for endpoint details.
    """
    def __init__(self, client_id, secret, production=True, version="v1", timeout=20):
        try:
            # Name the class explicitly: super(self.__class__, ...) recurses
            # infinitely if this class is ever subclassed.
            super(PrivateAPI, self).__init__(production, version, timeout)
        except TypeError:
            # PublicAPI may be an old-style class; call its __init__ unbound.
            PublicAPI.__init__(self, production, version, timeout)
        self.__client_id = client_id
        self.__secret = secret
        self.__token = {}
    # https://apidocs.korbit.co.kr/#authentication
    def create_token_directly(self, username, password):
        """Obtain an access token via the password grant and store it."""
        payload = {
            'client_id': self.__client_id,
            'client_secret': self.__secret,
            'username': username,
            'password': password,
            'grant_type': "password"
        }
        self.__token = self.request_post("oauth2/access_token", data=payload)
        return self.__token
    def set_token(self, token):
        """Install an externally obtained token dict."""
        self.__token = token
    def refresh_token(self):
        """Exchange the stored refresh token for a new access token."""
        payload = {
            'client_id': self.__client_id,
            'client_secret': self.__secret,
            'refresh_token': self.__token['refresh_token'],
            'grant_type': "refresh_token"
        }
        self.__token = self.request_post("oauth2/access_token", data=payload)
        return self.__token
    def get_user_info(self):
        """Return the authenticated user's profile."""
        return self.request_get("user/info", headers=self.headers)
    @property
    def headers(self):
        """Authorization headers built from the stored token."""
        return {
            'Accept': 'application/json',
            'Authorization': "{} {}".format(self.__token['token_type'], self.__token['access_token'])
        }
    # https://apidocs.korbit.co.kr/#exchange
    def bid_order(self, bid_type, coin_amount=None, price=None, fiat_amount=None, currency_pair="btc_krw"):
        """Place a buy order; use the market_/limit_ helpers for convenience."""
        payload = {
            'type': bid_type,
            'currency_pair': currency_pair,
            'price': price,
            'coin_amount': coin_amount,
            'fiat_amount': fiat_amount,
            'nonce': self.nonce
        }
        return self.request_post("user/orders/buy", headers=self.headers, data=payload)
    def market_bid_order(self, fiat_amount, currency_pair="btc_krw"):
        """Buy at market price, spending fiat_amount."""
        return self.bid_order('market', fiat_amount=fiat_amount, currency_pair=currency_pair)
    def limit_bid_order(self, coin_amount, price, currency_pair="btc_krw"):
        """Buy coin_amount at the given limit price."""
        return self.bid_order('limit', coin_amount=coin_amount, price=price, currency_pair=currency_pair)
    def ask_order(self, ask_type, coin_amount, price=None, currency_pair="btc_krw"):
        """Place a sell order; use the market_/limit_ helpers for convenience."""
        payload = {
            'type': ask_type,
            'currency_pair': currency_pair,
            'price': price,
            'coin_amount': coin_amount,
            'nonce': self.nonce
        }
        return self.request_post("user/orders/sell", headers=self.headers, data=payload)
    def market_ask_order(self, coin_amount, currency_pair="btc_krw"):
        """Sell coin_amount at market price."""
        return self.ask_order('market', coin_amount=coin_amount, currency_pair=currency_pair)
    def limit_ask_order(self, coin_amount, price, currency_pair="btc_krw"):
        """Sell coin_amount at the given limit price."""
        return self.ask_order('limit', coin_amount, price, currency_pair)
    def cancel_order(self, ids, currency_pair="btc_krw"):
        """Cancel one or more open orders by id."""
        payload = {
            'id': ids,
            'currency_pair': currency_pair,
            'nonce': self.nonce
        }
        return self.request_post("user/orders/cancel", headers=self.headers, data=payload)
    def list_open_orders(self, offset=0, limit=10, currency_pair="btc_krw"):
        """List currently open orders."""
        params = {
            'currency_pair': currency_pair,
            'offset': offset,
            'limit': limit
        }
        return self.request_get("user/orders/open", headers=self.headers, params=params)
    def view_exchange_orders(self, offset=0, limit=10, currency_pair="btc_krw"):
        """List the order history (removed a stray debug print here)."""
        params = {
            'currency_pair': currency_pair,
            'offset': offset,
            'limit': limit
        }
        return self.request_get("user/orders", headers=self.headers, params=params)
    def view_transfers(self, offset=0, limit=10, currency="btc"):
        """List deposit/withdrawal transfers."""
        params = {
            'currency': currency,
            'offset': offset,
            'limit': limit
        }
        return self.request_get("user/transfers", headers=self.headers, params=params)
    def trading_volume_and_fees(self, currency_pair="all"):
        """Return the 30-day trading volume and applicable fees."""
        params = {
            'currency_pair': currency_pair
        }
        return self.request_get("user/volume", headers=self.headers, params=params)
    # https://apidocs.korbit.co.kr/#wallet
    def user_balances(self):
        """Return wallet balances per currency."""
        return self.request_get("user/balances", headers=self.headers)
    def user_accounts(self):
        """Return registered bank/deposit accounts."""
        return self.request_get("user/accounts", headers=self.headers)
    def retrieve_wallet_status(self, currency_pair="btc_krw"):
        """Return the wallet status for a currency pair."""
        params = {
            'currency_pair': currency_pair
        }
        return self.request_get("user/wallet", headers=self.headers, params=params)
    def assign_btc_address(self, currency="btc"):
        """Request assignment of a deposit address."""
        payload = {
            'currency': currency,
            'nonce': self.nonce
        }
        return self.request_post("user/coins/address/assign", headers=self.headers, data=payload)
    def request_btc_withdrawal(self, address, amount, currency="btc"):
        """Request a coin withdrawal to the given address."""
        payload = {
            'address': address,
            'amount': amount,
            'currency': currency,
            'nonce': self.nonce
        }
        return self.request_post("user/coins/out", headers=self.headers, data=payload)
    def status_of_btc_deposit_and_transfer(self, transfer_id="", currency="btc"):
        """Return deposit/transfer status, optionally for a single transfer."""
        params = {
            'currency': currency
        }
        if transfer_id != "":
            params['id'] = transfer_id
        return self.request_get("user/coins/status", headers=self.headers, params=params)
    def cancel_btc_transfer_request(self, transfer_id, currency="btc"):
        """Cancel a pending coin withdrawal request."""
        payload = {
            'id': transfer_id,
            'currency': currency,
            'nonce': self.nonce
        }
        return self.request_post("user/coins/out/cancel", headers=self.headers, data=payload)
    @property
    def nonce(self):
        """Monotonic-ish request nonce: current time in milliseconds."""
        return int(time.time() * 1000)
|
youdar/work | work/FAB/cross/fab_elbow_angle.py | Python | mit | 5,657 | 0.012374 | from __future__ import division
from scitbx.linalg import eigensystem
from scitbx.array_family import flex
from libtbx.utils import null_out
from math import acos,pi
from iotbx import pdb
import iotbx.pdb
class fab_elbow_angle(object):
  """Compute the elbow angle of a Fragment antigen-binding (Fab)."""
  def __init__(self,
               pdb_hierarchy,
               chain_ID_light='L',
               chain_ID_heavy='H',
               limit_light=107,
               limit_heavy=113):
    '''
    Get elbow angle for Fragment antigen-binding (Fab)
    - Default heavy and light chains IDs are: H : heavy, L : light
    - Default limit (cutoff) between variable and constant parts
      is residue number 107/113 for light/heavy chains
    - Variable domain is from residue 1 to limit.
      Constant domain from limit+1 to end.
    - Method of calculating angle is based on Stanfield, et al., JMB 2006
    '''
    # create selection strings for the heavy/light var/const part of chains
    self.select_str(
      chain_ID_H=chain_ID_heavy,
      limit_H=limit_heavy,
      chain_ID_L=chain_ID_light,
      limit_L=limit_light)
    # get the hierarchy and divide using the selection strings
    self.pdb_hierarchy = pdb_hierarchy
    self.get_pdb_chains()
    # Get heavy to light reference vector before alignment !!!
    vh_end = self.pdb_var_H.atoms()[-1].xyz
    vl_end = self.pdb_var_L.atoms()[-1].xyz
    mid_H_to_L = self.norm_vec(start=vh_end,end=vl_end)
    # Get transformation objects (light aligned onto heavy)
    tranformation_const= self.get_transformation(
      fixed_selection=self.pdb_const_H,
      moving_selection=self.pdb_const_L)
    tranformation_var = self.get_transformation(
      fixed_selection=self.pdb_var_H,
      moving_selection=self.pdb_var_L)
    # Get the angle and eigenvalues
    eigen_const = eigensystem.real_symmetric(tranformation_const.r.as_sym_mat3())
    eigen_var = eigensystem.real_symmetric(tranformation_var.r.as_sym_mat3())
    # c : constant, v : variable
    eigenvectors_c = self.get_eigenvector(eigen_const)
    eigenvectors_v = self.get_eigenvector(eigen_var)
    # ensure the eigenvectors point in opposite directions
    if eigenvectors_c.dot(eigenvectors_v) > 0:
      eigenvectors_v = - eigenvectors_v
    # Calc Fab elbow angle
    angle = self.get_angle(vec1=eigenvectors_c, vec2=eigenvectors_v)
    # Test if elbow angle larger or smaller than 180
    zaxis = self.cross(eigenvectors_v, eigenvectors_c)
    xaxis = self.cross(eigenvectors_c,zaxis)
    # choose ref axis
    ref_axis = zaxis
    #if abs(mid_H_to_L.dot(xaxis)) > abs(mid_H_to_L.dot(zaxis)):
    #ref_axis = xaxis
    if mid_H_to_L.dot(ref_axis) < 0:
      angle = 360 - angle
    self.fab_elbow_angle = angle
  def norm_vec(self,start,end):
    '''returns normalized vector that starts at "start" and ends at "end"'''
    x = flex.double(end) - flex.double(start)
    return x/x.norm()
  def cross(self,a,b):
    '''(array,array) -> array
    returns a normalized cross product vector'''
    a1,a2,a3 = a
    b1,b2,b3 = b
    x = flex.double([a2*b3-a3*b2,a3*b1-a1*b3,a1*b2-a2*b1])
    return x/x.norm()
  def get_angle(self,vec1,vec2,larger=True):
    '''return the larger (or smaller) angle between vec1 and vec2, degrees'''
    # BUGFIX: original tested "vec1 and vec1", never looking at vec2.
    if vec1 and vec2:
      angle_cos = vec1.dot(vec2)
      acos_angle_cos = acos(angle_cos)
      assert acos_angle_cos != 0
      angle = 180/pi*acos_angle_cos
    else:
      angle = 0
    if (angle < 90) and larger: angle = 180 - angle
    if (angle > 90) and not larger: angle = 180 - angle
    return angle
  def get_eigenvector(self,eigen):
    '''
    Get the eigenvector for eigenvalue 1 and normalize it
    '''
    v = eigen.vectors()
    e = eigen.values()
    indx = None
    # select eigenvector that corresponds to a real eigenvalue == 1
    for i,x in enumerate(e):
      if not isinstance(x,complex):
        if abs(1-x)<1e-6:
          indx = i
          break
    # make sure an eigenvalue == 1 was actually found
    # BUGFIX: original asserted "not indx", which passed when nothing was
    # found (indx is None) and failed for valid indices 1 and 2.
    assert indx is not None
    eigenvector = v[indx:indx+3]
    # normalize
    # NOTE(review): dividing by dot(self,self) rescales by |v|^2, not |v|;
    # this is a no-op only if the eigenvectors are already unit length.
    eigenvector = eigenvector / eigenvector.dot(eigenvector)
    if e.all_eq(flex.double([1,1,1])):
      eigenvector = None
    return eigenvector
  def get_pdb_chains(self):
    '''Create separate pdb hierarchy for each of the chains we want to align'''
    ph = self.pdb_hierarchy
    # test selection
    test = ph.atom_selection_cache().selection
    #
    self.pdb_var_H = ph.select(test(self.select_var_str_H))
    self.pdb_const_H = ph.select(test(self.select_const_str_H))
    self.pdb_var_L = ph.select(test(self.select_var_str_L))
    self.pdb_const_L = ph.select(test(self.select_const_str_L))
  def get_transformation(self,fixed_selection,moving_selection):
    from phenix.command_line import superpose_pdbs
    '''
    Align the moving pdb hierarchy on to the fixed one.
    Provides an object with rotation and translation info
    '''
    params = superpose_pdbs.master_params.extract()
    x = superpose_pdbs.manager(
      params,
      log=null_out(),
      write_output=False,
      save_lsq_fit_obj=True,
      pdb_hierarchy_fixed=fixed_selection,
      pdb_hierarchy_moving=moving_selection)
    return x.lsq_fit_obj
  def select_str(self,chain_ID_H,limit_H,chain_ID_L,limit_L):
    '''create selection strings for the heavy and light chains
    separating the variable and constant parts of the chains'''
    s1 = 'pepnames and (name ca or name n or name c) and altloc " "'
    s2 = 'chain {0} and resseq {1}:{2} and {3}'
    self.select_var_str_H = s2.format(chain_ID_H,1,limit_H,s1)
    self.select_const_str_H = s2.format(chain_ID_H,limit_H+1,'end',s1)
    self.select_var_str_L = s2.format(chain_ID_L,1,limit_L,s1)
    self.select_const_str_L = s2.format(chain_ID_L,limit_L+1,'end',s1)
|
BenThelen/python-refprop | python2.7/rptest.py | Python | bsd-3-clause | 21,437 | 0.004851 | #-------------------------------------------------------------------------------
#Name: rptest
#Purpose: test module for refprop and multiRP
#
#Author: Thelen, B.J.
# thelen_ben@yahoo.com
#-------------------------------------------------------------------------------
u'''Allow refprop and multiRP module functional test of all functions'''
####################################################### test if some windows functions are working now with rp9.1
from decimal import Decimal
import platform
def settest(test):
    u'''Import the requested test module ('refprop' or 'multiRP')
    and run the functional test suite against it.'''
    # Both accepted names double as the importable module name,
    # so the argument can be imported directly.
    if test not in (u'refprop', u'multiRP'):
        return
    rp = __import__(test)
    _maintest(rp)
#main test def. for usage at refprop and multiRP
def _maintest(rp):
#examples and test setup
rp.SetErrorDebug.off() #turn on =>> for testing purpose
if rp.test(): #if True; rptest =>>for testing purpose
print u'refprop installed correctely'
print u'test results'
print rp.testresult
print u'fluidlib'
rp.fluidlib()
print u'\n'
prop = rp.setup(u'def', u'air',)
print u'setup air'
print prop, u'\n'
x = prop[u'x']
print u'critp(x)'
print rp.critp(x), u'\n'
print u'setup water ammonia'
print rp.setup(u'def', u'water', u'ammonia',), u'\n'
#alternative setup input
rp.setup(u'def', [u'water', u'ammonia'],)
x = [0.5, 0.3]
prop = rp.normalize(x)
x = prop[u'x']
prop = rp.critp(x)
prop = rp.therm(prop[u'tcrit'], prop[u'Dcrit'], x)
print u'therm'
print prop, u'\n'
p = prop[u'p']
print u'therm2'
print rp.therm2(prop[u't'], prop[u'D'], x), u'\n'
print u'therm0'
print rp.therm0(prop[u't'], prop[u'D'], x), u'\n'
print u'residual'
print rp.residual(prop[u't'], prop[u'D'], x), u'\n'
print u'entro'
print rp.entro(prop[u't'], prop[u'D'], x), u'\n'
print u'enthal'
print rp.enthal(prop[u't'], prop[u'D'], x), u'\n'
print u'ag'
print rp.ag(prop[u't'], prop[u'D'], x), u'\n'
print u'cvcp'
print rp.cvcp(prop[u't'], prop[u'D'], x), u'\n'
print u'dddp'
print rp.dddp(prop[u't'], prop[u'D'], x), u'\n'
print u'dddt'
print rp.dddt(prop[u't'], prop[u'D'], x), u'\n'
print u'dhd1'
print rp.dhd1(prop[u't'], prop[u'D'], x), u'\n'
print u'dpdd'
print rp.dpdd(prop[u't'], prop[u'D'], x), u'\n'
print u'dpdd2'
print rp.dpdd2(prop[u't'], prop[u'D'], x), u'\n'
print u'dpdt'
print rp.dpdt(prop[u't'], prop[u'D'], x), u'\n'
D = prop[u'D']
#function not supported in Windows
if platform.system() == u'Linux':
print u'dcdt'
print rp.dcdt(prop[u't'], x), u'\n'
#function not supported in Windows
if platform.system() == u'Linux':
print u'dcdt2'
print rp.dcdt2(prop[u't'], x), u'\n'
print u'fgcty'
print rp.fgcty(prop[u't'], D, x), u'\n'
print u'gibbs'
print rp.gibbs(prop[u't'], prop[u'D'], x), u'\n'
#~ print('fgcty2')
#~ print(rp.fgcty2(prop['t'], prop['D'], x), '\n')
prop = rp.therm3(prop[u't'], prop[u'D'], x)
print u'therm3'
print prop, u'\n'
D = prop[u'D']
print u'virb'
print rp.virb(prop[u't'], x), u | '\n'
print u'virc'
print rp.virc(prop[u't'], x), u'\n'
#function not supported in Windows
if platform.system() == u'Linux':
print u'vird'
print rp.vird(prop[u't'], x), u'\n'
print u'virba'
print rp.virba(prop[u't'], x), u'\n'
print u'virca'
print rp.virca(prop[u't'], x), u'\n'
print u'cvc | pk'
print rp.cvcpk(1, prop[u't'], D), u'\n'
print u'dbdt'
print rp.dbdt(prop[u't'], x), u'\n'
print u'dpddk'
print rp.dpddk(1, prop[u't'], D), u'\n'
print u'dpdtk'
print rp.dpdtk(2, prop[u't'], D), u'\n'
D = 55
t = 373
prop = rp.press(t, D, x)
print u'press'
print prop, u'\n'
p = prop[u'p']
print u'purefld(1)'
prop = rp.purefld(1)
print prop, u'\n'
x = [1]
resetup_test_prop_d = prop
print u'satt'
prop = rp.satt(t, x)
print prop, u'\n'
print u'satp'
prop = rp.satp(prop[u'p'], x)
print prop, u'\n'
print u'satd'
print rp.satd(prop[u'Dliq'], x), u'\n'
print u'sath'
print rp.sath(47000, x, 0), u'\n'
print u'sate'
print rp.sate(0.46047E-13, x), u'\n'
print u'sats'
print rp.sats(50, x, 0), u'\n'
print u'purefld(0)'
print rp.purefld(0), u'\n'
x = [0.5, 0.3]
x = rp.normalize(x)[u'x']
print u'csatk'
print rp.csatk(1, t), u'\n'
print u'dptsatk'
print rp.dptsatk(1, t), u'\n'
print u'cv2pk'
print rp.cv2pk(2, t, D), u'\n'
print u'tprho'
print rp.tprho(t, p, x, 2, 1, 58), u'\n'
print u'flsh, tp'
prop = rp.flsh(u'tp', t, p, x)
print prop, u'\n'
print u'flsh, th'
print rp.flsh(u'tH', 305, prop[u'h'], x, 1), u'\n'
print u'flsh, tD'
print rp.flsh(u'tD', t, 30, x), u'\n'
print u'info()'
print rp.info(), u'\n'
print u'info(2)'
print rp.info(2), u'\n'
#unsupported in Windows
if platform.system() == u'Linux':
print u'rmix2'
print rp.rmix2(x), u'\n'
print u'xmass'
prop = rp.xmass(x)
print prop, u'\n'
print u'xmole'
print rp.xmole(prop[u'xkg']), u'\n'
print u'limitx'
print rp.limitx(x, u'eos', t, D, p), u'\n'
print u'limitk'
print rp.limitk(u'eos', 1, t, D, p), u'\n'
print u'limits'
print rp.limits(x), u'\n'
print u'flsh, ts'
prop = rp.flsh(u'ts', t, 40, x)
print prop, u'\n'
print u'flsh, te'
print rp.flsh(u'te', t, prop[u'e'], x), u'\n'
print u'flsh, pD'
prop = rp.flsh(u'Pd', p, D, x)
print prop, u'\n'
print u'flsh, ph'
prop = rp.flsh(u'ph', p, prop[u'h'], x)
print prop, u'\n'
print u'flsh, ps'
prop = rp.flsh(u'ps', p, prop[u's'], x)
print prop, u'\n'
print u'flsh, pe'
prop = rp.flsh(u'pE', p, prop[u'e'], x)
print prop, u'\n'
print u'flsh, es'
prop = rp.flsh(u'es', prop[u'e'], prop[u's'], x)
print prop, u'\n'
print u'flsh, hs'
prop = rp.flsh(u'hs', 40000, 100, x)
print prop, u'\n'
print u'flsh, es'
print rp.flsh(u'es', 175, 13, x), u'\n'
print u'flsh, Dh'
print rp.flsh(u'DH', 20, 18000, x), u'\n'
print u'flsh, Ds'
prop = rp.flsh(u'Ds', 20, 50, x)
print prop, u'\n'
print u'flsh, De'
prop = rp.flsh(u'DE', 20, prop[u'e'], x)
print prop, u'\n'
print u'flsh, tq'
prop = rp.flsh(u'tq', t, prop[u'q'], x)
print prop, u'\n'
print u'flsh, pq'
print rp.flsh(u'pq', 1200, prop[u'q'], x), u'\n'
prop = rp.flsh(u'tp', 350, 1200, x)
print u'flsh, tp'
print prop, u'\n'
s = prop[u's']
e = prop[u'e']
h = prop[u'h']
D = prop[u'D']
t = prop[u't']
p = prop[u'p']
Dmin = 40
Dmax = 55
print u'flsh1, liq, ph'
print rp.flsh1(u'Ph', p, h, x, 1), u'\n'
print u'getphase'
print rp.getphase(prop), u'\n'
print u'flsh1, liq, pD'
print rp.flsh1(u'PD', p, D, x), u'\n'
print u'flsh1, liq, ps'
print rp.flsh1(u'Ps', p, s, x), u'\n'
#unsupported in Windows
if platform.system() == u' |
pri22296/yaydoc | modules/scripts/config/validation.py | Python | gpl-3.0 | 626 | 0.011182 | import re
import mimetypes
def | validate_markdown_flavour(value):
return value in ('markdown', 'markdown_strict', 'markdown_phpextra',
'markdown_github', 'markdown_mmd', 'commonmark')
def validate_mimetype_image(value):
    """Return True if value looks like a path/URL to an image file."""
    # An empty value means "no image supplied", which is also valid
    if not value:
        return True
    guessed, _ = mimetypes.guess_type(value)
    return guessed is not None and guessed.startswith('image')
def validate_subproject(value):
    """Return True if value['url'] points at a github.com repository.

    Accepts http/https, an optional "www." prefix and an optional
    ".git" suffix.  Dots in the pattern are now escaped (they were
    previously unescaped wildcards) and the None-comparison uses `is`.
    """
    regex = r'(http|https)://(www\.|)github\.com/([\w.]+)/([\w.]+)(\.git|)'
    return re.match(regex, value['url']) is not None
|
Sorsly/subtle | google-cloud-sdk/lib/googlecloudsdk/command_lib/compute/managed_instance_groups/update_instances_utils.py | Python | mit | 5,956 | 0.00638 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the instance-groups managed update-instances commands."""
import re
from googlecloudsdk.calliope import exceptions
# Keys accepted inside the --version-original / --version-new dict flags.
STANDBY_NAME = 'standby'
TARGET_SIZE_NAME = 'target-size'
TEMPLATE_NAME = 'template'
def _ParseFixed(fixed_or_percent_str):
"""Retrieves int value from string."""
if re.match(r'^\d+$', fixed_or_percent_str):
return int(fixed_or_percent_str)
return None
def _ParsePercent(fixed_or_percent_str):
"""Retrieves percent value from string."""
if re.match(r'^\d+%$', fixed_or_percent_str):
percent = int(fixed_or_percent_str[:-1])
return percent
return None
def ParseFixedOrPercent(flag_name, flag_param_name,
                        fixed_or_percent_str, messages):
  """Retrieves value: number or percent.

  Args:
    flag_name: name of the flag associated with the parsed string.
    flag_param_name: name of the inner parameter of the flag.
    fixed_or_percent_str: string containing fixed or percent value.
    messages: module containing message classes.
  Returns:
    FixedOrPercent message object.
  """
  if fixed_or_percent_str is None:
    return None
  # Both parses are pure; try them up front and branch on the results.
  fixed = _ParseFixed(fixed_or_percent_str)
  percent = _ParsePercent(fixed_or_percent_str)
  if fixed is not None:
    return messages.FixedOrPercent(fixed=fixed)
  if percent is not None:
    if percent > 100:
      raise exceptions.InvalidArgumentException(
          flag_name, 'percentage cannot be higher than 100%.')
    return messages.FixedOrPercent(percent=percent)
  raise exceptions.InvalidArgumentException(
      flag_name,
      flag_param_name + ' has to be non-negative integer number or percent.')
def ParseUpdatePolicyType(flag_name, policy_type_str, messages):
  """Retrieves value of update policy type: opportunistic or proactive.

  Args:
    flag_name: name of the flag associated with the parsed string.
    policy_type_str: string containing update policy type.
    messages: module containing message classes.
  Returns:
    InstanceGroupManagerUpdatePolicy.TypeValueValuesEnum message enum value.
  """
  enum_type = messages.InstanceGroupManagerUpdatePolicy.TypeValueValuesEnum
  type_by_name = {
      'opportunistic': enum_type.OPPORTUNISTIC,
      'proactive': enum_type.PROACTIVE,
  }
  if policy_type_str not in type_by_name:
    raise exceptions.InvalidArgumentException(flag_name,
                                              'unknown update policy.')
  return type_by_name[policy_type_str]
def ValidateUpdateInstancesArgs(args):
  """Validates update arguments provided by the user.

  Args:
    args: arguments provided by the user.
  """
  if args.action == 'restart':
    # Neither version flag makes sense when merely restarting.
    for flag, value in (('--version-original', args.version_original),
                        ('--version-new', args.version_new)):
      if value:
        raise exceptions.InvalidArgumentException(
            flag, 'can\'t be specified for --action restart.')
  elif args.action == 'replace':
    if not args.version_new:
      raise exceptions.RequiredArgumentException(
          '--version-new',
          'must be specified for --action replace (or default).')
    if not args.version_original and TARGET_SIZE_NAME in args.version_new:
      # With a single version, only the implicit 100% is permitted,
      # and the redundant key is removed in place.
      if args.version_new[TARGET_SIZE_NAME] == '100%':
        del args.version_new[TARGET_SIZE_NAME]
      else:
        raise exceptions.InvalidArgumentException(
            '--version-new',
            'target-size can\'t be specified if there is only one version.')
  # With two versions, exactly one of them must carry a target size.
  if (args.version_original and args.version_new and
      (TARGET_SIZE_NAME in args.version_original)
      == (TARGET_SIZE_NAME in args.version_new)):
    raise exceptions.ToolException(
        'Exactly one version must have the target-size specified.')
def ParseVersion(flag_name, version_map, resources, messages):
  """Retrieves version from input map.

  Args:
    flag_name: name of the flag associated with the parsed string.
    version_map: map containing version data provided by the user.
    resources: provides reference for instance template resource.
    messages: module containing message classes.
  Returns:
    InstanceGroupManagerVersion message object.
  """
  if TEMPLATE_NAME not in version_map:
    raise exceptions.InvalidArgumentException(flag_name,
                                              'template has to be specified.')
  template_ref = resources.Parse(
      version_map[TEMPLATE_NAME], collection='compute.instanceTemplates')
  # target-size is optional; leave it unset when the key is absent.
  target_size = None
  if TARGET_SIZE_NAME in version_map:
    target_size = ParseFixedOrPercent(flag_name, TARGET_SIZE_NAME,
                                      version_map[TARGET_SIZE_NAME], messages)
  return messages.InstanceGroupManagerVersion(
      instanceTemplate=template_ref.SelfLink(),
      targetSize=target_size,
      name=version_map.get('name'))
def ValidateCanaryVersionFlag(flag_name, version_map):
  """Retrieves canary version from input map.

  Args:
    flag_name: name of the flag associated with the parsed string.
    version_map: map containing version data provided by the user.
  """
  # No canary version at all is fine; one with a target size is fine.
  if not version_map:
    return
  if TARGET_SIZE_NAME in version_map:
    return
  raise exceptions.RequiredArgumentException(
      '{} {}={}'.format(flag_name, TARGET_SIZE_NAME,
                        TARGET_SIZE_NAME.upper()),
      'target size must be specified for canary version')
|
ezequielpereira/Time-Line | timelinelib/xml/parser.py | Python | gpl-3.0 | 9,465 | 0.000106 | # Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
"""
A simple, validating, SAX-based XML parser.
Since it is simple, it has some limitations:
- It can not parse attributes
- It can not parse arbitrary nested structures
- It can only parse text in leaf nodes: in other words, this piece of XML
is not possible to parse: <a>some text <b>here</b> and there</a>
Here's an example of how to parse a simple XML document using this module.
First we create a file-like object containing the XML data (any file-like
object is fine, but we create a StringIO for the purpose of making a working
example):
>>> from StringIO import StringIO
>>> xml_stream = StringIO('''
... <db>
... <person>
... <name>Rickard</name>
... </person>
... <person>
... <name>James</name>
... <age>38</age>
... </person>
... </db>
... ''')
Then we define two parser functions that we later associate with Tag objects.
Parse functions are called when the end tag has been read. The first argument
to a parse function is the text that the tag contains. It will be empty for all
tags except leaf tags. The second argument is a dictionary that can be used to
store temporary variables. This dictionary is passed to all parse functions,
providing a way to share information between parse functions.
>>> def parse_name(text, tmp_dict):
... tmp_dict["tmp_name"] = text
>>> def parse_person(text, tmp_dict):
... # text is empty here since person is not a leaf tag
... name = tmp_dict.pop("tmp_name")
... age = tmp_dict.pop("tmp_age", None)
... print("Found %s in db." % name)
... if age is not None:
... print("%s is %s years old." % (name, age))
Next we define the structure of the XML document that we are going to parse by
creating Tag objects. The first argument is the name of the tag, the second
specifies how many times it can occur inside its parent (should be one of
SINGLE, OPTIONAL, or ANY), the third argument is the parse function to be used
for this tag (can be None if no parsing is needed), and the fourth argument is
a list of child tags.
>>> root_tag = Tag("db", SINGLE, None, [
... Tag("person", ANY, parse_person, [
... Tag("name", SINGLE, parse_name),
... Tag("age", OPTIONAL, parse_fn_store("tmp_age")),
... ]),
... ])
The parse_fn_store function returns a parser function that works exactly like
parse_name: it takes the text of the tag and stores it in the dictionary with
the given key (tmp_age in this case).
The last step is to call the parse function with the stream, the tag
configuration, and a dictionary. The dictionary can be populated with values
before parsing starts if needed.
>>> parse(xml_stream, root_tag, {})
Found Rickard in db.
Found James in db.
James is 38 years old.
The parse function will raise a ValidationError if the XML is not valid and a
SAXException the if the XML is not well-formed.
"""
from xml.sax import parse as sax_parse
import sys
import xml.sax.handl | er
# Occurrence rules for tags: how many times a tag may appear in its parent
SINGLE = 1    # must appear exactly once
OPTIONAL = 2  # may appear zero or one time
ANY = 3       # may appear any number of times, including zero
class ValidationError(Exception):
    """Signals that a parsed XML document does not conform to the expected schema."""
class Tag(object):
    """
    Represents a tag in an xml document.

    Used to define the structure of an xml document and to attach parse
    functions to individual parts of an xml document.  Parse functions are
    called when the end tag has been read.

    See the SaxHandler class defined below to see how this class is used.
    """

    def __init__(self, name, occurrence_rule, parse_fn, child_tags=()):
        # NOTE: the default used to be a shared mutable list ([]); an empty
        # tuple avoids the mutable-default pitfall and accepts any iterable.
        self.name = name
        self.occurrence_rule = occurrence_rule  # SINGLE, OPTIONAL or ANY
        self.parse_fn = parse_fn                # called as parse_fn(text, tmp_dict), may be None
        self.child_tags = []
        self.add_child_tags(child_tags)
        self.parent = None

        # Variables defining parse state
        self.occurrences = 0               # how many times this tag was fully read
        self.next_possible_child_pos = 0   # index of the next child allowed to appear
        self.start_read = False            # True between the start tag and its end tag

    def add_child_tags(self, tags):
        for tag in tags:
            self.add_child_tag(tag)

    def add_child_tag(self, tag):
        tag.parent = self
        self.child_tags.append(tag)

    def read_enough_times(self):
        # SINGLE tags must occur at least once; OPTIONAL/ANY tags may be absent.
        return self.occurrences > 0 or self.occurrence_rule in (OPTIONAL, ANY)

    def can_read_more(self):
        # Only ANY tags may occur more than once.
        return self.occurrences == 0 or self.occurrence_rule == ANY

    def handle_start_tag(self, name, tmp_dict):
        """Return the Tag that should handle <name>, descending into children
        as needed.  Raises ValidationError when the tag is unexpected."""
        if name == self.name:
            if self.start_read is True:
                # Nested tag of the same name is not allowed by the schema.
                raise ValidationError("Did not expect <%s>." % name)
            else:
                self.start_read = True
                return self
        elif self.start_read is True:
            # We are inside this tag: the name must match an allowed child.
            next_child = self._find_next_child(name)
            return next_child.handle_start_tag(name, tmp_dict)
        else:
            raise ValidationError("Expected <%s> but got <%s>."
                                  % (self.name, name))

    def handle_end_tag(self, name, text, tmp_dict):
        """Validate </name>, invoke the parse function, reset per-occurrence
        state, and return the parent tag (the new current tag)."""
        self._ensure_end_tag_valid(name, text)
        if self.parse_fn is not None:
            self.parse_fn(text, tmp_dict)
        self._ensure_all_children_read()
        self._reset_parse_data()
        self.occurrences += 1
        return self.parent

    def _ensure_end_tag_valid(self, name, text):
        if name != self.name:
            raise ValidationError("Expected </%s> but got </%s>."
                                  % (self.name, name))
        if self.child_tags:
            # Non-leaf tags must not contain text of their own.
            if text.strip():
                raise ValidationError("Did not expect text but got '%s'."
                                      % text)

    def _ensure_all_children_read(self):
        # Any child not yet visited must be satisfiable with zero occurrences.
        num_child_tags = len(self.child_tags)
        while self.next_possible_child_pos < num_child_tags:
            child = self.child_tags[self.next_possible_child_pos]
            if not child.read_enough_times():
                raise ValidationError("<%s> not read enough times."
                                      % child.name)
            self.next_possible_child_pos += 1

    def _reset_parse_data(self):
        # Prepare this subtree for a possible next occurrence of the tag.
        for child_tag in self.child_tags:
            child_tag.occurrences = 0
        self.next_possible_child_pos = 0
        self.start_read = False

    def _find_next_child(self, name):
        # Children must appear in declaration order; scan forward, skipping
        # children that are already satisfied.
        num_child_tags = len(self.child_tags)
        while self.next_possible_child_pos < num_child_tags:
            child = self.child_tags[self.next_possible_child_pos]
            if child.name == name:
                if child.can_read_more():
                    return child
                else:
                    break
            else:
                if child.read_enough_times():
                    self.next_possible_child_pos += 1
                else:
                    break
        raise ValidationError("Did not expect <%s>." % name)
class SaxHandler(xml.sax.handler.ContentHandler):
def __init__(self, root_tag, tmp_dict):
self.tag_to_parse = root_tag
self.tmp_dict = tmp_dict
self.text = ""
def startElement(self, name, attrs):
"""
Called when a start tag has been read.
"""
if attrs.getLength() > 0:
raise ValidationError("Did not expect attributes on <%s>." % name)
if self.text.strip():
raise ValidationError("Did not expect text but got '%s'."
|
gustavemichel/IEEEXtreme10-Technomancers | P23 - P is NP/PisNP.py | Python | gpl-3.0 | 555 | 0.005405 | import sys
input_data = sys.stdin.readlines()
static = input_dat | a.pop(0).rstrip().split()
teams = int(static[0])
pizza = int(static[1])
nopizza= int(static[2])
input_data.pop(0)
pizza_scores = input_data[0:pizza]
input_data = input_data[pizza:]
pizza_scores = [-1 if x.rstrip() == '?' else int(x.rstrip()) for x in pizza_scores]
input_data.pop(0)
nopizza_scores = input_data[0:nopizza]
input_data = input_data[nopizza:]
nopizza_scores = [- | 1 if x.rstrip() == '?' else int(x.rstrip()) for x in nopizza_scores]
print pizza_scores
print nopizza_scores
|
SickGear/SickGear | sickbeard/providers/torrentday.py | Python | gpl-3.0 | 7,671 | 0.00352 | # coding=utf-8
#
# This file is part of SickGear.
#
# SickGear is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickGear is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
import re
import time
from . import generic
from ..helpers import anon_url, try_int
from bs4_parser import BS4Parser
from _23 import b64decodestring
from six import iteritems
class TorrentDayProvider(generic.TorrentProvider):
    """Search provider for the private TorrentDay tracker (cookie-based login)."""

    def __init__(self):
        generic.TorrentProvider.__init__(self, 'TorrentDay')

        # Mirror hostnames are stored reversed and noise-padded, then cleaned
        # with re.sub and base64-decoded - presumably to keep the URLs out of
        # trivial greps.  Decoded once here at construction time.
        self.url_home = ['https://www.torrentday.com/'] + \
                        ['http://td.%s/' % b64decodestring(x) for x in [''.join(x) for x in [
                            [re.sub(r'(?i)[I\s1]+', '', x[::-1]) for x in [
                                'y92d', 'zl12a', 'y9mY', 'n5 Wa', 'vNmIL', '=i1=Qb']],
                            [re.sub(r'(?i)[T\sq]+', '', x[::-1]) for x in [
                                '15TWd', 'hV 3c', 'lBHb', 'vNncq', 'j5ib', '=qQ02b']],
                        ]]]

        # URL fragments combined through url_tmpl by the base class.
        self.url_vars = {'login': 'rss.php', 'search': 't?%s%s&qf=&p=%s&q=%s'}
        self.url_tmpl = {'config_provider_home_uri': '%(home)s', 'login': '%(home)s%(vars)s',
                         'search': '%(home)s%(vars)s'}

        # Site category ids grouped by search mode; 'Cache' searches both.
        self.categories = {'Season': [31, 33, 14], 'Episode': [24, 32, 26, 7, 34, 2], 'anime': [29]}
        self.categories['Cache'] = self.categories['Season'] + self.categories['Episode']

        self.proper_search_terms = None

        # digest: user-supplied cookie string; the rest are filter settings.
        self.digest, self.freeleech, self.minseed, self.minleech = 4 * [None]

    def _authorised(self, **kwargs):
        # Logged in iff the RSS page renders, all cookies are present, and the
        # uid/pass cookie values appear in the user-configured digest string.
        return super(TorrentDayProvider, self)._authorised(
            logged_in=(lambda y='': all(
                ['RSS URL' in y, self.has_all_cookies()] +
                [(self.session.cookies.get(c, domain='') or 'sg!no!pw') in self.digest
                 for c in ('uid', 'pass')])),
            failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings'))

    @staticmethod
    def _has_signature(data=None):
        # Recognise a TorrentDay page either via the generic check or the title.
        return generic.TorrentProvider._has_signature(data) or \
            (data and re.search(r'(?i)<title[^<]+?(td|torrentday)', data))

    def _search_provider(self, search_params, **kwargs):
        results = []
        if not self._authorised():
            return results

        # Strip the 'id-' prefix from the last-seen torrent id marker.
        last_recent_search = self.last_recent_search
        last_recent_search = '' if not last_recent_search else last_recent_search.replace('id-', '')
        for mode in search_params:
            urls = []
            for search_string in search_params[mode]:
                search_string = '+'.join(search_string.split())
                urls += [[]]
                # Cache mode paginates deeper (pages 1-4) than searches (1-2).
                for page in range((3, 5)['Cache' == mode])[1:]:
                    urls[-1] += [self.urls['search'] % (self._categories_string(mode, '%s=on'),
                                                       ('&free=on', '')[not self.freeleech], page, search_string)]
            results += self._search_urls(mode, last_recent_search, urls)
            last_recent_search = ''
        return results

    def _search_urls(self, mode, last_recent_search, urls):
        results = []
        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        # Precompiled matchers: download links and the numeric torrent id.
        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in iteritems(dict(get='download', id=r'download.*?/([\d]+)')))
        lrs_found = False
        lrs_new = True
        for search_urls in urls:  # this intentionally iterates once to preserve indentation
            for search_url in search_urls:
                html = self.get_url(search_url)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                cnt_search = 0
                log_settings_hint = False
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html, tag='table', attr='torrentTable') as soup:
                        tbl = soup.find('table', id='torrentTable')
                        tbl_rows = [] if not tbl else tbl.find_all('tr')
                        if 2 > len(tbl_rows):
                            raise generic.HaltParseException

                        # A sparse cache page suggests category settings hide results.
                        if 'Cache' == mode and 100 > len(tbl_rows):
                            log_settings_hint = True

                        head = None
                        for tr in tbl_rows[1:]:
                            cells = tr.find_all('td')
                            if 4 > len(cells):
                                continue
                            cnt_search += 1
                            try:
                                head = head if None is not head else self._header_row(
                                    tr, header_strip='(?i)(?:leechers|seeders|size);')
                                dl = tr.find('a', href=rc['get'])['href']
                                dl_id = rc['id'].findall(dl)[0]
                                # Stop once we reach the torrent seen last run.
                                lrs_found = dl_id == last_recent_search
                                if lrs_found:
                                    break
                                seeders, leechers, size = [try_int(n, n) for n in [
                                    cells[head[x]].get_text().strip() for x in ('seed', 'leech', 'size')]]
                                if self._reject_item(seeders, leechers):
                                    continue
                                title = tr.find('a', href=re.compile('/t/%s' % dl_id)).get_text().strip()
                                download_url = self._link(dl)
                            except (AttributeError, TypeError, ValueError, IndexError):
                                # Malformed row: skip it rather than abort the page.
                                continue
                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))
                except generic.HaltParseException:
                    pass
                except (BaseException, Exception):
                    # Unexpected parse failure: brief back-off before next page.
                    time.sleep(1.1)
                self._log_search(mode, len(items[mode]) - cnt, search_url, log_settings_hint)
                if self.is_search_finished(mode, items, cnt_search, rc['id'], last_recent_search, lrs_new, lrs_found):
                    break
                lrs_new = False
        results = self._sort_seeding(mode, results + items[mode])
        return results

    def _episode_strings(self, ep_obj, **kwargs):
        # TorrentDay uses dotted air dates in episode search strings.
        return super(TorrentDayProvider, self)._episode_strings(ep_obj, sep_date='.', date_or=True, **kwargs)

    def ui_string(self, key):
        """Return UI help text for the settings page for the given key."""
        cookies = 'use... \'uid=xx; pass=yy\''
        if 'cookie_str_only' == key:
            return cookies
        if 'torrentday_digest' == key and self._valid_home():
            current_url = getattr(self, 'urls', {}).get('config_provider_home_uri')
            return (cookies + (current_url and ('<br>from a session logged in at <a target="_blank" href="%s">%s</a>' %
                                                (anon_url(current_url), current_url.strip('/'))) or ''))
        return ''
provider = TorrentDayProvider()
|
twitter/pants | tests/python/pants_test/backend/jvm/tasks/test_checkstyle.py | Python | apache-2.0 | 5,076 | 0.005122 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from textwrap import dedent
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.checkstyle import Checkstyle
from pants.base.exceptions import TaskError
from pants.build_graph.address import Address
from pants_test.jvm.nailgun_task_test_base import NailgunTaskTestBase
from pants_test.task_test_base import ensure_cached
class CheckstyleTest(NailgunTaskTestBase):
    """Tests for the class Checkstyle."""

    # Checkstyle rule that fails on any tab character in a source file.
    _RULE_XML_FILE_TAB_CHECKER = dedent("""
    <module name="FileTabCharacter"/>
    """)

    # Rule that wires in an external suppression file; the path is supplied at
    # run time through the ${checkstyle.suppression.file} property.
    _RULE_XML_SUPPRESSION_FILTER = dedent("""
    <module name="SuppressionFilter">
      <property name="file" value="${checkstyle.suppression.file}"/>
    </module>
    """)

    # Java fixture that satisfies the tab-checker rule.
    _TEST_JAVA_SOURCE_WITH_NO_TAB = dedent("""
    public class HelloMain {
      public static void main(String[] args) throws IOException {
        System.out.println("A test.");
      }
    }
    """)

    # Java fixture containing a literal tab, which trips the tab-checker rule.
    _TEST_JAVA_SOURCE_WITH_TAB = dedent("""
    public class HelloMain {
      public static void main(String[] args) throws IOException {
      \tSystem.out.println("A test.");
      }
    }
    """)

    @classmethod
    def task_type(cls):
        return Checkstyle

    def _create_context(self, rules_xml=(), properties=None, target_roots=None):
        """Build a task context with a checkstyle config generated from rules_xml."""
        return self.context(
            options={
                self.options_scope: {
                    'bootstrap_tools': ['//:checkstyle'],
                    'configuration': self._create_config_file(rules_xml),
                    'properties': properties or {},
                }
            },
            target_roots=target_roots)

    def _create_config_file(self, rules_xml=()):
        # Emit a minimal checkstyle XML config embedding the given rule snippets.
        return self.create_file(
            relpath='coding_style.xml',
            contents=dedent(
                """<?xml version="1.0"?>
                <!DOCTYPE module PUBLIC
                  "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
                  "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
                <module name="Checker">
                  {rules_xml}
                </module>""".format(rules_xml='\n'.join(rules_xml))))

    def _create_suppression_file(self, suppresses_xml=()):
        # Emit a checkstyle suppressions file from the given <suppress/> snippets.
        return self.create_file(
            relpath='suppression.xml',
            contents=dedent(
                """<?xml version="1.0"?>
                <!DOCTYPE suppressions PUBLIC
                  "-//Puppy Crawl//DTD Suppressions 1.1//EN"
                  "http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
                <suppressions>
                  {suppresses_xml}
                </suppressions>
                """.format(suppresses_xml='\n'.join(suppresses_xml))))

    def _create_target(self, name, test_java_source):
        # Materialize a java_library target with a single source file.
        rel_dir = os.path.join('src/java', name)
        self.create_file(relpath=os.path.join(rel_dir, '{name}.java'.format(name=name)),
                         contents=test_java_source)
        return self.make_target(Address(spec_path=rel_dir, target_name=name).spec,
                                JavaLibrary,
                                sources=['{}.java'.format(name)])

    #
    # Test section
    #
    @ensure_cached(Checkstyle, expected_num_artifacts=1)
    def test_single_rule_pass(self):
        # Tab-free source passes the tab-checker rule.
        no_tab = self._create_target('no_tab', self._TEST_JAVA_SOURCE_WITH_NO_TAB)
        context = self._create_context(rules_xml=[self._RULE_XML_FILE_TAB_CHECKER],
                                       target_roots=[no_tab])
        self.populate_runtime_classpath(context=context)
        self.execute(context)

    @ensure_cached(Checkstyle, expected_num_artifacts=0)
    def test_single_rule_fail(self):
        with_tab = self._create_target('with_tab', self._TEST_JAVA_SOURCE_WITH_TAB)
        context = self._create_context(rules_xml=[self._RULE_XML_FILE_TAB_CHECKER],
                                       target_roots=[with_tab])
        # add a tab in the source to trigger the tab check rule to fail.
        self.populate_runtime_classpath(context=context)
        with self.assertRaises(TaskError):
            self.execute(context)

    def test_suppressions(self):
        # For this test, we:
        # - add 3 java files, 2 with tabs, 1 without.
        # - add 2 suppression rules against those 2 java files with tabs,
        #   so we can test the logic of suppression.
        suppression_file = self._create_suppression_file(
            [
                '<suppress files=".*with_tab_1\.java" checks=".*" />',
                '<suppress files=".*with_tab_2\.java" checks=".*" />',
            ])
        no_tab = self._create_target('no_tab', self._TEST_JAVA_SOURCE_WITH_NO_TAB)
        with_tab_1 = self._create_target('with_tab_1', self._TEST_JAVA_SOURCE_WITH_TAB)
        with_tab_2 = self._create_target('with_tab_2', self._TEST_JAVA_SOURCE_WITH_TAB)
        context = self._create_context(
            rules_xml=[
                self._RULE_XML_SUPPRESSION_FILTER,
                self._RULE_XML_FILE_TAB_CHECKER
            ],
            properties={
                'checkstyle.suppression.file': suppression_file,
            },
            target_roots=[no_tab, with_tab_1, with_tab_2])
        self.populate_runtime_classpath(context=context)
        self.execute(context)
|
xxn59/weat | order.py | Python | mit | 1,603 | 0.011853 | # -*- coding: ut | f-8 -*-
import time
from flask import Flask,g,request,make_response
import hashlib
import xml.etree.ElementTree as ET
app = Flask(__name__)
app.debug=True
@app.route('/',methods=['GET','POST'])
def wechat_auth():
if request.method == 'GET':
token='weatorg'
data = request.args
signature = data.get('signature','')
times | tamp = data.get('timestamp','')
nonce = data.get('nonce','')
# echostr = data.get('echostr','')
echostr = data.get('echostr')
print 'ech0str:',echostr
s = [timestamp,nonce,token]
s.sort()
s = ''.join(s)
if (hashlib.sha1(s).hexdigest() == signature):
print 'signature correct'
# return make_response('5838479218127813673')
make_response(echostr)
return 'ok'
else:
print 'signature err'
print hashlib.sha1(s).hexdigest()
print signature
return 'error'
else:
rec = request.stream.read()
xml_rec = ET.fromstring(rec)
tou = xml_rec.find('ToUserName').text
fromu = xml_rec.find('FromUserName').text
content = xml_rec.find('Content').text
xml_rep = "<xml><ToUserName><![CDATA[%s]]></ToUserName><FromUserName><![CDATA[%s]]></FromUserName><CreateTime>%s</CreateTime><MsgType><![CDATA[text]]></MsgType><Content><![CDATA[%s]]></Content><FuncFlag>0</FuncFlag></xml>"
response = make_response(xml_rep % (fromu,tou,str(int(time.time())), content))
response.content_type='application/xml'
return response
|
chubbymaggie/simuvex | simuvex/procedures/libc___so___6/__stack_chk_fail.py | Python | bsd-2-clause | 258 | 0.007752 | im | port simuvex
######################################
# __stack_chk_fail
######################################
class __stack_chk_fail(simuvex.SimProcedure):
    # glibc's stack-smashing handler reports the corruption and aborts the
    # program, so execution never returns to the caller.
    NO_RET = True
    def run(self, exit_code): #pylint:disable=unused-argument
        # Modeled as a terminal no-op; exit_code exists only to match the
        # procedure's calling convention.
        return
|
kleientertainment/ds_mod_tools | pkg/win32/Python27/Lib/site-packages/clint/textui/__init__.py | Python | mit | 181 | 0 | # -*- coding: | utf-8 -*-
"""
clint.textui
| ~~~~~~~~~~~~
This module provides the text output helper system.
"""
from . import colored
from . import progress
from .core import *
|
zofuthan/edx-platform | openedx/core/djangoapps/course_groups/cohorts.py | Python | agpl-3.0 | 16,159 | 0.002166 | """
This file contains the logic for cohorts, as exposed internally to the
forums, and to the cohort admin views.
"""
import logging
import random
from django.db import transaction
from django.db.models.signals import post_save, m2m_changed
from django.dispatch import receiver
from django.http import Http404
from django.utils.translation import ugettext as _
from courseware import courses
from eventtracking import tracker
from request_cache.middleware import RequestCache
from student.models import get_user_by_username_or_email
from .models import CourseUserGroup, CourseCohort, CourseCohortsSettings, CourseUserGroupPartitionGroup
log = logging.getLogger(__name__)
@receiver(post_save, sender=CourseUserGroup)
def _cohort_added(sender, **kwargs):
    """Emit an ``edx.cohort.created`` tracking event whenever a cohort is created."""
    cohort = kwargs["instance"]
    if not kwargs["created"] or cohort.group_type != CourseUserGroup.COHORT:
        return
    tracker.emit(
        "edx.cohort.created",
        {"cohort_id": cohort.id, "cohort_name": cohort.name}
    )
@receiver(m2m_changed, sender=CourseUserGroup.users.through)
def _cohort_membership_changed(sender, **kwargs):
    """Emits a tracking log event each time cohort membership is modified"""
    def get_event_iter(user_id_iter, cohort_iter):
        # One event per affected (user, cohort) pair.
        return (
            {"cohort_id": cohort.id, "cohort_name": cohort.name, "user_id": user_id}
            for user_id in user_id_iter
            for cohort in cohort_iter
        )

    action = kwargs["action"]
    instance = kwargs["instance"]
    pk_set = kwargs["pk_set"]
    reverse = kwargs["reverse"]

    # Map the m2m signal phase to an event name; ignore all other phases
    # (pre_add, post_clear, ...).
    if action == "post_add":
        event_name = "edx.cohort.user_added"
    elif action in ["post_remove", "pre_clear"]:
        event_name = "edx.cohort.user_removed"
    else:
        return

    if reverse:
        # reverse=True: `instance` is the User side of the relation; pk_set
        # (or, for pre_clear, the user's current groups) names the cohorts.
        user_id_iter = [instance.id]
        if action == "pre_clear":
            cohort_iter = instance.course_groups.filter(group_type=CourseUserGroup.COHORT)
        else:
            cohort_iter = CourseUserGroup.objects.filter(pk__in=pk_set, group_type=CourseUserGroup.COHORT)
    else:
        # Forward direction: `instance` is the group; pk_set (or the group's
        # current members for pre_clear) names the users.
        cohort_iter = [instance] if instance.group_type == CourseUserGroup.COHORT else []
        if action == "pre_clear":
            user_id_iter = (user.id for user in instance.users.all())
        else:
            user_id_iter = pk_set

    for event in get_event_iter(user_id_iter, cohort_iter):
        tracker.emit(event_name, event)
# A 'default cohort' is an auto-cohort that is automatically created for a course if no cohort with automatic
# assignment have been specified. It is intended to be used in a cohorted-course for users who have yet to be assigned
# to a cohort.
# Translation Note: We are NOT translating this string since it is the constant identifier for the "default group"
# and needed across product boundaries.
DEFAULT_COHORT_NAME = "Default Group"
# tl;dr: global state is bad. capa reseeds random every time a problem is loaded. Even
# if and when that's fixed, it's a good idea to have a local generator to avoid any other
# code that messes with the global random module.
_local_random = None
def local_random():
    """
    Return the module-local random number generator.

    Created lazily inside a function so that ``random.Random()`` is not
    instantiated at import time, and kept separate from the global ``random``
    module state (which other code, e.g. capa, reseeds at will).
    """
    global _local_random
    if _local_random is None:
        _local_random = random.Random()
    return _local_random
def is_course_cohorted(course_key):
    """
    Report whether the course identified by ``course_key`` has cohorts enabled.

    Raises:
        Http404 if the course doesn't exist.
    """
    settings_model = get_course_cohort_settings(course_key)
    return settings_model.is_cohorted
def get_cohort_id(user, course_key, use_cached=False):
    """
    Return the id of the cohort ``user`` belongs to in the given course, or
    None when the user has no cohort there.
    """
    cohort = get_cohort(user, course_key, use_cached=use_cached)
    if cohort is None:
        return None
    return cohort.id
def get_cohorted_commentables(course_key):
    """
    Return the set of cohorted commentable ids (strings) for the given course.

    A non-cohorted course has no cohorted commentables, so the result is empty.
    """
    settings_model = get_course_cohort_settings(course_key)
    if not settings_model.is_cohorted:
        return set()
    return set(settings_model.cohorted_discussions)
@transaction.commit_on_success
def get_cohort(user, course_key, assign=True, use_cached=False):
    """Returns the user's cohort for the specified course.

    The cohort for the user is cached for the duration of a request. Pass
    use_cached=True to use the cached value instead of fetching from the
    database.

    Arguments:
        user: a Django User object.
        course_key: CourseKey
        assign (bool): if False then we don't assign a group to user
        use_cached (bool): Whether to use the cached value or fetch from database.

    Returns:
        A CourseUserGroup object if the course is cohorted and the User has a
        cohort, else None.

    Raises:
        ValueError if the CourseKey doesn't exist.
    """
    request_cache = RequestCache.get_request_cache()
    cache_key = u"cohorts.get_cohort.{}.{}".format(user.id, course_key)

    if use_cached and cache_key in request_cache.data:
        return request_cache.data[cache_key]

    # Drop any stale entry; it is repopulated below.
    request_cache.data.pop(cache_key, None)

    # First check whether the course is cohorted (users shouldn't be in a cohort
    # in non-cohorted courses, but settings can change after course starts)
    course_cohort_settings = get_course_cohort_settings(course_key)
    if not course_cohort_settings.is_cohorted:
        return request_cache.data.setdefault(cache_key, None)

    # If course is cohorted, check if the user already has a cohort.
    try:
        cohort = CourseUserGroup.objects.get(
            course_id=course_key,
            group_type=CourseUserGroup.COHORT,
            users__id=user.id,
        )
        return request_cache.data.setdefault(cache_key, cohort)
    except CourseUserGroup.DoesNotExist:
        # Didn't find the group. If we do not want to assign, return here.
        if not assign:
            # Do not cache the cohort here, because in the next call assign
            # may be True, and we will have to assign the user a cohort.
            return None

    # Otherwise assign the user a cohort: pick a random auto-assign cohort, or
    # create the default cohort if none exist yet.
    course = courses.get_course(course_key)
    cohorts = get_course_cohorts(course, assignment_type=CourseCohort.RANDOM)
    if cohorts:
        cohort = local_random().choice(cohorts)
    else:
        cohort = CourseCohort.create(
            cohort_name=DEFAULT_COHORT_NAME,
            course_id=course_key,
            assignment_type=CourseCohort.RANDOM
        ).course_user_group

    user.course_groups.add(cohort)
    return request_cache.data.setdefault(cache_key, cohort)
def migrate_cohort_settings(course):
"""
Migrate all the cohort settings associated with this course from modulestore to mysql.
After that we will never touch modulestore for any cohort related settings.
"""
cohort_settings, created = CourseCohortsSettings.objects.get_or_create(
course_id=course.id,
defaults={
'is_cohorted': course.is_cohorted,
'cohorted_discussions': list(course.cohorted_discussions),
'always_cohort_inline_discussions': course.always_cohort_inline_discussions
}
)
# Add the new and update the existing cohorts
if created:
# Update the manual cohorts already present in CourseUserGroup
manual_cohorts = CourseUserGroup.objects.filter(
course_id=course.id,
group_type=CourseUserGroup.COHORT
).exclude(name__in=course.auto_cohort_groups)
for cohort in manual_cohorts:
CourseCohort.create(course_user_group=cohort)
for group_name in course.auto_cohort_groups:
CourseCohort.create(cohort_name=group_name, course_id=course.id, assignment_type=CourseCohort.RANDOM)
return cohort_sett |
kislyuk/aegea | aegea/top.py | Python | apache-2.0 | 1,383 | 0.007231 | import os, sys
from datetime import datetime
from typing import List
import boto3
import botocore.exceptions
from . import register_parser
from .util import ThreadPoolExecutor
from .util.printing import format_table, page_output
def get_stats_for_region(region):
    """Return [region, #instances, #AMIs, #VPCs, #ENIs, #EBS volumes] for one region.

    Any ClientError (typically an opted-out or restricted region) yields the
    literal string "Access denied" for every count.
    """
    try:
        ec2 = boto3.Session(region_name=region).resource("ec2")
        counts = [
            sum(1 for _ in ec2.instances.all()),
            sum(1 for _ in ec2.images.filter(Owners=["self"])),
            sum(1 for _ in ec2.vpcs.all()),
            sum(1 for _ in ec2.network_interfaces.all()),
            sum(1 for _ in ec2.volumes.all()),
        ]
    except botocore.exceptions.ClientError:
        counts = ["Access denied"] * 5  # type: ignore
    return [region] + counts
def top(args):
    """Print a per-region table of AWS EC2 resource counts."""
    columns = ["Region", "Instances", "AMIs", "VPCs", "Network interfaces", "EBS volumes"]
    regions = boto3.Session().get_available_regions("ec2")
    # Fan the per-region API calls out across threads; order follows `regions`.
    table = list(ThreadPoolExecutor().map(get_stats_for_region, regions))  # type: List[List]
    page_output(format_table(table, column_names=columns, max_col_width=args.max_col_width))
parser = register_parser(top, help='Show an overview of AWS resources per region')
|
ARISE-Initiative/robosuite | robosuite/wrappers/domain_randomization_wrapper.py | Python | mit | 9,076 | 0.002093 | """
This file implements a wrapper for facilitating domain randomization over
robosuite environments.
"""
import numpy as np
from robosuite.utils.mjmod import CameraModder, DynamicsModder, LightingModder, TextureModder
from robosuite.wrappers import Wrapper
DEFAULT_COLOR_ARGS = {
"geom_names": None, # all geoms are randomized
"randomize_local": True, # sample nearby colors
"randomize_material": True, # randomize material reflectance / shininess / specular
"local_rgb_interpolation": 0.2,
"local_material_interpolation": 0.3,
"texture_variations": ["rgb", "checker", "noise", "gradient"], # all texture variation types
"randomize_skybox": True, # by default, randomize skybox too
}
DEFAULT_CAMERA_ARGS = {
"camera_names": None, # all cameras are randomized
"randomize_position": True,
"randomize_rotation": True,
"randomize_fovy": True,
"position_perturbation_size": 0.01,
"rotation_perturbation_size": 0.087,
"fovy_perturbation_size": 5.0,
}
DEFAULT_LIGHTING_ARGS = {
"light_names": None, # all lights are randomized
"randomize_position": True,
"randomize_direction": True,
"randomize_specular": True,
"randomize_ambient": True,
"randomize_diffuse": True,
"randomize_active": True,
"position_perturbation_size": 0.1,
"direction_perturbation_size": 0.35,
"specular_perturbation_size": 0.1,
"ambient_perturbation_size": 0.1,
"diffuse_perturbation_size": 0.1,
}
DEFAULT_DYNAMICS_ARGS = {
# Opt parameters
"randomize_density": True,
"randomize_viscosity": True,
"density_perturbation_ratio": 0.1,
"viscosity_perturbation_ratio": 0.1,
# Body parameters
"body_names": None, # all bodies randomized
"randomize_position": True,
"randomize_quaternion": True,
"randomize_inertia": True,
"randomize_mass": True,
"position_perturbation_size": 0.0015,
"quaternion_perturbation_size": 0.003,
"inertia_perturbation_ratio": 0.02,
"mass_perturbation_ratio": 0.02,
# Geom parameters
"geom_names": None, # all geoms randomized
"randomize_friction": True,
"randomize_solref": True,
"randomize_solimp": True,
"friction_perturbation_ratio": 0.1,
"solref_perturbation_ratio": 0.1,
"solimp_perturbation_ratio": 0.1,
# Joint parameters
"joint_names": None, # all joints randomized
"randomize_stiffness": True,
"randomize_frictionloss": True,
"randomize_damping": True,
"randomize_armature": True,
"stiffness_perturbation_ratio": 0.1,
"frictionloss_perturbation_size": 0.05,
"damping_perturbation_size": 0.01,
"armature_perturbation_size": 0.01,
}
class DomainRandomizationWrapper(Wrapper):
"""
Wrapper that allows for domain randomization mid-simulation.
Args:
env (MujocoEnv): The environment to wrap.
seed (int): Integer used to seed all randomizations from this wrapper. It is
used to create a np.random.RandomState instance to make sure samples here
are isolated from sampling occurring elsewhere in the code. If not provided,
will default to using global random state.
randomize_color (bool): if True, randomize geom colors and texture colors
randomize_camera (bool): if True, randomize camera locations and parameters
randomize_lighting (bool): if True, randomize light locations and properties
randomize_dyanmics (bool): if True, randomize dynamics parameters
color_randomization_args (dict): Color-specific randomization arguments
camera_randomization_args (dict): Camera-specific randomization arguments
lighting_randomization_args (dict): Lighting-specific randomization arguments
dynamics_randomization_args (dict): Dyanmics-specific randomization arguments
randomize_on_reset (bool): if True, randomize on every call to @reset. This, in
conjunction with setting @randomize_every_n_steps to 0, is useful to
generate a new domain per episode.
randomize_every_n_steps (int): determines how often randomization should occur. Set
to 0 if randomization should happen manually (by calling @randomize_domain)
"""
    def __init__(
        self,
        env,
        seed=None,
        randomize_color=True,
        randomize_camera=True,
        randomize_lighting=True,
        randomize_dynamics=True,
        color_randomization_args=DEFAULT_COLOR_ARGS,
        camera_randomization_args=DEFAULT_CAMERA_ARGS,
        lighting_randomization_args=DEFAULT_LIGHTING_ARGS,
        dynamics_randomization_args=DEFAULT_DYNAMICS_ARGS,
        randomize_on_reset=True,
        randomize_every_n_steps=1,
    ):
        super().__init__(env)

        self.seed = seed
        # Dedicated RNG keeps this wrapper's sampling isolated from the global
        # numpy random state; None means the modders fall back to global state.
        if seed is not None:
            self.random_state = np.random.RandomState(seed)
        else:
            self.random_state = None
        self.randomize_color = randomize_color
        self.randomize_camera = randomize_camera
        self.randomize_lighting = randomize_lighting
        self.randomize_dynamics = randomize_dynamics
        self.color_randomization_args = color_randomization_args
        self.camera_randomization_args = camera_randomization_args
        self.lighting_randomization_args = lighting_randomization_args
        self.dynamics_randomization_args = dynamics_randomization_args
        self.randomize_on_reset = randomize_on_reset
        self.randomize_every_n_steps = randomize_every_n_steps

        # Steps taken since the last randomization (see step_randomization).
        self.step_counter = 0

        # One modder per enabled randomization category; each holds a reference
        # to the current sim and shares the random state above.
        self.modders = []

        if self.randomize_color:
            self.tex_modder = TextureModder(
                sim=self.env.sim, random_state=self.random_state, **self.color_randomization_args
            )
            self.modders.append(self.tex_modder)

        if self.randomize_camera:
            self.camera_modder = CameraModder(
                sim=self.env.sim,
                random_state=self.random_state,
                **self.camera_randomization_args,
            )
            self.modders.append(self.camera_modder)

        if self.randomize_lighting:
            self.light_modder = LightingModder(
                sim=self.env.sim,
                random_state=self.random_state,
                **self.lighting_randomization_args,
            )
            self.modders.append(self.light_modder)

        if self.randomize_dynamics:
            self.dynamics_modder = DynamicsModder(
                sim=self.env.sim,
                random_state=self.random_state,
                **self.dynamics_randomization_args,
            )
            self.modders.append(self.dynamics_modder)

        # Snapshot the pristine model so randomizations can be undone on reset.
        self.save_default_domain()
    def reset(self):
        """
        Extends superclass method to reset the domain randomizer.

        Restores the default (un-randomized) domain, performs a normal env
        reset, re-saves the defaults for the fresh sim, points all modders at
        the new sim, and optionally randomizes immediately.

        Returns:
            OrderedDict: Environment observation space after reset occurs
        """
        # undo all randomizations
        self.restore_default_domain()

        # normal env reset
        ret = super().reset()

        # save the original env parameters
        self.save_default_domain()

        # reset counter for doing domain randomization at a particular frequency
        self.step_counter = 0

        # update sims
        for modder in self.modders:
            modder.update_sim(self.env.sim)

        if self.randomize_on_reset:
            # domain randomize + regenerate observation
            self.randomize_domain()
            ret = self.env._get_observations()

        return ret
    def step(self, action):
        """
        Extends vanilla step() function call to accommodate domain randomization

        Args:
            action: action passed through to the wrapped environment unchanged.

        Returns:
            4-tuple:

                - (OrderedDict) observations from the environment
                - (float) reward from the environment
                - (bool) whether the current episode is completed or not
                - (dict) misc information
        """
        # Step the internal randomization state before stepping the env so the
        # randomized domain is in effect for this transition.
        self.step_randomization()
        return super().step(action)
def step_randomization(self):
"""
Steps the internal randomization state
"""
|
krux/graphite-web | webapp/graphite/events/urls.py | Python | apache-2.0 | 832 | 0 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from django.conf.urls import url
from . import views
urlpatterns = [
    # NOTE(review): '^get_data?$' makes the trailing 'a' optional; kept
    # as-is to preserve existing routing behavior.
    url('^get_data?$', views.get_data, name='events_get_data'),
    # Detail view for a single event, addressed by its numeric id.
    url(r'(?P<event_id>\d+)/$', views.detail, name='events_detail'),
    url('^$', views.view_events, name='events'),
]
|
tangrams/data2image | example/lut.py | Python | mit | 817 | 0.058752 | #!/usr/bin/env python
from PIL import Image
import sys
sys.path.insert(0, r'../python/')
import encode
lut = [[0.8487,0.84751182,0.84479598,0.840213,0.83359314,0.8257851,0.814752,0.80006949,0.78216192,0.76060494,0.73658673,0.7086645,0.67777182,0.64475739,0.60987582,0.57134484,0.52729731,0.48562614,0.45167814],[ | 0,0.0838426,0.1676852,0.2515278,0.3353704,0.419213,0.5030556,0.5868982,0.67182264,0.75336633,0.83518048,0.91537187,0.99339958,1.06872269,1.14066505,1.20841528,1.27035062,1.31998003,1.3523]]
print len(lut[0]),len(lut)
img = Image.new('RGBA', (len(lut[0]), len(lut)), (0,0,0,0))
pixels = img.load()
for y in range(len(lut)):
for x in range(len(lut[0])):
pixels[x,y] = encode.toRGBA(lut[y][x],'number')
pixels[x,y] = encode.toRGBA(lut[y][x],'numbe | r')
img.save(open('lut.png', 'w')) |
meisamhe/GPLshared | Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/rotate_array_permutation.py | Python | gpl-3.0 | 569 | 0 | import fractions
# @include
def rotate_array(rotate_amount, A):
    """Rotate list ``A`` to the right by ``rotate_amount`` positions, in place.

    Uses the cyclic-permutation decomposition: the rotation splits the
    indices into gcd(len(A), rotate_amount) disjoint cycles, each applied
    with O(1) extra space, for O(n) total time.

    Args:
        rotate_amount: number of positions to rotate right; reduced
            modulo ``len(A)``.
        A: list to rotate in place.
    """
    # fractions.gcd was deprecated in 3.5 and removed in Python 3.9.
    from math import gcd

    def apply_cyclic_permutation(rotate_amount, offset, cycle_length):
        # Walk one cycle starting at `offset`, carrying each displaced
        # element forward to its rotated position.
        temp = A[offset]
        for i in range(1, cycle_length):
            idx = (offset + i * rotate_amount) % len(A)
            A[idx], temp = temp, A[idx]
        A[offset] = temp

    # Nothing to do for an empty list or a whole-length rotation.
    if not A:
        return
    rotate_amount %= len(A)
    if rotate_amount == 0:
        return
    num_cycles = gcd(len(A), rotate_amount)
    cycle_length = len(A) // num_cycles
    for c in range(num_cycles):
        apply_cyclic_permutation(rotate_amount, c, cycle_length)
# @exclude
|
dropbox/changes | tests/changes/api/test_snapshot_index.py | Python | apache-2.0 | 2,245 | 0.000445 | from changes.config import db
from changes.models.project import ProjectOption
from changes.models.snapshot import SnapshotStatus
from changes.testutils import APITestCase
class SnapshotListTest(APITestCase):
    """Exercises the snapshot list endpoint's ``state`` filtering."""

    def test_simple(self):
        # Project 1: an active snapshot with a single image.
        project_1 = self.create_project()
        build_1 = self.create_build(project_1)
        snapshot_1 = self.create_snapshot(
            project=project_1, status=SnapshotStatus.active, build=build_1)
        plan_1 = self.create_plan(project_1)
        image_1 = self.create_snapshot_image(snapshot_1, plan_1)
        # Project 2: an invalidated snapshot with two images, marked as
        # the project's current snapshot via the 'snapshot.current' option.
        project_2 = self.create_project()
        build_2 = self.create_build(project_2)
        snapshot_2 = self.create_snapshot(
            project=project_2, status=SnapshotStatus.invalidated, build=build_2)
        plan_2 = self.create_plan(project_2)
        image_2 = self.create_snapshot_image(snapshot_2, plan_1)
        image_3 = self.create_snapshot_image(snapshot_2, plan_2)
        db.session.add(ProjectOption(
            project=project_2,
            name='snapshot.current',
            value=snapshot_2.id.hex,
        ))
        db.session.commit()
        # No state filter: both snapshots are returned, most recent first.
        path = '/api/0/snapshots/?state='
        resp = self.client.get(path)
        assert resp.status_code == 200
        data = self.unserialize(resp)
        assert len(data) == 2
        assert data[0]['id'] == snapshot_2.id.hex
        assert data[0]['isActive']
        assert len(data[0]['images']) == 2
        assert data[0]['images'][0]['id'] == image_2.id.hex
        assert data[0]['images'][1]['id'] == image_3.id.hex
        assert data[1]['id'] == snapshot_1.id.hex
        assert not data[1]['isActive']
        assert len(data[1]['images']) == 1
        assert data[1]['images'][0]['id'] == image_1.id.hex
        # Only valid (non-invalidated) snapshots.
        path = '/api/0/snapshots/?state=valid'
        resp = self.client.get(path)
        assert resp.status_code == 200
        data = self.unserialize(resp)
        assert len(data) == 1
        assert data[0]['id'] == snapshot_1.id.hex
        # Only invalidated snapshots.
        path = '/api/0/snapshots/?state=invalid'
        resp = self.client.get(path)
        assert resp.status_code == 200
        data = self.unserialize(resp)
        assert len(data) == 1
        assert data[0]['id'] == snapshot_2.id.hex
|
evernym/zeno | plenum/test/monitoring/test_monitor_attributes.py | Python | apache-2.0 | 201 | 0 | def testHasMasterPrimary(txnPoolNodeSet):
maste | rPrimaryCount = 0
for node | in txnPoolNodeSet:
masterPrimaryCount += int(node.monitor.hasMasterPrimary)
assert masterPrimaryCount == 1
|
Tankypon/ubuntu-make | umake/network/download_center.py | Python | gpl-3.0 | 9,909 | 0.002321 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Module delivering a DownloadCenter to download in parallel multiple requests"""
from collections import namedtuple
from concurrent import futures
from contextlib import closing
import hashlib
from io import BytesIO
import logging
import os
import tempfile
import requests
import requests.exceptions
from umake.network.ftp_adapter import FTPAdapter
from umake.tools import ChecksumType, root_lock
logger = logging.getLogger(__name__)
class DownloadItem(namedtuple('DownloadItem', ['url', 'checksum', 'headers', 'ignore_encoding', 'cookies'])):
    """A single download request with optional verification data.

    Only ``url`` is required. ``checksum`` should be an instance of
    tools.Checksum, ``headers`` a dictionary of HTTP headers, and
    ``cookies`` a cookie dictionary; all three default to None.
    ``ignore_encoding`` defaults to False.
    """
    def __new__(cls, url, checksum=None, headers=None, ignore_encoding=False, cookies=None):
        # Collect the field values once so defaults are applied uniformly.
        fields = (url, checksum, headers, ignore_encoding, cookies)
        return super().__new__(cls, *fields)
class DownloadCenter:
"""Read or download requested urls in separate threads."""
BLOCK_SIZE = 1024 * 8 # from urlretrieve code
DownloadResult = namedtuple("DownloadResult", ["buffer", "error", "fd", "final_url", "cookies"])
    def __init__(self, urls, on_done, download=True, report=lambda x: None):
        """Generate a threaded download machine.

        urls is a list of DownloadItems to download or read from.
        on_done is the callback that will be called once all those urls
        are downloaded.
        report, if not None, will be called once any download is in
        progress, reporting a dict of current download with current/size
        parameters.

        The callback will get a dictionary parameter like:
        {
            "url":
                DownloadResult(buffer=page content as bytes if download is
                                      set to False. close() will clean it
                                      from memory,
                               error=string detailing the error which
                                     occurred (path and content would be
                                     empty),
                               fd=temporary file descriptor. close() will
                                  delete it from disk,
                               final_url=the final url, which may differ
                                         from the start if there were
                                         redirects,
                               cookies=a dictionary of cookies after the
                                       request
                               )
        }
        """
        self._done_callback = on_done
        self._wired_report = report
        self._download_to_file = download
        self._urls = urls
        # NOTE(review): _downloaded_content is not populated in this
        # method; presumably filled by the completion callback — verify.
        self._downloaded_content = {}
        self._download_progress = {}
        # One worker per URL so all downloads proceed concurrently.
        executor = futures.ThreadPoolExecutor(max_workers=len(urls))
        for url_request in self._urls:
            # grab the md5sum if any
            # switch between inline memory and temp file
            if download:
                # Named because shutils and tarfile library needs a .name property
                # http://bugs.python.org/issue21044
                # also, ensure we keep the same suffix
                path, ext = os.path.splitext(url_request.url)
                # We want to ensure that we don't create files as root
                root_lock.acquire()
                dest = tempfile.NamedTemporaryFile(suffix=ext)
                root_lock.release()
                logger.info("Start downloading {} to a temp file".format(url_request))
            else:
                dest = BytesIO()
                logger.info("Start downloading {} in memory".format(url_request))
            future = executor.submit(self._fetch, url_request, dest)
            # Tag the future so the completion callback can identify it.
            future.tag_url = url_request.url
            future.tag_download = download
            future.tag_dest = dest
            future.add_done_callback(self._one_done)
    def _fetch(self, download_item, dest):
        """Get an url content and close the connexion.

        This will write the content to dest and verify the checksum if
        one was provided on the DownloadItem.

        Returns a tuple of (dest, final_url, cookies).
        Runs on a worker thread (see __init__'s executor.submit).
        """
        url = download_item.url
        checksum = download_item.checksum
        headers = download_item.headers or {}
        cookies = download_item.cookies
        def _report(block_no, block_size, total_size):
            # Translate block counts into a byte-based progress dict and
            # forward it to the wired report callback.
            current_size = int(block_no * block_size)
            if total_size != -1:
                current_size = min(current_size, total_size)
            self._download_progress[url] = {"current": current_size, "size": total_size}
            logger.debug("Deliver download update: {} of {}".format(self._download_progress, total_size))
            self._wired_report(self._download_progress)
        # Requests support redirection out of the box.
        # Create a session so we can mount our own FTP adapter.
        session = requests.Session()
        session.mount('ftp://', FTPAdapter())
        try:
            with closing(session.get(url, stream=True, headers=headers, cookies=cookies)) as r:
                r.raise_for_status()
                # -1 signals "unknown size" to _report.
                content_size = int(r.headers.get('content-length', -1))
                # read in chunk and send report updates
                block_num = 0
                _report(block_num, self.BLOCK_SIZE, content_size)
                for data in r.raw.stream(amt=self.BLOCK_SIZE, decode_content=not download_item.ignore_encoding):
                    dest.write(data)
                    block_num += 1
                    _report(block_num, self.BLOCK_SIZE, content_size)
                final_url = r.url
                cookies = session.cookies
        except requests.exceptions.InvalidSchema as exc:
            # Wrap this for a nicer error message.
            # NOTE(review): BaseException is overly broad (it bypasses
            # `except Exception` handlers); kept for compatibility.
            raise BaseException("Protocol not supported.") from exc
        if checksum and checksum.checksum_value:
            checksum_type = checksum.checksum_type
            checksum_value = checksum.checksum_value
            logger.debug("Checking checksum ({}).".format(checksum_type.name))
            # Rewind so the hash covers the full downloaded content.
            dest.seek(0)
            if checksum_type is ChecksumType.sha1:
                actual_checksum = self.sha1_for_fd(dest)
            elif checksum_type is ChecksumType.md5:
                actual_checksum = self.md5_for_fd(dest)
            elif checksum_type is ChecksumType.sha256:
                actual_checksum = self.sha256_for_fd(dest)
            elif checksum_type is ChecksumType.sha512:
                actual_checksum = self.sha512_for_fd(dest)
            else:
                msg = "Unsupported checksum type: {}.".format(checksum_type)
                raise BaseException(msg)
            logger.debug("Expected: {}, actual: {}.".format(checksum_value,
                                                            actual_checksum))
            if checksum_value != actual_checksum:
                msg = ("The checksum of {} doesn't match. Corrupted download? "
                       "Aborting.").format(url)
                raise BaseException(msg)
        return dest, final_url, cookies
def _one_done(self, future):
"""Callback that will be called once the download finishes.
(will be wired on the constructor)
"""
if future.exception():
logger.error("{} couldn't finish download: {}".format(future.tag_url, future.exception()))
result = self.DownloadResult(buffer=None, error=str(future.exception()), fd=None, final_url=None,
cookies=None)
# cleaned unusable temp file as something bad happened
future.tag_de |
mixman/djangodev | django/contrib/auth/tests/forms.py | Python | bsd-3-clause | 10,655 | 0.002065 | from __future__ import with_statement
import os
from django.core import mail
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm, PasswordChangeForm, SetPasswordForm, UserChangeForm, PasswordResetForm
from django.test import TestCase
class UserCreationFormTest(TestCase):
    """Validation behaviour of ``UserCreationForm``."""
    fixtures = ['authtestdata.json']
    def test_user_already_exists(self):
        # A username already present in the fixture must be rejected.
        data = {
            'username': 'testclient',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [u'A user with that username already exists.'])
    def test_invalid_data(self):
        # '!' is not an allowed username character.
        data = {
            'username': 'jsmith!',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [u'This value may contain only letters, numbers and @/./+/-/_ characters.'])
    def test_password_verification(self):
        # The verification password is incorrect.
        data = {
            'username': 'jsmith',
            'password1': 'test123',
            'password2': 'test',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password2"].errors,
                         [u"The two password fields didn't match."])
    def test_both_passwords(self):
        # One (or both) passwords weren't given
        data = {'username': 'jsmith'}
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors,
                         [u'This field is required.'])
        self.assertEqual(form['password2'].errors,
                         [u'This field is required.'])
        data['password2'] = 'test123'
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors,
                         [u'This field is required.'])
    def test_success(self):
        # The success case.
        data = {
            'username': 'jsmith@example.com',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        u = form.save()
        self.assertEqual(repr(u), '<User: jsmith@example.com>')
class AuthenticationFormTest(TestCase):
    """Validation behaviour of ``AuthenticationForm``."""
    fixtures = ['authtestdata.json']
    def test_invalid_username(self):
        # The user submits an invalid username.
        data = {
            'username': 'jsmith_does_not_exist',
            'password': 'test123',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [u'Please enter a correct username and password. Note that both fields are case-sensitive.'])
    def test_inactive_user(self):
        # The user is inactive.
        data = {
            'username': 'inactive',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [u'This account is inactive.'])
    def test_success(self):
        # The success case
        data = {
            'username': 'testclient',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.non_field_errors(), [])
class SetPasswordFormTest(TestCase):
    """Validation behaviour of ``SetPasswordForm``."""
    fixtures = ['authtestdata.json']
    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'abc123',
            'new_password2': 'abc',
        }
        form = SetPasswordForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [u"The two password fields didn't match."])
    def test_success(self):
        # Matching passwords validate successfully.
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = SetPasswordForm(user, data)
        self.assertTrue(form.is_valid())
class PasswordChangeFormTest(TestCase):
    """Validation behaviour of ``PasswordChangeForm``."""
    fixtures = ['authtestdata.json']
    def test_incorrect_password(self):
        # A wrong old password is reported on the old_password field.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'test',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["old_password"].errors,
                         [u'Your old password was entered incorrectly. Please enter it again.'])
    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc',
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [u"The two password fields didn't match."])
    def test_success(self):
        # The success case.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = PasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())
    def test_field_order(self):
        # Regression test - check the order of fields:
        user = User.objects.get(username='testclient')
        self.assertEqual(PasswordChangeForm(user, {}).fields.keys(),
                         ['old_password', 'new_password1', 'new_password2'])
class UserChangeFormTest(TestCase):
    """Validation and regression tests for ``UserChangeForm``."""
    fixtures = ['authtestdata.json']
    def test_username_validity(self):
        # Spaces are not allowed in usernames.
        user = User.objects.get(username='testclient')
        data = {'username': 'not valid'}
        form = UserChangeForm(data, instance=user)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['username'].errors,
                         [u'This value may contain only letters, numbers and @/./+/-/_ characters.'])
    def test_bug_14242(self):
        # A regression test, introduce by adding an optimization for the
        # UserChangeForm.
        class MyUserForm(UserChangeForm):
            def __init__(self, *args, **kwargs):
                super(MyUserForm, self).__init__(*args, **kwargs)
                self.fields['groups'].help_text = 'These groups give users different permissions'
            class Meta(UserChangeForm.Meta):
                fields = ('groups',)
        # Just check we can create it
        form = MyUserForm({})
class PasswordResetFormTest(TestCase):
fixtures = ['authtestdata.json']
def create_dummy_user(self):
"""creates a user and returns a tuple
(user_object, username, email)
"""
username = 'jsmith'
email = 'jsmith@example.com'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email':'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors,
[u'Enter a valid e-mail address.'])
def test_nonexistant_email(self):
# Test nonexistant email address
data = {'email':'foo@bar.com'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self |
batiste/django-page-cms | pages/tests/test_functional.py | Python | bsd-3-clause | 35,775 | 0.001621 | # -*- coding: utf-8 -*-
"""Django page CMS functionnal tests suite module."""
from pages.models import Page, Content, PageAlias
from pages.tests.testcase import TestCase
import django
from django.conf import settings
from django.urls import reverse
from pages.utils import get_now
from pages.phttp import get_request_mock
from pages.views import details
import datetime
add_url = reverse("admin:pages_page_add")
changelist_url = reverse("admin:pages_page_changelist")
class FunctionnalTestCase(TestCase):
"""Django page CMS functionnal tests suite class."""
def test_add_page(self):
"""Test that the add admin page could be displayed via the
admin"""
c = self.get_admin_client()
response = c.get(add_url)
self.assertEqual(response.status_code, 200)
def test_create_page(self):
"""Test that a page can be created via the admin."""
c = self.get_admin_client()
page_data = self.get_new_page_data()
response = c.post(add_url, page_data)
self.assertRedirects(response, changelist_url)
slug_content = Content.objects.get_content_slug_by_slug(
page_data['slug']
)
assert(slug_content is not None)
page = slug_content.page
self.assertEqual(page.title(), page_data['title'])
self.assertEqual(page.slug(), page_data['slug'])
self.assertNotEqual(page.last_modification_date, None)
    def test_delete_page(self):
        """Create a page, then delete it."""
        c = self.get_admin_client()
        page_data = self.get_new_page_data()
        response = c.post(add_url, page_data)
        self.assertRedirects(response, changelist_url)
        slug_content = Content.objects.get_content_slug_by_slug(
            page_data['slug']
        )
        assert(slug_content is not None)
        # Snapshot the count so deletion can be verified numerically.
        pageCount = Page.objects.count()
        page = slug_content.page
        page.delete()
        # The slug lookup must no longer resolve after deletion.
        slug_content = Content.objects.get_content_slug_by_slug(
            page_data['slug']
        )
        assert(slug_content is None)
        self.assertEqual(Page.objects.count(), pageCount - 1)
    def test_slug_collision(self):
        """Test a slug collision.

        With PAGE_UNIQUE_SLUG_REQUIRED on, a duplicate slug is rejected;
        with it off, two distinct pages may share the same slug.
        """
        self.set_setting("PAGE_UNIQUE_SLUG_REQUIRED", True)
        c = self.get_admin_client()
        page_data = self.get_new_page_data()
        response = c.post(add_url, page_data)
        self.assertRedirects(response, changelist_url)
        self.set_setting("PAGE_UNIQUE_SLUG_REQUIRED", False)
        # Re-posting the same slug now renders the form again (status 200).
        response = c.post(add_url, page_data)
        self.assertEqual(response.status_code, 200)
        page1 = Content.objects.get_content_slug_by_slug(page_data['slug']).page
        page_data['position'] = 'first-child'
        page_data['target'] = page1.id
        response = c.post(add_url, page_data)
        self.assertRedirects(response, changelist_url)
        page2 = Content.objects.get_content_slug_by_slug(page_data['slug']).page
        self.assertNotEqual(page1.id, page2.id)
    def test_automatic_slug_renaming(self):
        """Test a slug renaming.

        With PAGE_AUTOMATIC_SLUG_RENAMING on, colliding slugs get a
        numeric suffix, and only the *latest* slug content of another
        page can force a rename.
        """
        self.set_setting("PAGE_AUTOMATIC_SLUG_RENAMING", True)
        c = self.get_admin_client()
        page_data = self.get_new_page_data()
        page_data['slug'] = "slug"
        response = c.post(add_url, page_data)
        self.assertRedirects(response, changelist_url)
        response = c.post(add_url, page_data)
        self.assertRedirects(response, changelist_url)
        response = c.post(add_url, page_data)
        self.assertRedirects(response, changelist_url)
        slug = page_data['slug']
        # Three pages: "slug", "slug-2", "slug-3".
        page1 = Content.objects.get_content_slug_by_slug(slug).page
        page2 = Content.objects.get_content_slug_by_slug(slug+'-2').page
        page3 = Content.objects.get_content_slug_by_slug(slug+'-3').page
        self.assertNotEqual(page1.id, page2.id)
        self.assertNotEqual(page2.id, page3.id)
        self.assertEqual(Content.objects.filter(type="slug").count(), 3)
        # post again on page 3 doesn't change the slug
        page_3_slug = page3.slug()
        page_data['slug'] = page_3_slug
        response = c.post(reverse("admin:pages_page_change", args=[page3.id]), page_data)
        self.assertRedirects(response, changelist_url)
        self.assertEqual(Content.objects.filter(type="slug").count(), 3)
        content = Content.objects.get_content_slug_by_slug(page_3_slug)
        self.assertEqual(page3.id, content.page.id)
        # change an old slug of another page and see that it doesn't
        # influence the current slug of this page
        old_slug = Content.objects.filter(page=page1).latest("creation_date")
        new_slug = Content(page=page1, body=page_3_slug, type="slug")
        # Backdate so this new content is NOT page1's latest slug.
        new_slug.creation_date = old_slug.creation_date - datetime.timedelta(seconds=5)
        new_slug.save()
        self.assertEqual(Content.objects.filter(type="slug").count(), 4)
        # check than the old slug doesn't trigger a new slug for page 3
        response = c.post(reverse("admin:pages_page_change", args=[page3.id]), page_data)
        content = Content.objects.get_content_slug_by_slug(page_3_slug)
        self.assertEqual(page3.id, content.page.id)
        self.assertEqual(Content.objects.filter(type="slug").count(), 4)
        # Now make it page1's latest slug; page 3 must be renamed.
        new_slug.creation_date = old_slug.creation_date + datetime.timedelta(seconds=5)
        new_slug.save()
        # check than the new slug does trigger a new slug for page 3
        response = c.post(reverse("admin:pages_page_change", args=[page3.id]), page_data)
        content = Content.objects.get_content_slug_by_slug(page_3_slug)
        self.assertEqual(page1.id, content.page.id)
        content = Content.objects.get_content_slug_by_slug(page_3_slug+'-2')
        self.assertEqual(page3.id, content.page.id)
def test_details_view(self):
"""Test the details view basics."""
c = self.get_admin_client()
response = c.get(self.get_page_url())
self.assertEqual(response.status_code, 404)
page_data = self.get_new_page_data()
page_data['status'] = Page.DRAFT
response = c.post(add_url, page_data)
response = c.get(self.get_page_url())
self.assertEqual(response.status_code, 200)
page_data = self.get_new_page_data()
page_data['status'] = Page.PUBLISHED
page_data[ | 'slug'] = 'test-page-2'
page_data['template'] = 'pages/examples/index.html'
response = c.post(add_url, page_data)
self.assertRedirects(response, changelist_url)
response = c.get(self.get_page_url('t | est-page-2'))
self.assertEqual(response.status_code, 200)
    def test_edit_page(self):
        """Test that a page can be edited via the admin."""
        c = self.get_admin_client()
        page_data = self.get_new_page_data()
        response = c.post(reverse('admin:pages_page_add'), page_data)
        self.assertRedirects(response, changelist_url)
        page = Page.objects.all()[0]
        response = c.get(reverse("admin:pages_page_change", args=[page.id]))
        self.assertEqual(response.status_code, 200)
        # Post changed content back through the change form.
        page_data['title'] = 'changed title'
        page_data['body'] = 'changed body'
        response = c.post(reverse("admin:pages_page_change", args=[page.id]), page_data)
        self.assertRedirects(response, changelist_url)
        # Reload and verify both the title and the body content.
        page = Page.objects.get(id=page.id)
        self.assertEqual(page.title(), 'changed title')
        body = Content.objects.get_content(page, 'en', 'body')
        self.assertEqual(body, 'changed body')
def test_site_framework(self):
"""Test the site framework, and test if it's possible to
disable it."""
from pages import settings as pages_settings
# it's not possible to enforce PAGE_USE_SITE_ID in the tests
if not pages_settings.PAGE_USE_SITE_ID:
#TODO: use unittest.skip skip when 2.7
return
# this is necessary to make the test pass
with self.settings(SITE_ID=2):
c = self.get_admin_client()
page_data = self.get_new_page_data()
page_data["sites"] = [2]
response = c.post( |
from __future__ import absolute_import

# Public API: only cKDTree is exported via star-import.
__all__ = ['cKDTree']

from . import ckdtree

# Re-export the extension types at package level.
cKDTree = ckdtree.cKDTree
cKDTreeNode = ckdtree.cKDTreeNode
|
import column_chooser
import constants
import library_manager
import list_manager
import main_window
import media
import movie_tagger
import prefs_manager
import prefs_window
import tag_manager

# Public API of the package: one entry per submodule imported above.
__all__ = ['column_chooser',
           'constants',
           'library_manager',
           'list_manager',
           'main_window',
           'media',
           'movie_tagger',
           'prefs_manager',
           'prefs_window',
           'tag_manager']
|
alexphelps/django-drip | drip/models.py | Python | mit | 6,448 | 0.003567 | from datetime import datetime, timedelta
import random
from django.db import models
from django.core.exceptions import ValidationError
from django.conf import settings
from django.utils.functional import cached_property
from drip.utils import get_user_model
# just using this to parse, but totally insane package naming...
# https://bitbucket.org/schinckel/django-timedelta-field/
import timedelta as djangotimedelta
class Drip(models.Model):
    """An email drip campaign definition.

    Stores the content of a drip: templates for the subject and HTML
    body, an optional custom sender, and an enabled flag.
    """
    date = models.DateTimeField(auto_now_add=True)
    lastchanged = models.DateTimeField(auto_now=True)
    name = models.CharField(
        max_length=255,
        unique=True,
        verbose_name='Drip Name',
        help_text='A unique name for this drip.')
    enabled = models.BooleanField(default=False)
    from_email = models.EmailField(null=True, blank=True,
                                   help_text='Set a custom from email.')
    from_email_name = models.CharField(max_length=150, null=True, blank=True,
                                       help_text="Set a name for a custom from email.")
    subject_template = models.TextField(null=True, blank=True)
    body_html_template = models.TextField(null=True, blank=True,
                                          help_text='You will have settings and user in the context.')
    message_class = models.CharField(max_length=120, blank=True, default='default')

    @property
    def drip(self):
        """Build the runtime DripBase helper configured from this model."""
        # Imported here to avoid a circular import with drip.drips.
        from drip.drips import DripBase
        drip = DripBase(drip_model=self,
                        name=self.name,
                        from_email=self.from_email if self.from_email else None,
                        from_email_name=self.from_email_name if self.from_email_name else None,
                        subject_template=self.subject_template if self.subject_template else None,
                        body_template=self.body_html_template if self.body_html_template else None)
        return drip

    def __unicode__(self):
        return self.name
class SentDrip(models.Model):
    """
    Keeps a record of all sent drips.
    """
    # When the drip email was sent.
    date = models.DateTimeField(auto_now_add=True)
    drip = models.ForeignKey('drip.Drip', related_name='sent_drips')
    user = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'), related_name='sent_drips')
    # Rendered subject and body exactly as sent to the user.
    subject = models.TextField()
    body = models.TextField()
    from_email = models.EmailField(
        null=True, default=None  # For south so that it can migrate existing rows.
    )
    from_email_name = models.CharField(max_length=150,
        null=True, default=None  # For south so that it can migrate existing rows.
    )
# Queryset method names selectable on a rule.
METHOD_TYPES = (
    ('filter', 'Filter'),
    ('exclude', 'Exclude'),
)
# Django ORM lookup suffixes and their human-readable labels.
# Labels for iregex/endswith/istartswith were copy-paste errors; fixed.
LOOKUP_TYPES = (
    ('exact', 'exactly'),
    ('iexact', 'exactly (case insensitive)'),
    ('contains', 'contains'),
    ('icontains', 'contains (case insensitive)'),
    ('regex', 'regex'),
    ('iregex', 'regex (case insensitive)'),
    ('gt', 'greater than'),
    ('gte', 'greater than or equal to'),
    ('lt', 'less than'),
    ('lte', 'less than or equal to'),
    ('startswith', 'starts with'),
    ('endswith', 'ends with'),
    ('istartswith', 'starts with (case insensitive)'),
    ('iendswith', 'ends with (case insensitive)'),
)
class QuerySetRule(models.Model):
    """A single filter/exclude rule applied to the User queryset."""
    date = models.DateTimeField(auto_now_add=True)
    lastchanged = models.DateTimeField(auto_now=True)
    drip = models.ForeignKey(Drip, related_name='queryset_rules')
    method_type = models.CharField(max_length=12, default='filter', choices=METHOD_TYPES)
    field_name = models.CharField(max_length=128, verbose_name='Field name of User')
    lookup_type = models.CharField(max_length=12, default='exact', choices=LOOKUP_TYPES)
    field_value = models.CharField(max_length=255,
        help_text=('Can be anything from a number, to a string. Or, do ' +
                   '`now-7 days` or `today+3 days` for fancy timedelta.'))
    def clean(self):
        """Validate the rule by actually applying it to the User queryset."""
        User = get_user_model()
        try:
            self.apply(User.objects.all())
        except Exception as e:
            raise ValidationError(
                '%s raised trying to apply rule: %s' % (type(e).__name__, e))
    @property
    def annotated_field_name(self):
        """Map a `m2m__count` field name to its annotation alias."""
        field_name = self.field_name
        if field_name.endswith('__count'):
            agg, _, _ = field_name.rpartition('__')
            field_name = 'num_%s' % agg.replace('__', '_')
        return field_name
    def apply_any_annotation(self, qs):
        """Attach a Count annotation when the rule targets `...__count`."""
        if self.field_name.endswith('__count'):
            field_name = self.annotated_field_name
            agg, _, _ = self.field_name.rpartition('__')
            qs = qs.annotate(**{field_name: models.Count(agg, distinct=True)})
        return qs
    def filter_kwargs(self, qs, now=datetime.now):
        """Build the {lookup: value} kwargs for this rule.

        Supports `now±...` / `today±...` timedelta prefixes, `F_`-prefixed
        F expressions, and the literal strings 'True'/'False'.
        """
        # Support Count() as m2m__count
        field_name = self.annotated_field_name
        field_name = '__'.join([field_name, self.lookup_type])
        field_value = self.field_value
        # set time deltas and dates
        if self.field_value.startswith('now-'):
            field_value = self.field_value.replace('now-', '')
            field_value = now() - djangotimedelta.parse(field_value)
        elif self.field_value.startswith('now+'):
            field_value = self.field_value.replace('now+', '')
            field_value = now() + djangotimedelta.parse(field_value)
        elif self.field_value.startswith('today-'):
            field_value = self.field_value.replace('today-', '')
            field_value = now().date() - djangotimedelta.parse(field_value)
        elif self.field_value.startswith('today+'):
            field_value = self.field_value.replace('today+', '')
            field_value = now().date() + djangotimedelta.parse(field_value)
        # F expressions
        if self.field_value.startswith('F_'):
            field_value = self.field_value.replace('F_', '')
            field_value = models.F(field_value)
        # set booleans
        if self.field_value == 'True':
            field_value = True
        if self.field_value == 'False':
            field_value = False
        kwargs = {field_name: field_value}
        return kwargs
    def apply(self, qs, now=datetime.now):
        """Apply this rule to queryset ``qs`` using its method_type."""
        kwargs = self.filter_kwargs(qs, now)
        qs = self.apply_any_annotation(qs)
        if self.method_type == 'filter':
            return qs.filter(**kwargs)
        elif self.method_type == 'exclude':
            return qs.exclude(**kwargs)
        # catch as default
        return qs.filter(**kwargs)
|
scrapinghub/keystone | keystone/auth/plugins/external.py | Python | apache-2.0 | 6,469 | 0 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keystone External Authentication Plugins"""
import abc
import six
from keystone import auth
from keystone.common import config
from keystone.common import dependency
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import versionutils
CONF = config.CONF
@six.add_metaclass(abc.ABCMeta)
class Base(auth.AuthMethodHandler):
    """Common machinery for REMOTE_USER-based external auth plugins."""
    method = 'external'
    def authenticate(self, context, auth_info, auth_context):
        """Use REMOTE_USER to look up the user in the identity backend.
        auth_context is an in-out variable that will be updated with the
        user_id from the actual user from the REMOTE_USER env variable.

        Raises Unauthorized when REMOTE_USER is absent or the lookup fails.
        """
        try:
            REMOTE_USER = context['environment']['REMOTE_USER']
        except KeyError:
            msg = _('No authenticated user')
            raise exception.Unauthorized(msg)
        try:
            user_ref = self._authenticate(REMOTE_USER, context)
            auth_context['user_id'] = user_ref['id']
            # Record a kerberos token bind when the web server negotiated
            # the authentication and kerberos binding is configured.
            if ('kerberos' in CONF.token.bind and
                (context['environment'].get('AUTH_TYPE', '').lower()
                 == 'negotiate')):
                auth_context['bind']['kerberos'] = user_ref['name']
        except Exception:
            # Deliberately broad: any lookup failure becomes Unauthorized.
            msg = _('Unable to lookup user %s') % (REMOTE_USER)
            raise exception.Unauthorized(msg)
    @abc.abstractmethod
    def _authenticate(self, remote_user, context):
        """Look up the user in the identity backend.
        Return user_ref
        """
        pass
@dependency.requires('identity_api')
class DefaultDomain(Base):
    """External auth that resolves REMOTE_USER in the default domain."""
    def _authenticate(self, remote_user, context):
        """Return the user_ref for *remote_user* in the default domain."""
        default_domain = CONF.identity.default_domain_id
        return self.identity_api.get_user_by_name(remote_user, default_domain)
@dependency.requires('assignment_api', 'identity_api')
class Domain(Base):
    """External auth that honours an optional REMOTE_DOMAIN variable."""
    def _authenticate(self, remote_user, context):
        """Resolve *remote_user*, scoped by REMOTE_DOMAIN when present.

        The domain is read from the REMOTE_DOMAIN environment variable;
        when absent, the configured default domain is used.
        """
        # Sentinel distinguishes "key missing" from an explicit None value,
        # matching the original try/except KeyError behavior exactly.
        _absent = object()
        try:
            domain_name = context['environment']['REMOTE_DOMAIN']
        except KeyError:
            domain_name = _absent
        if domain_name is _absent:
            domain_id = CONF.identity.default_domain_id
        else:
            domain_id = self.assignment_api.get_domain_by_name(
                domain_name)['id']
        return self.identity_api.get_user_by_name(remote_user, domain_id)
@dependency.requires('assignment_api', 'identity_api')
class KerberosDomain(Domain):
    """Allows `kerberos` as a method."""
    method = 'kerberos'
    def _authenticate(self, remote_user, context):
        """Reject users that did not arrive via SPNEGO/Kerberos."""
        if context['environment'].get('AUTH_TYPE') != 'Negotiate':
            raise exception.Unauthorized(_("auth_type is not Negotiate"))
        return super(KerberosDomain, self)._authenticate(remote_user, context)
class ExternalDefault(DefaultDomain):
    """Deprecated. Please use keystone.auth.external.DefaultDomain instead."""
    # The decorator emits a deprecation warning on instantiation; behavior
    # is otherwise inherited unchanged from DefaultDomain.
    @versionutils.deprecated(
        as_of=versionutils.deprecated.ICEHOUSE,
        in_favor_of='keystone.auth.external.DefaultDomain',
        remove_in=+1)
    def __init__(self):
        super(ExternalDefault, self).__init__()
class ExternalDomain(Domain):
    """Deprecated. Please use keystone.auth.external.Domain instead."""
    # The decorator emits a deprecation warning on instantiation; behavior
    # is otherwise inherited unchanged from Domain.
    @versionutils.deprecated(
        as_of=versionutils.deprecated.ICEHOUSE,
        in_favor_of='keystone.auth.external.Domain',
        remove_in=+1)
    def __init__(self):
        super(ExternalDomain, self).__init__()
@dependency.requires('identity_api')
class LegacyDefaultDomain(Base):
    """Deprecated. Please use keystone.auth.external.DefaultDomain instead.
    This plugin exists to provide compatibility for the unintended behavior
    described here: https://bugs.launchpad.net/keystone/+bug/1253484
    """
    @versionutils.deprecated(
        as_of=versionutils.deprecated.ICEHOUSE,
        in_favor_of='keystone.auth.external.DefaultDomain',
        remove_in=+1)
    def __init__(self):
        super(LegacyDefaultDomain, self).__init__()
    def _authenticate(self, remote_user, context):
        """Resolve the user in the default domain.
        NOTE(dolph): everything after the first '@' is (unintentionally,
        but now knowingly) discarded -- see bug 1253484.
        """
        username = remote_user.partition('@')[0]
        default_domain = CONF.identity.default_domain_id
        return self.identity_api.get_user_by_name(username, default_domain)
@dependency.requires('assignment_api', 'identity_api')
class LegacyDomain(Base):
    """Deprecated. Please use keystone.auth.external.Domain instead."""
    @versionutils.deprecated(
        as_of=versionutils.deprecated.ICEHOUSE,
        in_favor_of='keystone.auth.external.Domain',
        remove_in=+1)
    def __init__(self):
        super(LegacyDomain, self).__init__()
    def _authenticate(self, remote_user, context):
        """Resolve a user of the form user[@domain].
        The substring after the rightmost '@' (if any) names the domain;
        without one, the default domain is assumed.
        """
        if '@' in remote_user:
            username, domain_name = remote_user.rsplit('@', 1)
            domain_ref = self.assignment_api.get_domain_by_name(domain_name)
            domain_id = domain_ref['id']
        else:
            username = remote_user
            domain_id = CONF.identity.default_domain_id
        return self.identity_api.get_user_by_name(username, domain_id)
|
tensorflow/graphics | tensorflow_graphics/geometry/representation/mesh/tests/mesh_test_utils.py | Python | apache-2.0 | 1,686 | 0.004745 | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper routines for mesh unit tests.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def create_single_triangle_mesh():
  r"""Creates a single-triangle mesh, in the z=0 plane and facing +z.
  (0,1) 2
        |\
        | \
        |  \
  (0,0) 0---1 (1,0)
  Returns:
    vertices: A [3, 3] float array
    faces: A [1, 3] int array
  """
  corner_list = [(0, 0, 0), (1, 0, 0), (0, 1, 0)]
  vertices = np.asarray(corner_list, dtype=np.float32)
  faces = np.asarray([(0, 1, 2)], dtype=np.int32)
  return vertices, faces
def create_square_triangle_mesh():
  r"""Creates a square mesh, in the z=0 plane and facing +z.
  # (0,1) 2---3 (1,1)
  #       |\ /|
  #       | 4 |
  #       |/ \|
  # (0,0) 0---1 (1,0)
  Returns:
    vertices: A [5, 3] float array
    faces: A [4, 3] int array
  """
  corner_list = [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0.5, 0.5, 0)]
  vertices = np.asarray(corner_list, dtype=np.float32)
  # Four triangles fanning around the center vertex (index 4).
  triangle_list = [(0, 1, 4), (1, 3, 4), (3, 2, 4), (2, 0, 4)]
  faces = np.asarray(triangle_list, dtype=np.int32)
  return vertices, faces
|
dmsurti/mayavi | mayavi/components/actor2d.py | Python | bsd-3-clause | 5,263 | 0.00038 | """A simple actor component.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance
from traitsui.api import View, Group, Item, InstanceEditor
from tvtk.api import tvtk
# Local imports.
from mayavi.core.component import Component
######################################################################
# `Actor2D` class.
######################################################################
class Actor2D(Component):
    """Mayavi component wrapping a tvtk 2D actor, its mapper and property.

    Keeps the three tvtk objects wired together and re-renders the scene
    whenever any of them (or the scene foreground color) changes.
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The mapper.
    mapper = Instance(tvtk.AbstractMapper, record=True)
    # The actor.
    actor = Instance(tvtk.Prop, record=True)
    # The actor's property.
    property = Instance(tvtk.Property2D, record=True)
    ########################################
    # View related traits.
    # The Actor's view group.
    _actor_group = Group(Item(name='visibility'),
                         Item(name='height'),
                         Item(name='width'),
                         show_border=True, label='Actor')
    # The View for this object.
    view = View(Group(Item(name='actor', style='custom',
                           editor=InstanceEditor(view=View(_actor_group))),
                      show_labels=False,
                      label='Actor'
                      ),
                Group(Item(name='mapper',
                           style='custom',
                           resizable=True),
                      show_labels=False,
                      label='Mapper'),
                Group(Item(name='property',
                           style='custom',
                           resizable=True),
                      show_labels=False,
                      label='Property'),
                resizable=True,
                )
    ######################################################################
    # `Component` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* its tvtk
        pipeline.
        This method is invoked when the object is initialized via
        `__init__`. Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup. So upstream data
        will not be available. The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters.
        """
        # Assigning the traits fires the *_changed handlers below, which
        # wire mapper -> actor and actor.property -> self.property.
        if self.mapper is None:
            self.mapper = tvtk.TextMapper()
        self.actor = tvtk.Actor2D()
        self.property = self.actor.property
    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.
        This method is invoked (automatically) when the input fires a
        `pipeline_changed` event.
        """
        # Nothing to connect until at least one input with outputs exists.
        if (len(self.inputs) == 0) or \
               (len(self.inputs[0].outputs) == 0):
            return
        self.configure_connection(self.mapper, self.inputs[0])
        self.render()
    def update_data(self):
        """Override this method to do what is necessary when upstream
        data changes.
        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Invoke render to update any changes.
        self.render()
    ######################################################################
    # Non-public interface.
    ######################################################################
    def _setup_handlers(self, old, new):
        # Re-render whenever any trait of the new tvtk object changes;
        # drop the handler from the object being replaced.
        if old is not None:
            old.on_trait_change(self.render, remove=True)
        new.on_trait_change(self.render)
    def _mapper_changed(self, old, new):
        # Setup the handlers.
        self._setup_handlers(old, new)
        # Setup the inputs to the mapper.
        if (len(self.inputs) > 0) and (len(self.inputs[0].outputs) > 0):
            self.configure_connection(new, self.inputs[0])
        # Setup the actor's mapper.
        actor = self.actor
        if actor is not None:
            actor.mapper = new
        self.render()
    def _actor_changed(self, old, new):
        # Setup the handlers.
        self._setup_handlers(old, new)
        # Set the mapper.
        mapper = self.mapper
        if mapper is not None:
            new.mapper = mapper
        # Set the property.
        prop = self.property
        if prop is not None:
            new.property = prop
        # Setup the `actors` trait.
        self.actors = [new]
    def _property_changed(self, old, new):
        # Setup the handlers.
        self._setup_handlers(old, new)
        # Setup the actor.
        actor = self.actor
        if new is not actor.property:
            actor.property = new
    def _foreground_changed_for_scene(self, old, new):
        # Change the default color for the actor.
        self.property.color = new
        self.render()
    def _scene_changed(self, old, new):
        # Adopt the new scene's foreground color immediately.
        super(Actor2D, self)._scene_changed(old, new)
        self._foreground_changed_for_scene(None, new.foreground)
|
ActiveState/code | recipes/Python/444766_cherrypy_RESTResource/recipe-444766.py | Python | mit | 6,312 | 0.003169 | """
REST Resource
cherrypy controller mixin to make it easy to build REST applications.
handles nested resources and method-based dispatching.
here's a rough sample of what a controller would look like using this:
cherrypy.root = MainController()
cherrypy.root.user = UserController()
class PostController(RESTResource):
def index(self,post):
return post.as_html()
index.expose_resource = True
def delete(self,post):
post.destroySelf()
return "ok"
delete.expose_resource = True
def update(self,post,title="",body=""):
post.title = title
post.body = body
return "ok"
update.expose_resource = True
    def add(self, post, title="", body=""):
        post.title = title
        post.body = body
        return "ok"
    add.expose_resource = True
def REST_instantiate(self, slug):
try:
return Post.select(Post.q.slug == slug, Post.q.userID = self.parent.id)[0]
except:
return None
def REST_create(self, slug):
return Post(slug=slug,user=self.parent)
class UserController(RESTResource):
REST_children = {'posts' : PostController()}
def index(self,user):
return user.as_html()
index.expose_resource = True
def delete(self,user):
user.destroySelf()
return "ok"
delete.expose_resource = True
def update(self,user,fullname="",email=""):
user.fullname = fullname
user.email = email
return "ok"
update.expose_resource = True
def add(self, user, fullname="", email=""):
user.fullname = fullname
user.email = email
return "ok"
add.expose_resource = True
def extra_action(self,user):
# do something else
extra_action.expose_resource = True
def REST_instantiate(self, username):
try:
return User.byUsername(username)
except:
return None
def REST_create(self, username):
return User(username=username)
then, the site would have urls like:
/user/bob
/user/bob/posts/my-first-post
/user/bob/posts/my-second-post
which represent REST resources. calling 'GET /user/bob' would call the index() method on UserController
for the user bob. 'PUT /user/joe' would create a new user with username 'joe'. 'DELETE /user/joe'
would delete that user. 'GET /user/bob/posts/my-first-post' would call index() on the Post Controller
with the post with the slug 'my-first-post' that is owned by bob.
"""
import cherrypy
class RESTResource:
    """CherryPy mixin implementing HTTP-method-based REST dispatch.

    HTTP verbs map to handler methods via REST_map (per-class overrides)
    or REST_defaults; nested resources are traversed via REST_children.
    Handlers must be flagged with ``expose_resource = True``.  Subclasses
    must override REST_instantiate() (and usually REST_create()), and may
    provide a list() method for the collection URL.
    """
    # default method mapping. ie, if a GET request is made for
    # the resource's url, it will try to call an index() method (if it exists);
    # if a PUT request is made, it will try to call an add() method.
    # if you prefer other method names, just override these values in your
    # controller with REST_map
    REST_defaults = {'DELETE' : 'delete',
                     'GET' : 'index',
                     'POST' : 'update',
                     'PUT' : 'add'}
    REST_map = {}
    # if the resource has children resources, list them here. format is
    # a dictionary of name -> resource mappings. ie,
    #
    # REST_children = {'posts' : PostController()}
    REST_children = {}
    def REST_dispatch(self, resource, **params):
        """Invoke the handler mapped to the request's HTTP method.

        Assumes default() has already traversed to *resource*.  Raises
        cherrypy.NotFound when no exposed handler matches.
        """
        method = cherrypy.request.method
        # 'in' replaces the Python-2-only dict.has_key(); the getattr
        # defaults turn a missing or un-flagged handler into a 404 instead
        # of an AttributeError (500), which the old code could raise.
        if method in self.REST_map:
            m = getattr(self, self.REST_map[method], None)
            if m and getattr(m, "expose_resource", False):
                return m(resource, **params)
        elif method in self.REST_defaults:
            m = getattr(self, self.REST_defaults[method], None)
            if m and getattr(m, "expose_resource", False):
                return m(resource, **params)
        raise cherrypy.NotFound
    @cherrypy.expose
    def default(self, *vpath, **params):
        """Entry point: resolve the virtual path to a resource and dispatch."""
        if not vpath:
            return self.list(**params)
        # Make a copy of vpath in a list
        vpath = list(vpath)
        atom = vpath.pop(0)
        # Coerce the ID to the correct db type
        resource = self.REST_instantiate(atom)
        if resource is None:
            if cherrypy.request.method == "PUT":
                # PUT is special since it can be used to create a resource
                resource = self.REST_create(atom)
            else:
                raise cherrypy.NotFound
        # There may be further virtual path components.
        # Try to map them to methods in children or this class.
        if vpath:
            a = vpath.pop(0)
            if a in self.REST_children:
                c = self.REST_children[a]
                c.parent = resource
                return c.default(*vpath, **params)
            method = getattr(self, a, None)
            if method and getattr(method, "expose_resource", False):
                return method(resource, *vpath, **params)
            else:
                # path component was specified but doesn't
                # map to anything exposed and callable
                raise cherrypy.NotFound
        # No further known vpath components. Call a default handler
        # based on the method
        return self.REST_dispatch(resource, **params)
    def REST_instantiate(self, id):
        """ instantiate a REST resource based on the id
        this method MUST be overridden in your class. it will be passed
        the id (from the url fragment) and should return a model object
        corresponding to the resource.
        if the object doesn't exist, it should return None rather than throwing
        an error. if this method returns None and it is a PUT request,
        REST_create() will be called so you can actually create the resource.
        """
        raise cherrypy.NotFound
    def REST_create(self, id):
        """ create a REST resource with the specified id
        this method should be overridden in your class.
        this method will be called when a PUT request is made for a resource
        that doesn't already exist. you should create the resource in this method
        and return it.
        """
        raise cherrypy.NotFound
|
glenn-edgar/local_controller_3 | __backup__/py_cf_py3/chain_flow.py | Python | mit | 11,716 | 0.004097 | import datetime
import time
from .opcodes_py3 import Opcodes
class CF_Base_Interpreter():
    def __init__(self):
        """Create an empty interpreter: no chains, no pending events."""
        self.chains = []        # list of chain dicts, in definition order
        self.chain_map = {}     # chain name -> index into self.chains
        self.event_queue = []   # events awaiting execute_queue()
        self.current_chain = None
        self.opcodes = Opcodes()
        # Return codes an opcode handler is allowed to produce.
        self.valid_return_codes = {}
        self.valid_return_codes["CONTINUE"] = 1
        self.valid_return_codes["HALT"] = 1
        self.valid_return_codes["RESET"] = 1
        self.valid_return_codes["DISABLE"] = 1
        self.valid_return_codes["TERMINATE"] = 1
        self.valid_return_codes["SYSTEM_RESET"] = 1
        self.valid_return_codes["BREAK"] = 1
#
# Chain and link construction
#
    def define_chain(self, chain_name, auto_start):
        """Register a new chain and make it the current insertion target.
        auto_start -- when True the chain is activated by execute_initialize().
        """
        chain = {}
        chain["name"] = chain_name
        chain["index"] = 0
        chain["links"] = []
        chain["auto_start"] = auto_start
        chain["active"] = False
        chain["suspend"] = False
        self.current_chain = chain
        self.chain_map[ chain_name ] = len(self.chains)
        self.chains.append(chain)
    def insert_link(self, link_name, opcode_name, parameters):
        """Append a link (one opcode invocation) to the current chain.
        Requires define_chain() to have been called first.
        """
        instruction = self.opcodes.get_opcode(opcode_name)
        link = {}
        link["name"] = link_name
        link["op_code_name"] = opcode_name
        link["instruction"] = instruction # [0] code [1] local parameters
        link["init_flag"] = True
        link["active_flag"] = True
        link["parameters"] = parameters
        assert self.current_chain is not None, "assertion test"
        self.current_chain["links"].append(link)
def find_chain_object(self, chain_name):
for i in self.chains:
if chain_name == i["name"]:
return i
return None
def chain_to_list(self, chain):
return_value = chain
if not isinstance(chain, list):
assert isinstance(chain, str), "chain name is not a string "
return_value = [chain]
return return_value
    def reset_chain(self, chain):
        """Rewind chain(s) to link 0 and re-arm every link."""
        chain = self.chain_to_list(chain)
        for i in chain:
            assert isinstance(i, str), "chain name is not a string"
            k = self.find_chain_object(i)
            k["link_index"] = 0
            links = k["links"]
            for m in links:
                m["active_flag"] = True
                m["init_flag"] = True
    def resume_chain_code(self, chain):
        """Clear the suspend flag and mark chain(s) active again."""
        chain = self.chain_to_list(chain)
        for i in chain:
            assert isinstance(i, str), "chain name is not a string"
            k = self.find_chain_object(i)
            k["suspend"] = False
            k["active"] = True
    def suspend_chain_code(self, chain):
        """Deactivate chain(s) without resetting their link state."""
        chain = self.chain_to_list(chain)
        for i in chain:
            assert isinstance(i, str), "chain name is not a string"
            k = self.find_chain_object(i)
            k["active"] = False
            k["suspend"] = True
    def disable_chain_base(self, chain):
        """Deactivate chain(s), rewind to link 0 and disable every link."""
        chain = self.chain_to_list(chain)
        for i in chain:
            assert isinstance(i, str), "chain name is not a string"
            k = self.find_chain_object(i)
            k["link_index"] = 0
            k["active"] = False
            links = k["links"]
            for m in links:
                m["active_flag"] = False
                m["init_flag"] = True
    def enable_chain_base(self, chain):
        """Activate chain(s) from link 0 with every link re-armed."""
        chain = self.chain_to_list(chain)
        for i in chain:
            assert isinstance(i, str), "chain name is not a string"
            # print "i",i
            k = self.find_chain_object(i)
            k["link_index"] = 0
            k["active"] = True
            links = k["links"]
            for m in links:
                m["active_flag"] = True
                m["init_flag"] = True
def get_chain_state(self, chain):
assert isinstance(chain, str), "chain name is not a string"
obj = self.find_chain_object(chain)
return obj["active_flag"]
    def link_to_list(self, link):
        """Wrap a single link name in a list; pass lists through.
        NOTE(review): an identical definition later in this class shadows
        this one; they behave the same, but one should be removed.
        """
        return_value = link
        if not isinstance(link, list):
            assert isinstance(link, str), "chain name is not a string "
            return_value = [link]
        return return_value
#
# Link management
#
def find_link_object(self, chain, link):
links = chain["links"]
for i in links:
if link == i["name"]:
return i
return None
    def link_to_list(self, link):
        """Wrap a single link name in a list; pass lists through.
        NOTE(review): exact duplicate of the link_to_list defined earlier in
        this class; this later definition is the one Python keeps.
        """
        return_value = link
        if not isinstance(link, list):
            assert isinstance(link, str), "chain name is not a string "
            return_value = [link]
        return return_value
    def enable_link(self, link, *ref_chain):
        """Re-arm link(s) by name.
        Optional ref_chain[0] names the owning chain; defaults to the
        current chain.
        """
        link = self.link_to_list(link)
        if len(ref_chain) == 0:
            chain = self.current_chain
        else:
            chain = self.find_chain_object(ref_chain[0])
        for j in link:
            k = self.find_link_object(chain, j)
            k["init_flag"] = True
            k["active_flag"] = True
    def disable_link(self, link, *ref_chain):
        """Disable link(s) by name (init_flag stays armed for re-enabling).
        Optional ref_chain[0] names the owning chain; defaults to the
        current chain.
        """
        link = self.link_to_list(link)
        if len(ref_chain) == 0:
            chain = self.current_chain
        else:
            chain = self.find_chain_object(ref_chain[0])
        for j in link:
            k = self.find_link_object(chain, j)
            k["init_flag"] = True
            k["active_flag"] = False
# change state to new link
def change_state(self, active_link, *refChain):
if len(ref_chain) == 0:
chain = self.current_chain
else:
chain = self.find_chain_object(ref_chain[0])
link = self.find_link_obect(chain, active_link)
for i in range(0, len(chain["links"])):
chain["links"][i]["activeFlag"] = False
link["initFlag"] = True
link["activeFlag"] = True
def send_event(self, event_name, event_data):
event = {}
event["name"] = event_name
event["data"] = event_data
self.event_queue.append(event)
    def execute_initialize(self):
        """Clear pending events and (re)start every auto_start chain."""
        self.event_queue = []
        for j in self.chains:
            if j["auto_start"]:
                #j["link_index"] = 1
                j["active"] = True
                self.reset_chain(j["name"])
            else:
                j["active"] = False
    def execute_queue(self):
        """Dispatch queued events until the queue is empty.
        NOTE(review): pop() removes from the *end* of the list, so events
        are handled LIFO despite the "queue" name -- confirm intended.
        """
        while True:
            if len(self.event_queue) > 0:
                event = self.event_queue.pop()
                self.execute_event(event)
            else:
                return
    def execute_event(self, event):
        """Run *event* through every active chain."""
        for chain in self.chains:
            if chain["active"]:
                self.current_chain = chain
                self.execute_chain(chain, event)
    def execute_chain(self, chain, event):
        """Step through the chain's links from link 0 until one halts it."""
        loopFlag = True
        chain["link_index"] = 0
        while loopFlag:
            loopFlag = self.execute_link(chain, event)
def execute_link(self, chain, event):
# print "execute_link", chain["name"]
link_index = chain["link_index"]
self.current_link = link_index
# print "execute link",chain["name"],chain["link_index"],event
if link_index >= len(chain["links"]):
return False
link = chain["links"][link_index]
opcode_name = link["op_code_name"]
instruction = link["instruction"]
init_flag = link["init_flag"]
active_flag = link["active_flag"]
parameters = link["parameters"]
return_value = True
if active_flag:
if init_flag:
init_event = {}
init_event["name"] = "INIT"
return_value = instruction(self, chain, parameters, init_event)
link["init_flag"] = False
# print "initialize",return_value
else:
return_value = "CONTINUE"
# print "no_init"
if (return_value != "DISABLE") and (return_value != "RESET"):
temp = instruction(self, chain, parameters, event)
# print "temp",temp
return_value = self.check_return_cod |
douglasbagnall/sonograms | server.py | Python | mit | 8,302 | 0.00265 | #!/usr/bin/python
import random, re
import anydbm
import os, sys
from flask import Flask, render_templat | e, request, make_response
app = Flask(__name__)
IGNORED = 'ignored'
INTERESTING = 'interesting'
PENDING_FILES = set()
WAV_DIR = 'static/wav'
IGNORED_WAV_DIRS = ('doc-kiwi',
'doc-morepork', 'rfpt-15m',
'doc-minutes',
'doc-interesting',
'doc-weka',
'snippets',
| )
#WAV_DIR = 'static/wav-test'
CALLS_FOUND = 0
FILES_PROCESSED = 0
FILES_IGNORED = 0
FILES_INTERESTING = 0
UNCONFIRMED_TIMES = {}
REQUESTED_FILES = set()
DEFAULT_DBM_FILE = 'calls.dbm'
def gen_times_from_file(fn):
    """Yield (wav_filename, call_times) tuples parsed from a results file.

    Each line is either "<wav> <times...>" -- the times are normalised by
    sanitise_times() -- or a bare wav filename, which yields an empty list.
    """
    # 'with' guarantees the file is closed even if iteration is abandoned
    # or a parse error is raised; the old trailing close() could be skipped.
    with open(fn) as f:
        for line in f:
            line = line.strip()
            if ' ' in line:
                wav, times = line.split(None, 1)
                yield (wav, sanitise_times(times))
            else:
                yield (line, [])
def load_from_files(fn, ignored=None):
    """Rebuild the DB from exported text files ("when the database crashes").

    fn      -- results file of "<wav> <times...>" lines
    ignored -- optional file listing wav names to mark as IGNORED
    """
    if ignored:
        f = open(ignored)
        for line in f:
            line = line.strip()
            if line:
                DB[line] = IGNORED
        f.close()
    for wav, times in gen_times_from_file(fn):
        # times is a list of floats; stringify each value before joining
        # (the old code passed the floats straight to str.join, which
        # raises TypeError for any file with recorded call times).
        DB[wav] = ' '.join('%s' % x for x in times)
def set_up_dbm_and_file_list(dbm_file, included_wav_dirs=[], included_wavs=None):
    """Open/create the DBM and sync PENDING_FILES and counters with disk.

    NOTE(review): included_wav_dirs uses a mutable default argument; it is
    only read here, so it is harmless, but None would be safer.
    """
    global DB, FILES, CALLS_FOUND, FILES_PROCESSED, FILES_IGNORED, FILES_INTERESTING
    DB = anydbm.open(dbm_file, 'c')
    # sync with filesystem on start up
    for dirpath, dirnames, filenames in os.walk(WAV_DIR, followlinks=True):
        # d is the path relative to WAV_DIR ('' for the top directory)
        d = re.sub(WAV_DIR + '/?', '', dirpath)
        if included_wav_dirs:
            if d not in included_wav_dirs:
                continue
        elif d in IGNORED_WAV_DIRS:
            continue
        for fn in filenames:
            if fn.endswith('.wav'):
                if included_wavs and fn not in included_wavs:
                    continue
                if d:
                    ffn = d + '/' + fn
                else:
                    ffn = fn
                try:
                    # re-queue known files when an explicit wav list is given
                    if ffn not in DB or included_wavs:
                        PENDING_FILES.add(ffn)
                except:
                    print >>sys.stderr, "couldn't add %s, stupid dbm" % ffn
    # Recompute the summary counters from the stored results.
    for fn, calls in DB.iteritems():
        if calls == IGNORED:
            FILES_IGNORED += 1
        else:
            FILES_INTERESTING += calls.startswith(INTERESTING)
            FILES_PROCESSED += 1
        CALLS_FOUND += calls.count(' ') // 2
        ffn = os.path.join(WAV_DIR, fn)
        if not os.path.exists(ffn):
            print >> sys.stderr, "%s is missing" % ffn
    #load_from_files('times/results-319.txt', 'times/ignored-319.txt')
    DB.sync()
try:
    INTERESTING
except NameError:  # allow standalone import (e.g. tests) without the module globals
    INTERESTING = 'interesting'

def sanitise_times(times):
    """Normalise a call-time specification into a flat, sorted list of floats.

    *times* may be None/empty, a comma- or whitespace-separated string
    (optionally prefixed with the INTERESTING marker), or a pre-split
    sequence.  Returns [start1, end1, start2, end2, ...] with overlapping
    intervals merged.  Raises ValueError for an odd number of values or an
    interval whose end precedes its start.
    """
    if not times:
        return []
    try:
        string_types = (str, unicode)   # Python 2
    except NameError:
        string_types = (str,)           # Python 3: no separate unicode type
    if isinstance(times, string_types):
        if not isinstance(times, str):
            # Python 2 unicode input: keep the historical utf-8 byte form
            times = times.encode('utf-8')
        times = times.strip()
        if ',' in times:
            times = times.split(',')
        else:
            times = times.split()
    if not times:
        # e.g. an all-whitespace string; the old code raised IndexError here
        return []
    if times[0] == INTERESTING:
        times = times[1:]
    if not times:
        return []
    times = [float(x) for x in times if x]
    if len(times) & 1:
        raise ValueError("len(times) is odd: %d" % len(times))
    # so, now times is a non-empty list of floats; split it into pairs
    pairs = [times[i : i + 2] for i in range(0, len(times), 2)]
    for s, e in pairs:
        if e < s:
            raise ValueError("pair %s,%s has end before start" % (s, e))
    pairs.sort()
    # Merge overlapping intervals.
    ls, le = pairs[0]
    combined = []
    for rs, re_ in pairs[1:]:
        if rs <= le:
            # overlap --> merge; max() fixes the old bug where an interval
            # fully contained in the current one *shrank* the merged end.
            le = max(le, re_)
        else:
            combined.append(ls)
            combined.append(le)
            ls = rs
            le = re_
    combined.append(ls)
    combined.append(le)
    return combined
def save_results():
    """Persist the submitted annotation for one wav into DB.

    Reads the wav name and call times from the current request (form or
    query args), updates the module-level counters, and returns a short
    status message for display.
    """
    global FILES_PROCESSED, FILES_IGNORED, CALLS_FOUND, FILES_INTERESTING
    if request.method == 'POST':
        get = request.form.get
    else:
        get = request.args.get
    wav = get('wav')
    if wav is None:
        return "Hello!"
    wav = wav.encode('utf-8')
    if not wav in PENDING_FILES:
        return "wav file '%s' is unknown" % wav
    if get('skip'):
        # leave the file pending for another pass
        return "Skipped '%s'" % wav
    PENDING_FILES.discard(wav)
    if get('ignore'):
        DB[wav] = IGNORED
        FILES_IGNORED += 1
        return "added '%s' to ignored list" % wav
    call_string = get('calls')
    call_times = sanitise_times(call_string)
    time_string = ' '.join("%.2f" % x for x in call_times)
    if get('interesting'):
        FILES_INTERESTING += 1
        interesting_string = INTERESTING + ' '
    else:
        interesting_string = ''
    FILES_PROCESSED += 1
    CALLS_FOUND += len(call_times) // 2
    # stored format: "[interesting ]start end start end ..."
    DB[wav] = interesting_string + time_string
    DB.sync()
    return "saved %d calls in %s" % (len(call_times) / 2, wav)
def get_known_calls(wav):
    """Return the unconfirmed call times for *wav*, or [] if none.
    Falls back to the basename so entries loaded from a flat times file
    match wavs stored with a directory prefix.
    """
    if wav is not None:
        basewav = os.path.basename(wav)
        if basewav in UNCONFIRMED_TIMES:
            wav = basewav
    return UNCONFIRMED_TIMES.get(wav, [])
@app.route('/', methods=['GET', 'POST'])
def main_page():
    """Save any submitted results, then serve a random pending wav."""
    msg = save_results()
    if PENDING_FILES:
        wav = random.sample(PENDING_FILES, 1)[0]
    else:
        wav = None
    # pre-fill the annotation UI with any unconfirmed times for this wav
    known_calls = ','.join(str(x) for x in get_known_calls(wav))
    return render_template('audio.html', wav=wav, wavdir=WAV_DIR, msg=msg,
                           files_remaining=len(PENDING_FILES),
                           files_processed=FILES_PROCESSED, files_ignored=FILES_IGNORED,
                           files_interesting=FILES_INTERESTING,
                           calls_found=CALLS_FOUND, known_calls=known_calls,
                           species=SPECIES)
@app.route('/results.txt')
def results():
    """Export the DB as plain text and write snapshot files to disk.

    Writes results-N.txt, ignored-N.txt and interesting-N.txt (N = number
    of annotated files) and returns the results as text/plain.
    """
    lines = []
    ignored = []
    interesting = []
    for k, v in DB.iteritems():
        if v == IGNORED:
            ignored.append(k)
        else:
            if v.startswith(INTERESTING):
                interesting.append(k)
            s = ' '.join('%s' % x for x in sanitise_times(v))
            lines.append("%s %s\n" % (k, s))
    lines.sort()
    text = ''.join(lines)
    f = open('results-%d.txt' % len(lines), 'w')
    f.write(text)
    f.close()
    f = open('ignored-%d.txt' % len(lines), 'w')
    f.write('\n'.join(ignored) + '\n')
    f.close()
    f = open('interesting-%d.txt' % len(lines), 'w')
    f.write('\n'.join(interesting) + '\n')
    f.close()
    response = make_response(text)
    response.headers["content-type"] = "text/plain"
    DB.sync()
    return response
def main():
global SPECIES, UNCONFIRMED_TIMES, DB
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--load-times',
help='load times from here')
parser.add_argument('-s', '--species', default='morepork',
help='species to search for (morepork|weka)')
parser.add_argument('--world-visible', action='store_true',
help='Allow connections from beyond localhost')
parser.add_argument('--dbm-file', default=DEFAULT_DBM_FILE,
help='Use this DBM file')
parser.add_argument('--include-wav-dir', action='append',
help='Use files from this subdirectory of %s' % WAV_DIR)
parser.add_argument('--timed-files-only', action='store_true',
help='Only use the files referenced by --load-times')
args = parser.parse_args()
if args.load_times:
for wav, times in gen_times_from_file(args.load_times):
REQUESTED_FILES.add(wav)
if times:
UNCONFIRMED_TIMES[wav] = times
SPECIES = args.species
if args.timed_files_only:
included_wavs = REQUESTED_FILES
else:
included_wavs = None
try:
set_up_dbm_and_file_list(args.dbm_file, included_wav_dirs=args.include_wav_dir,
included_wavs=included_wavs)
if not args.world_visible:
app.run(debug=True)
else:
app.run(host='0.0.0.0')
e |
alissonbf/blog-teste | blog/urls.py | Python | gpl-2.0 | 989 | 0.005097 | # -*- coding: utf-8 -*-
"""
------
Urls
------
Arquivo de configuração das urls da aplicação blog
Autores:
* Alisson Barbosa Ferreira <alissonbf@hotmail.com>
Data:
============== ==================
Criação Atualização
============== ==================
29/11/2014 29/11/2014
============== ==================
"""
from django.conf.urls import patterns, url
urlpatterns = patterns('blog.views',
url(r'^cadastro-usuario/$', 'usuario', name='usuario'),
url(r'^cadastro-post/$ | ', 'post', name='post'),
url(r'^api-all-posts', 'all_posts', name='all_posts'),
url(r'^api-get-post/(?P<pk>[0-9]+)/$', 'get_post', name='get_post'),
url(r'^api-auth', 'api_auth', name='api_auth'),
url(r'^api-token', 'api_token', name='api_token'),
url(r'^api-login', 'api_login', name='api_login'),
url(r'^enviar-email/$' | , 'enviar_email', name='enviar_email'),
url(r'^autorelacionamento/$', 'autorelacionamento', name='autorelacionamento'),
) |
kynikos/outspline | src/outspline/interfaces/wxgui/rootw.py | Python | gpl-3.0 | 17,506 | 0.0008 | # Outspline - A highly modular and extensible outliner.
# Copyright (C) 2011 Dario Giovannetti <dev@dariogiovannetti | .net>
#
# This file is part of Outspline.
#
# Outspline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ou | tspline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outspline. If not, see <http://www.gnu.org/licenses/>.
import threading
import wx
import wx.lib.newevent
import outspline.coreaux_api as coreaux_api
from outspline.coreaux_api import Event
import outspline.core_api as core_api
import art
import menubar
import notebooks
import databases
import msgboxes
import logs
import sessions
application_loaded_event = Event()
show_main_window_event = Event()
hide_main_window_event = Event()
class GUI(wx.App):
    """wx application object: builds the main frame and wires global events."""
    def __init__(self):
        self.config = coreaux_api.get_interface_configuration('wxgui')
        wx.App.__init__(self, False)
        self.artprovider = art.ArtProvider()
        self.root = MainFrame()
        self.logs_configuration = logs.LogsConfiguration()
        # Convenience aliases into the main frame's widgets.
        self.menu = self.root.menu
        self.nb_left = self.root.mainpanes.nb_left
        self.nb_right = self.root.mainpanes.nb_right
        # Uncaught-exception dialog throttling state.
        self.uncaught_max = self.config.get_int('max_exceptions')
        self.uncaught_counter = 0
        self.uncaught_event = threading.Event()
        core_api.bind_to_blocked_databases(self._handle_blocked_databases)
        # max_exceptions <= 0 disables the crash dialog entirely.
        if self.uncaught_max > 0:
            coreaux_api.bind_to_uncaught_exception(
                self._handle_uncaught_exception)
        # Window managers like i3 and awesome need MainFrame to be shown here,
        # not at the end of its constructor, or EVT_WINDOW_CREATE will be sent
        # too early (bug #366)
        self.root.Centre()
        self.root.Show(True)
    def exit_app(self, event):
        """Save state, close all databases and shut the application down."""
        self._export_options()
        # Refresh the session also when exiting, in order to save the order of
        # visualization of the tabs
        # Do it *before* closing the databases
        self.root.sessionmanager.refresh_session()
        # close_all_databases() already blocks the databases
        if self.menu.file.close_all_databases(event, exit_=True):
            core_api.exit_()
            coreaux_api.bind_to_uncaught_exception(
                self._handle_uncaught_exception, False)
            self.root.Destroy()
        # else: event.Veto() doesn't work here
    def _export_options(self):
        # Persist UI preferences back into the user configuration file.
        self.config['show_logs'] = 'yes' if \
            self.logs_configuration.is_shown() else 'no'
        if self.config.get_bool('remember_geometry'):
            self.root.save_geometry()
        self.config.export_upgrade(coreaux_api.get_user_config_file())
    def _handle_blocked_databases(self, kwargs):
        # Core refused an operation because a database is locked: tell the user.
        msgboxes.blocked_databases().ShowModal()
    def _handle_uncaught_exception(self, kwargs):
        # Show at most uncaught_max dialogs; a crashing worker thread blocks
        # on uncaught_event until the dialog has been dealt with.
        if self.uncaught_counter < self.uncaught_max:
            # Increment in this thread, otherwise uncaught_event won't work
            self.uncaught_counter += 1
            if coreaux_api.is_main_thread():
                self._show_uncaught_dialog(kwargs)
            else:
                wx.CallAfter(self._show_uncaught_dialog, kwargs)
                self.uncaught_event.wait()
    def _show_uncaught_dialog(self, kwargs):
        msgboxes.uncaught_exception(kwargs['exc_info']).ShowModal()
        if self.uncaught_counter == 1:
            # Only unbind here, otherwise another thread that crashes would
            # bypass this whole mechanism
            coreaux_api.bind_to_uncaught_exception(
                self._handle_uncaught_exception, False)
            self.root.Destroy()
            # No need to set self.uncaught_counter = 0
            self.uncaught_event.set()
            # No need to call self.uncaught_event.clear()
        else:
            self.uncaught_counter -= 1
class MainFrame(wx.Frame):
def __init__(self):
self._ROOT_MIN_SIZE = (600, 408)
self.config = coreaux_api.get_interface_configuration('wxgui')
confsize = [int(s) for s in self.config['initial_geometry'].split('x')]
clarea = wx.Display().GetClientArea()
initsize = [min((confsize[0], clarea.GetWidth())),
min((confsize[1], clarea.GetHeight()))]
wx.Frame.__init__(self, None, title='Outspline', size=initsize)
self.SetMinSize(self._ROOT_MIN_SIZE)
if self.config.get_bool('maximized'):
self.Maximize()
self.SetIcons(wx.GetApp().artprovider.get_frame_icon_bundle(
"&outspline"))
self.menu = menubar.RootMenu(self)
self.SetMenuBar(self.menu)
self._init_accelerators()
self.mainpanes = MainPanes(self, self.menu)
self.close_handler = False
self.Bind(wx.EVT_WINDOW_CREATE, self._handle_creation)
self.bind_to_close_event(wx.GetApp().exit_app)
coreaux_api.bind_to_external_nudge(self._handle_external_nudge)
# Window managers like i3 and awesome need MainFrame to be shown after
# the instantiation of this class, or EVT_WINDOW_CREATE will be sent
# too early (bug #366)
#self.Centre()
#self.Show(True)
def _init_accelerators(self):
aconfig = self.config("ContextualShortcuts")
self.accmanager = AcceleratorsManagers()
altmovkeys = AlternativeMovementKeys()
self.accmanager.create_manager(self, {
aconfig["up"]: altmovkeys.simulate_up,
aconfig["down"]: altmovkeys.simulate_down,
aconfig["left"]: altmovkeys.simulate_left,
aconfig["right"]: altmovkeys.simulate_right,
"Shift+{}".format(aconfig["up"]): altmovkeys.simulate_shift_up,
"Shift+{}".format(aconfig["down"]): altmovkeys.simulate_shift_down,
"Shift+{}".format(aconfig["left"]): altmovkeys.simulate_shift_left,
"Shift+{}".format(aconfig["right"]):
altmovkeys.simulate_shift_right,
"Ctrl+{}".format(aconfig["up"]): altmovkeys.simulate_ctrl_up,
"Ctrl+{}".format(aconfig["down"]): altmovkeys.simulate_ctrl_down,
"Ctrl+{}".format(aconfig["left"]): altmovkeys.simulate_ctrl_left,
"Ctrl+{}".format(aconfig["right"]): altmovkeys.simulate_ctrl_right,
"Ctrl+Shift+{}".format(aconfig["up"]):
altmovkeys.simulate_ctrl_shift_up,
"Ctrl+Shift+{}".format(aconfig["down"]):
altmovkeys.simulate_ctrl_shift_down,
"Ctrl+Shift+{}".format(aconfig["left"]):
altmovkeys.simulate_ctrl_shift_left,
"Ctrl+Shift+{}".format(aconfig["right"]):
altmovkeys.simulate_ctrl_shift_right,
aconfig["focus_database"]:
self.menu.view.databases_submenu.ID_FOCUS,
aconfig["focus_rightnb"]: self.menu.view.rightnb_submenu.ID_FOCUS,
aconfig["focus_logs"]: self.menu.view.logs_submenu.ID_FOCUS,
})
def _handle_creation(self, event):
self.Unbind(wx.EVT_WINDOW_CREATE, handler=self._handle_creation)
self.menu.post_init()
databases.dbpropmanager.post_init()
if self.config.get_bool('remember_session'):
self.sessionmanager = sessions.SessionManager()
application_loaded_event.signal()
def _handle_external_nudge(self, kwargs):
self.show()
def bind_to_close_event(self, handler):
if self.close_handler:
|
birocorneliu/conference | lib/models.py | Python | apache-2.0 | 4,402 | 0.00159 | #!/usr/bin/env python
import httplib
import endpoints
from protorpc import messages, message_types
class ConflictException(endpoints.ServiceException):
    """ConflictException -- exception mapped to HTTP 409 response"""
    http_status = httplib.CONFLICT  # 409
class ProfileMiniForm(messages.Message):
    """ProfileMiniForm -- inbound form message with the user-editable Profile fields"""
    displayName = messages.StringField(1)
    teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
    """ProfileForm -- Profile outbound form message"""
    # Fix: the base-class reference was corrupted ("messages.Me | ssage"),
    # which is a syntax error; restored to messages.Message.
    displayName = messages.StringField(1)
    mainEmail = messages.StringField(2)
    teeShirtSize = messages.EnumField('TeeShirtSize', 3)
    conferenceKeysToAttend = messages.StringField(4, repeated=True)
    sessionKeysWishlist = messages.StringField(5, repeated=True)
class TeeShirtSize(messages.Enum):
    """TeeShirtSize -- t-shirt size enumeration values (suffix _M = men's, _W = women's)"""
    NOT_SPECIFIED = 1
    XS_M = 2
    XS_W = 3
    S_M = 4
    S_W = 5
    M_M = 6
    M_W = 7
    L_M = 8
    L_W = 9
    XL_M = 10
    XL_W = 11
    XXL_M = 12
    XXL_W = 13
    XXXL_M = 14
    XXXL_W = 15
class BooleanMessage(messages.Message):
    """BooleanMessage -- outbound message carrying a single Boolean value"""
    data = messages.BooleanField(1)
class ConferenceForm(messages.Message):
    """ConferenceForm -- Conference outbound form message"""
    name = messages.StringField(1)
    description = messages.StringField(2)
    organizerUserId = messages.StringField(3)
    topics = messages.StringField(4, repeated=True)
    city = messages.StringField(5)
    # Dates travel as strings over the wire (DateTimeField not used here).
    startDate = messages.StringField(6) #DateTimeField()
    month = messages.IntegerField(7)
    maxAttendees = messages.IntegerField(8)
    seatsAvailable = messages.IntegerField(9)
    endDate = messages.StringField(10) #DateTimeField()
    websafeKey = messages.StringField(11)
    organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
    """ConferenceForms -- multiple Conference outbound form message"""
    items = messages.MessageField(ConferenceForm, 1, repeated=True)
class ConferenceQueryForm(messages.Message):
    """ConferenceQueryForm -- one inbound query filter (field, operator, value)"""
    field = messages.StringField(1)
    operator = messages.StringField(2)
    value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
    """ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
    filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
class StringMessage(messages.Message):
    """StringMessage -- outbound (single) string message"""
    data = messages.StringField(1, required=True)
class SpeakerMessage(messages.Message):
    """SpeakerMessage -- outbound message carrying a single speaker name"""
    speaker = messages.StringField(1, required=True)
class SpeakerForm(messages.Message):
    """SpeakerForm -- speaker details embedded inside SessionForm"""
    name = messages.StringField(1, required=True)
    age = messages.IntegerField(2)
    specialization = messages.StringField(3)
class SessionForm(messages.Message):
    """SessionForm -- Session inbound/outbound form message"""
    name = messages.StringField(1, required=True)
    speaker = messages.MessageField(SpeakerForm, 2, required=True)
    startTime = messages.IntegerField(3)
    duration = messages.IntegerField(4)
    typeOfSession = messages.StringField(5)
    date = messages.StringField(6)
    highlights = messages.StringField(7, repeated=True)
    websafeKey = messages.StringField(8)
class SessionsForm(messages.Message):
    """SessionsForm -- multiple Session outbound form message"""
    items = messages.MessageField(SessionForm, 1, repeated=True)
# ResourceContainers pair a request body message with URL path/query
# parameters for the endpoints API methods.
createSessionForm = endpoints.ResourceContainer(
    SessionForm,
    websafeConferenceKey=messages.StringField(1, required=True)
)
getConferenceForm = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
)
getSessionForm = endpoints.ResourceContainer(
    message_types.VoidMessage,
    SessionKey=messages.StringField(1),
)
updateConferenceForm = endpoints.ResourceContainer(
    ConferenceForm,
    websafeConferenceKey=messages.StringField(1),
)
getConferenceSessionsByTypeForm = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
    typeOfSession=messages.StringField(2),
)
class getSessionsTask3Form(messages.Message):
    """getSessionsTask3Form -- inbound filter: sessions before a start hour and not of a given type"""
    lastStartTimeHour = messages.IntegerField(1, required=True)
    unwantedTypeOfSession = messages.StringField(2, required=True)
|
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/effective_route.py | Python | mit | 2,684 | 0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveRoute(Model):
    """Effective Route.

    :param name: The name of the user defined route. This is optional.
    :type name: str
    :param source: Who created the route. Possible values include:
     'Unknown', 'User', 'VirtualNetworkGateway', 'Default'
    :type source: str or
     ~azure.mgmt.network.v2016_09_01.models.EffectiveRouteSource
    :param state: The value of effective route. Possible values include:
     'Active', 'Invalid'
    :type state: str or
     ~azure.mgmt.network.v2016_09_01.models.EffectiveRouteState
    :param address_prefix: The address prefixes of the effective routes in
     CIDR notation.
    :type address_prefix: list[str]
    :param next_hop_ip_address: The IP address of the next hop of the
     effective route.
    :type next_hop_ip_address: list[str]
    :param next_hop_type: The type of Azure hop the packet should be sent to.
     Possible values include: 'VirtualNetworkGateway', 'VnetLocal',
     'Internet', 'VirtualAppliance', 'None'
    :type next_hop_type: str or
     ~azure.mgmt.network.v2016_09_01.models.RouteNextHopType
    """

    # Serialization map consumed by msrest (wire key + wire type per field).
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'source': {'key': 'source', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'address_prefix': {'key': 'addressPrefix', 'type': '[str]'},
        'next_hop_ip_address': {'key': 'nextHopIpAddress', 'type': '[str]'},
        'next_hop_type': {'key': 'nextHopType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(EffectiveRoute, self).__init__(**kwargs)
        # Copy the known keyword arguments onto the instance; any field not
        # supplied defaults to None.
        for attr in ('name', 'source', 'state', 'address_prefix',
                     'next_hop_ip_address', 'next_hop_type'):
            setattr(self, attr, kwargs.get(attr, None))
|
pombredanne/readthedocs.org | readthedocs/core/signals.py | Python | mit | 1,987 | 0.00151 | """Signal handling for core app."""
from __future__ import absolute_import
import logging
from corsheaders import signals
from django.dispatch import Signal
from django.db.models import Q
from future.backports.urllib.parse import urlparse
from readthedocs.projects.models import Project, Domain
log = logging.getLogger(__name__)

# Endpoints that may be served cross-origin at all (see decide_if_cors).
WHITELIST_URLS = ['/api/v2/footer_html', '/api/v2/search', '/api/v2/docsearch']

# Signals fired when a VCS provider webhook is received for a project.
webhook_github = Signal(providing_args=['project', 'data', 'event'])
webhook_gitlab = Signal(providing_args=['project', 'data', 'event'])
webhook_bitbucket = Signal(providing_args=['project', 'data', 'event'])
def decide_if_cors(sender, request, **kwargs):  # pylint: disable=unused-argument
    """
    Decide whether a request should be given CORS access.

    This checks that:

    * The URL is whitelisted against our CORS-allowed domains
    * The Domain exists in our database, and belongs to the project being queried.

    Returns True when a request should be given CORS access.
    """
    if 'HTTP_ORIGIN' not in request.META:
        return False
    host = urlparse(request.META['HTTP_ORIGIN']).netloc.split(':')[0]

    # Only whitelisted endpoints may be served cross-origin.
    if not any(request.path_info.startswith(url) for url in WHITELIST_URLS):
        return False

    project_slug = request.GET.get('project', None)
    try:
        project = Project.objects.get(slug=project_slug)
    except Project.DoesNotExist:
        # Fix: the original format string was missing the closing bracket.
        log.warning(
            'Invalid project passed to domain. [{project}:{domain}]'.format(
                project=project_slug,
                domain=host,
            )
        )
        return False

    # The origin host must be a Domain registered for this project
    # (or for a parent project of this subproject).
    domain = Domain.objects.filter(
        Q(domain__icontains=host),
        Q(project=project) | Q(project__subprojects__child=project),
    )
    return domain.exists()
# Register the CORS decision hook with django-cors-headers.
signals.check_request_enabled.connect(decide_if_cors)
|
rwl/PyCIM | CIM15/IEC61970/Informative/InfGMLSupport/GmlTopologyStyle.py | Python | mit | 3,427 | 0.002334 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class GmlTopologyStyle(IdentifiedObject):
    """The style for one topology property.

    Similarly to the geometry style, a feature can have multiple topology
    properties, thus multiple topology style descriptors can be specified
    within one feature style.
    """

    def __init__(self, GmlLableStyle=None, GmlFeatureStyle=None, *args, **kw_args):
        """Initialises a new 'GmlTopologyStyle' instance.

        @param GmlLableStyle:
        @param GmlFeatureStyle:
        """
        # Assign through the public properties so the bidirectional
        # references on the partner objects are maintained.
        self._GmlLableStyle = None
        self.GmlLableStyle = GmlLableStyle
        self._GmlFeatureStyle = None
        self.GmlFeatureStyle = GmlFeatureStyle
        super(GmlTopologyStyle, self).__init__(*args, **kw_args)

    # CIM metadata consumed by the framework's (de)serialization code.
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["GmlLableStyle", "GmlFeatureStyle"]
    _many_refs = []

    def getGmlLableStyle(self):
        return self._GmlLableStyle

    def setGmlLableStyle(self, value):
        # Detach from the previous label style's back-reference list, then
        # attach to the new one (keeps both sides of the 1..n link in sync).
        if self._GmlLableStyle is not None:
            filtered = [x for x in self.GmlLableStyle.GmlTopologyStyles if x != self]
            self._GmlLableStyle._GmlTopologyStyles = filtered

        self._GmlLableStyle = value
        if self._GmlLableStyle is not None:
            if self not in self._GmlLableStyle._GmlTopologyStyles:
                self._GmlLableStyle._GmlTopologyStyles.append(self)

    GmlLableStyle = property(getGmlLableStyle, setGmlLableStyle)

    def getGmlFeatureStyle(self):
        return self._GmlFeatureStyle

    def setGmlFeatureStyle(self, value):
        # NOTE(review): 'GmlTobologyStyles' (sic) matches the attribute name
        # on GmlFeatureStyle in this generated code base -- do not "fix" the
        # spelling here alone.
        if self._GmlFeatureStyle is not None:
            filtered = [x for x in self.GmlFeatureStyle.GmlTobologyStyles if x != self]
            self._GmlFeatureStyle._GmlTobologyStyles = filtered

        self._GmlFeatureStyle = value
        if self._GmlFeatureStyle is not None:
            if self not in self._GmlFeatureStyle._GmlTobologyStyles:
                self._GmlFeatureStyle._GmlTobologyStyles.append(self)

    GmlFeatureStyle = property(getGmlFeatureStyle, setGmlFeatureStyle)
|
baloan/mt-krpc | krpc/lko.py | Python | mit | 899 | 0.003337 | #!/usr/bin/env python3
# -*- coding: cp1252 -*-
# created on May 21, 2014 by baloan
"""
Launch to orbit (with atmosphere)
"""
from threading import Thread
import krpc
from toolkit import ksp
from toolkit import launch
from toolkit import system
from toolkit import warp
from vessels import surveyor, stock
# Maps vessel names to their custom staging routines; vessels not listed
# here fall back to stock.default.
STAGING_DICT = {
    "Surveyor 1": surveyor.surveyor1,
    "Kerbal X": stock.default,
}
def main():
    """Connect to kRPC and fly the active vessel to orbit, running the
    staging routine in a background thread."""
    cx = ksp.connect(name='Trajectory')
    # NOTE(review): set_globals presumably injects SC (space center) into the
    # module globals used below -- confirm in toolkit.ksp.
    ksp.set_globals(cx)
    # system.checkvessel("Surveyor 1")
    warp.warpday()
    # setup staging: pick the routine for the active vessel, falling back to
    # the stock default for unknown vessels
    try:
        staging = STAGING_DICT[SC.active_vessel.name]
    except KeyError:
        staging = stock.default
    stage = Thread(target=staging, args=["Staging", ])
    # launch to orbit
    stage.start()
    launch.ltoa()
    system.tts()
# Script entry point.
if __name__ == "__main__":
    main()
|
arvinddoraiswamy/blahblah | cryptopals/Set2/c11.py | Python | mit | 625 | 0.0144 | import sys
import os
# Add the project-local crypto modules directory to the module search path.
cmd_folder = os.path.dirname('/home/arvind/Documents/Me/My_Projects/Git/Crypto/modules/')
sys.path.insert(0, cmd_folder)
# Import the common crypto module (project-local; provides encryption_oracle).
import block
if __name__ == "__main__":
plaintext= 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | aaa | aaaaaaaaaa'
is_aes_mode_ecb= block.encryption_oracle(plaintext)
if is_aes_mode_ecb == 1:
print "String ",plaintext, "is AES encrypted with ECB mode"
else:
print "String ",plaintext, "is AES encrypted with CBC mode"
|
hzlf/openbroadcast | website/apps/alibrary/management/commands/import_folder.py | Python | gpl-3.0 | 8,608 | 0.014292 | #-*- coding: utf-8 -*-
from django.core.files import File as DjangoFile
from django.core.management.base import BaseCommand, NoArgsCommand
from optparse import make_option
import os
import sys
import re
from django.template.defaultfilters import slugify
from alibrary.models import Artist, Release, Media, Label
from filer.models.filemodels import File
from filer.models.audiomodels import Audio
from filer.models.imagemodels import Image
from audiotools import AudioFile, MP3Audio, M4AAudio, FlacAudio, WaveAudio, MetaData
import audiotools
class FolderImporter(object):
    """Walks a directory tree of '<artist>/<release>/<nn> - <track>.<ext>'
    files and imports them as Artist/Release/Media records with filer assets.

    NOTE(review): this file is Python 2 and contains extraction-corrupted
    lines (marked below); treat it as experimental/test-only code, as its
    own comments state.
    """
    def __init__(self, * args, **kwargs):
        # Root path to scan and console verbosity level.
        self.path = kwargs.get('path')
        self.verbosity = int(kwargs.get('verbosity', 1))
    def import_file(self, file, folder):
        print "#########################"
        print folder.name
        print "#########################"
        """
        Create a Audio or an Image into the given folder
        """
        try:
            iext = os.path.splitext(file.name)[1].lower()
        except:
            iext = ''
        print 'iext:'
        print iext
        # Images become public filer Images; audio files become private
        # filer Audio objects. Other extensions leave obj unbound, so the
        # final 'if obj' raises -- NOTE(review): confirm callers only pass
        # supported extensions.
        if iext in ['.jpg', '.jpeg', '.png', '.gif']:
            obj, created = Image.objects.get_or_create(
                original_filename=file.name,
                file=file,
                folder=folder,
                is_public=True)
            print 'obj:',
            print obj
        if iext in ['.mp3', '.flac', '.wav', '.aiff']:
            obj, created = Audio.objects.get_or_create(
                original_filename=file.name,
                file=file,
                folder=folder,
                is_public=False)
        if obj:
            print 'have object'
            return obj
        else:
            return None
    def walker(self, path=None, base_folder=None):
        # Hardcoded as it is a test only...
        label_name = 'HH-Digital'
        label = Label.objects.get(name=label_name)
        path = path or self.path
        path = unicode(os.path.normpath(path))
        file_list = []
        file_size = 0
        folder_count = 0
        # NOTE(review): line corrupted in extraction -- almost certainly
        # 'rootdir = sys.argv[1]' originally; as written it is a NameError.
        rootdir = sys.arg | v[1]
        releases = []
        artists = []
        tracks = []
        # Only the last compiled pattern below is actually used; the earlier
        # ones are superseded attempts kept by the author.
        #pattern = "^/[A-Za-z0-9.]/*$"
        pattern = re.compile('^/(?P<artist>[a-zA-Z0-9 ]+)/(?P<release>[a-zA-Z0-9 ]+)/(?P<tracknumber>[\d]+)[ ]* - [ ]*(?P<track>[ | a-zA-Z0-9 -_]+)\.[a-zA-Z]+.*')
        pattern = re.compile('^/(?P<artist>[a-zA-Z0-9 ]+)/(?P<release>[a-zA-Z0-9 ]+)/(?P<tracknumber>[ab]?\d+?)[ | - ](?P<track>[Ça-zA-Z0-9 -_]+)\.[a-zA-Z]+.*')
        '^/(?P<release>[a-zA-Z0-9 ]+)/(?P<tracknumber>[ab]?\d+?)[ | -](?P<artist>[a-zA-Z0-9 ]+) - (?P<track>[Ça-zA-Z0-9 -_]+)\.[a-zA-Z]+.*'
        for root, subFolders, files in os.walk(path):
            folder_count += len(subFolders)
            for file in files:
                f = os.path.join(root,file)
                file_size = file_size + os.path.getsize(f)
                # Path relative to the scan root, e.g. '/Artist/Release/01 - Track.flac'
                rel_path = f[len(path):]
                rel_path = rel_path.encode('ascii', 'ignore')
                #rel_path = '/The Prodigy/The Fat Of The Land/04 - Funky Stuff.flac'
                match = pattern.search(rel_path)
                #print
                #print
                print '-------------------------------------------------------------'
                # Any failure for this file (no regex match, DB error, bad
                # audio metadata) is printed and the file is skipped.
                try:
                    print match.groups()
                    artist_name = match.group('artist')
                    release_name = match.group('release')
                    track_name = match.group('track')
                    tracknumber = match.group('tracknumber')
                    """
                    print 'artist: %s' % artist_name
                    print 'release: %s' % release_name
                    print 'track: %s' % track_name
                    print 'tracknumber: %s' % tracknumber
                    """
                    if not artist_name in artists:
                        artists.append(artist_name)
                    if not release_name in releases:
                        releases.append(release_name)
                    """"""
                    release, release_created = Release.objects.get_or_create(name=release_name, slug=slugify(release_name), label=label)
                    artist, artist_created = Artist.objects.get_or_create(name=artist_name, slug=slugify(artist_name))
                    media, media_created = Media.objects.get_or_create(name=track_name, tracknumber=tracknumber, artist=artist, release=release)
                    dj_file = DjangoFile(open(os.path.join(root, file)), name=file)
                    """
                    print "**:",
                    print dj_file,
                    print dj_file.size
                    """
                    # Attach the audio file as the media master if missing.
                    if not media.master:
                        master = self.import_file(file=dj_file, folder=release.get_folder('tracks'))
                        media.master = master
                        media.save()
                    # Extract embedded cover art for the release if missing.
                    if not release.main_image:
                        print 'Image missing'
                        tfile = 'temp/cover.jpg'
                        audiofile = audiotools.open(os.path.join(root, file))
                        metadata = audiofile.get_metadata()
                        for image in metadata.images():
                            #print image.data
                            f = open(tfile,"wb")
                            f.write(image.data)
                            f.close()
                            dj_file = DjangoFile(open(tfile), name='cover.jpg')
                            cover = self.import_file(file=dj_file, folder=release.get_folder('pictures'))
                            release.main_image = cover
                            release.save()
                            #track.get_metadata()
                            pass
                    """
                    artist, artist_created = Artist.objects.get_or_create(name=artist_name)
                    if not artist_created:
                        #pass
                        artist.save()
                    """
                except Exception, e:
                    print e
                    pass
                # help(match)
                #print match.group(0)
                # print match
                #print(f)
                #print rel_path
                file_list.append(f)
        #print("Total Size is {0} bytes".format(file_size))
        #print("Total Files ", len(file_list))
        #print("Total Folders ", folder_count)
        print "# Artists ----------------------------------------------"
        print artists
        print "# Releases ---------------------------------------------"
        print releases
        """
        for root, dirs, files in os.walk(path):
            print files
            for file in files:
                print file
        """
class Command(NoArgsCommand):
"""
Import directory structure into alibrary:
|
dwaithe/FCS_point_correlator | focuspoint/correlation_gui.py | Python | gpl-2.0 | 51,790 | 0.014501 | import struct
import numpy as np
#import scipy.weave as weave
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import sys, csv, os
from PyQt5 import QtGui, QtCore, QtWidgets
#import matplotlib
#matplotlib.use('Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib.transforms import ScaledTranslation
import random
import errno
import os.path
from scipy.special import _ufuncs_cxx
import pickle
from correlation_objects import *
import tifffile as tif_fn
import json
"""FCS Bulk Correlation Software
Copyright (C) 2015 Dominic Waithe
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
class folderOutput(QtWidgets.QMainWindow):
    """Window for selecting the folder that correlation output is saved to.

    The chosen path is persisted in the parent's config dict, which is
    pickled to ~/FCS_Analysis/config.p.
    """
    def __init__(self,parent):
        super(folderOutput, self).__init__()
        self.initUI()
        self.parent = parent
        self.parent.config ={}
        try:
            # Restore the previously chosen output folder from the pickled
            # config; on any failure fall back to the default and create it.
            self.parent.config = pickle.load(open(os.path.expanduser('~')+'/FCS_Analysis/config.p', "rb" ));
            self.filepath = self.parent.config['output_corr_filepath']
        except:
            self.filepath = os.path.expanduser('~')+'/FCS_Analysis/output/'
            try:
                os.makedirs(self.filepath)
            except OSError as exception:
                # Ignore "already exists"; re-raise anything else.
                if exception.errno != errno.EEXIST:
                    raise
    def initUI(self):
        """Build the minimal window chrome (text area, File->Open menu)."""
        self.textEdit = QtWidgets.QTextEdit()
        self.setCentralWidget(self.textEdit)
        self.statusBar()
        openFile = QtWidgets.QAction(QtGui.QIcon('open.png'), 'Open', self)
        openFile.triggered.connect(self.showDialog)
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(openFile)
        self.setGeometry(300, 300, 350, 500)
        self.setWindowTitle('Select a Folder')
        #self.show()
    def showDialog(self):
        """Open a directory picker and persist the selection in the config."""
        # NOTE(review): self.type is not set in this class -- confirm callers
        # always assign it before this slot can fire.
        if self.type == 'output_corr_dir':
            #folderSelect = QtGui.QFileDialog()
            #folderSelect.setDirectory(self.filepath);
            tfilepath = str(QtWidgets.QFileDialog.getExistingDirectory(self, "Select Directory",self.filepath))
            if tfilepath !='':
                self.filepath = tfilepath
                #Save to the config file.
                self.parent.config['output_corr_filepath'] = str(tfilepath)
                pickle.dump(self.parent.config, open(str(os.path.expanduser('~')+'/FCS_Analysis/config.p'), "wb" ))
class Annotate():
    """Mouse-driven x-range selector for the correlation plot.

    A press/release drag on the axes defines an x-interval, drawn as a
    coloured span and appended to the scrollBox's parallel region lists
    (x0, x1, TGid, facecolor).
    """
    def __init__(self,win_obj,par_obj,scrollBox):
        self.ax = plt.gca()
        self.x0 = []
        self.par_obj = par_obj
        self.win_obj = win_obj
        self.scrollBox = scrollBox
        self.pickerSelect = False
        self.ax.figure.canvas.mpl_connect('button_press_event', self.on_press)
        self.ax.figure.canvas.mpl_connect('button_release_event', self.on_release)
    def on_press(self, event):
        """Record the x-coordinate where the drag started."""
        self.ax.figure.canvas.draw()
        self.x0 = event.xdata
    def on_release(self, event):
        """Finish the drag: normalise the interval, draw it and store it."""
        self.x1 = event.xdata
        # Clamp both ends to zero.
        if self.x0 < 0: self.x0 = 0
        if self.x1 < 0: self.x1 = 0
        # Bug fix: the original "swap" assigned self.x0 twice and never
        # restored self.x1, so a right-to-left drag left the interval
        # reversed. Swap properly so x0 <= x1.
        if self.x0 > self.x1:
            self.x0, self.x1 = self.x1, self.x0
        self.scrollBox.rect.append(plt.axvspan(self.x0, self.x1, facecolor=self.par_obj.colors[self.scrollBox.rect.__len__() % len(self.par_obj.colors)], alpha=0.5,picker=True))
        self.ax.figure.canvas.draw()
        # Saves regions to series of parallel arrays on the scrollBox.
        self.scrollBox.x0.append(self.x0)
        self.scrollBox.x1.append(self.x1)
        self.scrollBox.color = self.par_obj.colors[self.scrollBox.rect.__len__()]
        self.scrollBox.TGid.append(self.par_obj.TGnumOfRgn)
        self.scrollBox.facecolor.append(self.par_obj.colors[self.par_obj.TGnumOfRgn])
        self.par_obj.TGnumOfRgn = self.par_obj.TGnumOfRgn + 1
        self.scrollBox.generateList()
    def freshDraw(self):
        """Draw every stored region from scratch on a fresh canvas."""
        self.scrollBox.rect = []
        for i in range(0, self.scrollBox.x0.__len__()):
            self.scrollBox.rect.append(plt.axvspan(self.scrollBox.x0[i], self.scrollBox.x1[i], facecolor=self.par_obj.colors[i % len(self.par_obj.colors)], alpha=0.5, picker=True))
        self.win_obj.canvas5.draw()
    def redraw(self):
        """Remove the existing spans and redraw them from the stored bounds."""
        for i in range(0, self.scrollBox.rect.__len__()):
            self.scrollBox.rect[i].remove()
        self.scrollBox.rect = []
        for i in range(0, self.scrollBox.x0.__len__()):
            self.scrollBox.rect.append(plt.axvspan(self.scrollBox.x0[i], self.scrollBox.x1[i], facecolor=self.par_obj.colors[i % len(self.par_obj.colors)], alpha=0.5, picker=True))
        self.win_obj.canvas5.draw()
class baseList(QtWidgets.QLabel):
    """Clickable label that carries an index into a list of entries."""
    def __init__(self):
        super(baseList, self).__init__()
        self.listId=0  # entry index; presumably assigned by the owning widget -- confirm
    def mousePressEvent(self,ev):
        # Debug only: report which entry was clicked.
        print(self.listId)
class FileDialog(QtWidgets.QMainWindow):
def __init__(self, win_obj, par_obj, fit_obj):
super(FileDialog, self).__init__()
self.initUI()
self.par_obj = par_obj
self.fit_obj = fit_obj
self.win_obj = win_obj
def initUI(self):
self.textEdit = QtWidgets.QTextEdit()
self.setCentralWidget(self.textEdit)
self.statusBar()
openFile = QtWidgets.QAction(QtGui.QIcon('open.png'), 'Open', self)
openFile.setShortcut('Ctrl+O')
openFile.setStatusTip('Open new File')
openFile.triggered.connect(self.showDialog)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(openFile)
self.setGeometry(300, 300, 350, 500)
self.setWindowTitle('File dialog')
def count(self):
print('workes')
#self.show()
def showDialog(self):
#Intialise Dialog.
fileInt = QtWidgets.QFileDialog()
try:
#Try and read the default location for a file.
f = open(os.path.expanduser('~')+'/FCS_Analysis/configLoad', 'r')
self.loadpath =f.readline()
f.close()
except:
#If not default will do.
self.loadpath = os.path.expanduser('~')+'/FCS_Analysis/'
#Create loop which opens dialog box and allows selection of files.
self.win_obj.update_correlation_parameters()
file_imports = fileInt.getOpenFileNames(self, 'Open a data file',self.loadpath, 'pt3 files (*.pt3);ptU files (*.ptU);asc files (*.asc);spc files (*.spc);All Files (*.*)')
bt = QtWidgets.QPushButton("cancel")
for c,filename in enumerate(file_imports[0]):
self.win_obj.image_status_text.setStyleSheet("QStatusBar{padding-left:8px;color:green;font-weight:regular;}")
self.win_obj.image_status_text.showMessage("Processing file "+str(c+1)+" of "+str(file_imports[0].__len__()))
self.fit_obj.app.processEvents()
pic = picoObject(filename,self.par_obj,self.fit_obj);
if pic.exit == True:
self.win_obj.image_status_text.setStyleSheet("QStatusBar{padding-left:8px;color:red;font-w |
tensorflow/datasets | tensorflow_datasets/audio/gtzan_music_speech/gtzan_music_speech_test.py | Python | apache-2.0 | 1,030 | 0.003883 | # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WI | THOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GTZAN Music Speech dataset."""
from tensorflow_datasets import testing
from tensorflow_datasets.audio.gtzan_music_speech import gtzan_music_speech
class GTZANTest(testing.DatasetBuilderTestCase):
  """Smoke test for the GTZAN Music Speech dataset builder."""
  DATASET_CLASS = gtzan_music_speech.GTZANMusicSpeech
  SPLITS = {
      "train": 1,  # Number of fake train examples
  }
  # Maps the download key to the fake extracted-archive root for tests.
  DL_EXTRACT_RESULT = {"music_speech": ""}
# Run the TFDS test harness when executed directly.
if __name__ == "__main__":
  testing.test_main()
|
openstreams/wflow | Scripts/wflow_flood_lib.py | Python | gpl-3.0 | 28,442 | 0.001864 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 08 16:12:29 2015
@author: winsemi
$Id: wflow_flood_lib.py $
$Date: 2016-04-07 12:05:38 +0200 (Thu, 7 Apr 2016) $
$Author: winsemi $
$Revision: $
$HeadURL: $
$Keywords: $
"""
import sys
import os
import configparser
import logging
import logging.handlers
import numpy as np
from osgeo import osr, gdal, gdalconst
import pcraster as pcr
import netCDF4 as nc
import cftime
def setlogger(logfilename, logReference, verbose=True):
    """
    Set up the logging system (rotating file + console). Exit if this fails.

    Returns the logger and its rotating-file handler.
    """
    try:
        logger = logging.getLogger(logReference)
        logger.setLevel(logging.DEBUG)

        # 10 MB rotating log file, keeping five backups, plus console echo.
        filehandler = logging.handlers.RotatingFileHandler(
            logfilename, maxBytes=10 * 1024 * 1024, backupCount=5
        )
        consolehandler = logging.StreamHandler()
        filehandler.setLevel(logging.DEBUG)
        consolehandler.setLevel(logging.DEBUG)

        fmt = logging.Formatter(
            "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s"
        )
        filehandler.setFormatter(fmt)
        consolehandler.setFormatter(fmt)

        logger.addHandler(filehandler)
        logger.addHandler(consolehandler)
        logger.debug("File logging to " + logfilename)
        return logger, filehandler
    except IOError:
        print("ERROR: Failed to initialize logger with logfile: " + logfilename)
        sys.exit(1)
def closeLogger(logger, ch):
    """Detach handler *ch* from *logger*, flush and close it.

    Returns the (logger, handler) pair for symmetry with setlogger().
    """
    logger.removeHandler(ch)
    ch.flush()
    ch.close()
    return logger, ch
def close_with_error(logger, ch, msg):
    """Log *msg* as an error, tear down the logger and exit with status 1."""
    logger.error(msg)
    logger, ch = closeLogger(logger, ch)
    del logger, ch
    sys.exit(1)
def open_conf(fn):
    """Read an ini file into a case-sensitive ConfigParser; exit if missing."""
    if not os.path.exists(fn):
        print("Cannot open config file: " + fn)
        sys.exit(1)
    config = configparser.ConfigParser()
    # Keep option names case-sensitive (default lower-cases them).
    config.optionxform = str
    config.read(fn)
    return config
def configget(config, section, var, default, datatype="str"):
    """
    Gets a value from a config file (.ini) and returns a default value if
    the key is not found or cannot be converted.

    Input:
        - config - python ConfigParser object
        - section - section in the file
        - var - variable (key) to get
        - default - default value
        - datatype='str' - can be set to 'boolean', 'int', 'float' or 'str'

    Returns:
        - value (str, boolean, float or int) - either the value from the
          config file or the default value
    """
    # Fix: the original tracked a 'Def' flag and reassigned 'default' to it,
    # which was dead code; it also used a bare except. Dispatch to the typed
    # getter and catch only lookup/conversion failures.
    getters = {
        "int": config.getint,
        "float": config.getfloat,
        "boolean": config.getboolean,
    }
    getter = getters.get(datatype, config.get)
    try:
        return getter(section, var)
    except (configparser.Error, ValueError):
        # Missing section/option or unconvertible value: use the default.
        return default
def get_gdal_extent(filename):
    """Return the bounding box (xmin, ymin, xmax, ymax) of a GDAL dataset."""
    dataset = gdal.Open(filename, gdal.GA_ReadOnly)
    # geotransform: (top-left x, pixel width, 0, top-left y, 0, pixel height)
    # pixel height is negative for north-up rasters
    origin_x, pixel_w, _, origin_y, _, pixel_h = [
        np.float64(v) for v in dataset.GetGeoTransform()
    ]
    cols = np.float64(dataset.RasterXSize)
    rows = np.float64(dataset.RasterYSize)
    dataset = None  # close the dataset
    left = origin_x
    top = origin_y
    right = origin_x + cols * pixel_w
    bottom = origin_y + rows * pixel_h
    return left, bottom, right, top
def get_gdal_geotransform(filename):
    """Return the 6-element geotransform of a GDAL dataset.

    Exits with status 1 when the file cannot be opened.
    """
    ds = gdal.Open(filename, gdal.GA_ReadOnly)
    if ds is None:
        # BUG FIX: apply .format() to the message string; the original
        # called .format() on the None returned by logging.warning(),
        # raising AttributeError instead of logging.
        logging.warning("Could not open {:s} Shutting down".format(filename))
        sys.exit(1)
    # Retrieve geoTransform info
    gt = ds.GetGeoTransform()
    ds = None
    return gt
def get_gdal_axes(filename, logging=logging):
    """Return cell-centre x and y coordinate arrays of a raster."""
    xul, xres, _, yul, _, yres = get_gdal_geotransform(filename)
    ds = gdal.Open(filename, gdal.GA_ReadOnly)
    cols = ds.RasterXSize
    rows = ds.RasterYSize
    ds = None
    # coordinates refer to cell centres, hence the half-resolution offset
    x = np.linspace(xul + xres / 2, xul + xres / 2 + xres * (cols - 1), cols)
    y = np.linspace(yul + yres / 2, yul + yres / 2 + yres * (rows - 1), rows)
    return x, y
def get_gdal_fill(filename, logging=logging):
    """Return the fill (no-data) value of the first band of a raster.

    Exits with status 1 when the file cannot be opened.

    NOTE(review): the original returned the undefined name ``fill_value``
    (a guaranteed NameError) and computed geotransform values it never
    used.  The fill value is now read from band 1's NoDataValue, which
    appears to be the intent — confirm against callers.
    """
    ds = gdal.Open(filename, gdal.GA_ReadOnly)
    if ds is None:
        # BUG FIX: format the message itself; logging.warning returns None.
        logging.warning("Could not open {:s} Shutting down".format(filename))
        sys.exit(1)
    fill_value = ds.GetRasterBand(1).GetNoDataValue()
    ds = None
    return fill_value
def get_gdal_projection(filename, logging=logging):
    """Return the spatial reference (osr.SpatialReference) of a raster.

    Exits with status 1 when the file cannot be opened.
    """
    ds = gdal.Open(filename, gdal.GA_ReadOnly)
    if ds is None:
        # BUG FIX: format the message itself; the original called .format()
        # on the None returned by logging.warning().
        logging.warning("Could not open {:s} Shutting down".format(filename))
        sys.exit(1)
    WktString = ds.GetProjection()
    srs = osr.SpatialReference()
    srs.ImportFromWkt(WktString)
    ds = None
    return srs
def get_gdal_rasterband(filename, band=1, logging=logging):
    """
    :param filename: GDAL compatible raster file to read from
    :param band: band number (default=1)
    :param logging: logging object
    :return: gdal dataset object, gdal rasterband object
    """
    ds = gdal.Open(filename)
    if ds is None:
        # BUG FIX: format the message itself; the original called .format()
        # on the None returned by logging.warning().
        logging.warning("Could not open {:s} Shutting down".format(filename))
        sys.exit(1)
    # the dataset is returned alongside the band: the band object is only
    # valid while the dataset stays alive
    return ds, ds.GetRasterBand(band)  # there's only 1 band, starting from 1
def prepare_nc(
    trg_file,
    times,
    x,
    y,
    metadata={},
    logging=logging,
    units="Days since 1900-01-01 00:00:00",
    calendar="gregorian",
):
    """
    This function prepares a NetCDF file with given metadata, for a certain year, daily basis data
    The function assumes a gregorian calendar and a time unit 'Days since 1900-01-01 00:00:00'

    :param trg_file: path of the NetCDF file to create
    :param times: iterable of datetime objects for the time axis
    :param x: longitude cell-centre coordinates
    :param y: latitude cell-centre coordinates
    :param metadata: dict of global attributes to set on the file (read only)
    :param logging: logger or logging module used for progress messages
    :param units: CF time units string
    :param calendar: CF calendar name
    :return: the open netCDF4 Dataset (caller is responsible for closing it)
    """
    # BUG FIX: the original called the undefined global name ``logger``;
    # use the ``logging`` parameter like the rest of this module.
    logging.info('Setting up "' + trg_file + '"')
    times_list = cftime.date2num(times, units=units, calendar=calendar)
    nc_trg = nc.Dataset(trg_file, "w")
    logging.info("Setting up dimensions and attributes")
    nc_trg.createDimension("time", 0)  # unlimited time dimension
    nc_trg.createDimension("lat", len(y))
    nc_trg.createDimension("lon", len(x))
    times_nc = nc_trg.createVariable("time", "f8", ("time",))
    times_nc.units = units
    times_nc.calendar = calendar
    times_nc.standard_name = "time"
    times_nc.long_name = "time"
    times_nc[:] = times_list
    y_var = nc_trg.createVariable("lat", "f4", ("lat",))
    y_var.standard_name = "latitude"
    y_var.long_name = "latitude"
    y_var.units = "degrees_north"
    x_var = nc_trg.createVariable("lon", "f4", ("lon",))
    x_var.standard_name = "longitude"
    x_var.long_name = "longitude"
    x_var.units = "degrees_east"
    y_var[:] = y
    x_var[:] = x
    # scalar "projection" variable carrying the CRS metadata (WGS84)
    projection = nc_trg.createVariable("projection", "c")
    projection.long_name = "wgs84"
    projection.EPSG_code = "EPSG:4326"
    projection.proj4_params = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
    projection.grid_mapping_name = "latitude_longitude"
    # now add all attributes from user-defined metadata
    for attr in metadata:
        nc_trg.setncattr(attr, metadata[attr])
    nc_trg.sync()
    return nc_trg
def prepare_gdal(
filename,
x,
y,
format="GTiff",
logging=logging,
metadata={},
metadata_var={},
gdal_type=gdal.GDT_Float32,
zlib=True,
srs=None,
):
# prepare geotrans
xul = x[0] - (x[1] - x[0]) / 2
xres = x[1] - x[0]
yul = y[0] + (y[0] - y[1]) / 2
yres = y[1] - y[0]
geotrans = [xul, xres, 0, yul, 0, yres]
gdal.AllRegister()
driver = gdal.GetDriverByName("GTiff")
# Processing
logging.info(str("Preparing file {:s}").format(filename))
if zlib:
ds = driver.Create(filename, len(x), len(y), 1, gdal_typ |
vileopratama/vitech | src/openerp/tests/common.py | Python | mit | 15,778 | 0.001521 | # -*- coding: utf-8 -*-
"""
The module :mod:`openerp.tests.common` provides unittest test cases and a few
helpers and classes to write tests.
"""
import errno
import glob
import importlib
import json
import logging
import os
import select
import subprocess
import threading
import time
import itertools
import unittest
import urllib2
import xmlrpclib
from contextlib import contextmanager
from datetime import datetime, timedelta
from pprint import pformat
import werkzeug
import openerp
from openerp import api
from openerp.modules.registry import RegistryManager
# Module-level logger for this test-support module.
_logger = logging.getLogger(__name__)
# The openerp library is supposed already configured.
ADDONS_PATH = openerp.tools.config['addons_path']
# Host/port the test XML-RPC/HTTP clients connect to.
HOST = '127.0.0.1'
PORT = openerp.tools.config['xmlrpc_port']
# Useless constant, tests are aware of the content of demo data
ADMIN_USER_ID = openerp.SUPERUSER_ID
def get_db_name():
    """Return the configured database name, or the current thread's dbname."""
    db = openerp.tools.config['db_name']
    if db:
        return db
    # If the database name is not provided on the command-line,
    # use the one on the thread (which means if it is provided on
    # the command-line, this will break when installing another
    # database from XML-RPC).
    return getattr(threading.current_thread(), 'dbname', db)
# For backwards-compatibility - get_db_name() should be used instead
# (this value is captured once at import time and can go stale).
DB = get_db_name()
def at_install(flag):
    """ Sets the at-install state of a test, the flag is a boolean specifying
    whether the test should (``True``) or should not (``False``) run during
    module installation.
    By default, tests are run right after installing the module, before
    starting the installation of the next module.
    """
    def mark(test_obj):
        # stamp the flag onto the decorated test and hand it back unchanged
        test_obj.at_install = flag
        return test_obj
    return mark
def post_install(flag):
    """ Sets the post-install state of a test. The flag is a boolean
    specifying whether the test should or should not run after a set of
    module installations.
    By default, tests are *not* run after installation of all modules in the
    current installation set.
    """
    def mark(test_obj):
        # stamp the flag onto the decorated test and hand it back unchanged
        test_obj.post_install = flag
        return test_obj
    return mark
class BaseCase(unittest.TestCase):
    """
    Subclass of TestCase for common OpenERP-specific code.
    This class is abstract and expects self.registry, self.cr and self.uid to be
    initialized by subclasses.
    """
    def cursor(self):
        # Open a fresh cursor on the registry's database.
        return self.registry.cursor()
    def ref(self, xid):
        """ Returns database ID for the provided :term:`external identifier`,
        shortcut for ``get_object_reference``
        :param xid: fully-qualified :term:`external identifier`, in the form
                    :samp:`{module}.{identifier}`
        :raise: ValueError if not found
        :returns: registered id
        """
        assert "." in xid, "this method requires a fully qualified parameter, in the following form: 'module.identifier'"
        module, xid = xid.split('.')
        # get_object_reference returns (model_name, id); only the id is needed
        _, id = self.registry('ir.model.data').get_object_reference(self.cr, self.uid, module, xid)
        return id
    def browse_ref(self, xid):
        """ Returns a record object for the provided
        :term:`external identifier`
        :param xid: fully-qualified :term:`external identifier`, in the form
                    :samp:`{module}.{identifier}`
        :raise: ValueError if not found
        :returns: :class:`~openerp.models.BaseModel`
        """
        assert "." in xid, "this method requires a fully qualified parameter, in the following form: 'module.identifier'"
        module, xid = xid.split('.')
        return self.registry('ir.model.data').get_object(self.cr, self.uid, module, xid)
    @contextmanager
    def _assertRaises(self, exception):
        """ Context manager that clears the environment upon failure. """
        with super(BaseCase, self).assertRaises(exception) as cm:
            # clear_upon_failure drops pending env recomputations if the
            # expected exception escapes the inner block
            with self.env.clear_upon_failure():
                yield cm
    def assertRaises(self, exception, func=None, *args, **kwargs):
        # Mirror unittest's dual API: direct call or context-manager form.
        if func:
            with self._assertRaises(exception):
                func(*args, **kwargs)
        else:
            return self._assertRaises(exception)
    def shortDescription(self):
        # Collapse the whole docstring to one line (unittest default shows
        # only the first line).
        doc = self._testMethodDoc
        return doc and ' '.join(filter(None, map(str.strip, doc.splitlines()))) or None
class TransactionCase(BaseCase):
    """ TestCase in which each test method is run in its own transaction,
    and with its own cursor. The transaction is rolled back and the cursor
    is closed after each test.
    """
    def setUp(self):
        self.registry = RegistryManager.get(get_db_name())
        #: current transaction's cursor
        self.cr = self.cursor()
        self.uid = openerp.SUPERUSER_ID
        #: :class:`~openerp.api.Environment` for the current test case
        self.env = api.Environment(self.cr, self.uid, {})
        # registered as a cleanup so it runs even when setUp/test fails
        @self.addCleanup
        def reset():
            # rollback and close the cursor, and reset the environments
            self.registry.clear_caches()
            self.env.reset()
            self.cr.rollback()
            self.cr.close()
    def patch_order(self, model, order):
        # Temporarily override the _order of *model*; restored via cleanup.
        m_e = self.env[model]
        m_r = self.registry(model)
        old_order = m_e._order
        @self.addCleanup
        def cleanup():
            m_r._order = type(m_e)._order = old_order
        m_r._order = type(m_e)._order = order
class SingleTransactionCase(BaseCase):
    """ TestCase in which all test methods are run in the same transaction,
    the transaction is started with the first test method and rolled back at
    the end of the last.
    """
    @classmethod
    def setUpClass(cls):
        cls.registry = RegistryManager.get(get_db_name())
        cls.cr = cls.registry.cursor()
        cls.uid = openerp.SUPERUSER_ID
        # Repaired: the original line contained a stray '|' extraction
        # artifact ("cls.env = | api.Environment(...)").
        cls.env = api.Environment(cls.cr, cls.uid, {})
    @classmethod
    def tearDownClass(cls):
        # rollback and close the cursor, and reset the environments
        cls.registry.clear_caches()
        cls.env.reset()
        cls.cr.rollback()
        cls.cr.close()
savepoint_seq = itertools.count()
class SavepointCase(SingleTransactionCase):
    """ Similar to :class:`SingleTransactionCase` in that all test methods
    are run in a single transaction *but* each test case is run inside a
    rollbacked savepoint (sub-transaction).
    Useful for test cases containing fast tests but with significant database
    setup common to all cases (complex in-db test data): :meth:`~.setUpClass`
    can be used to generate db test data once, then all test cases use the
    same data without influencing one another but without having to recreate
    the test data either.
    """
    def setUp(self):
        # every test runs inside its own uniquely-named savepoint
        self._savepoint_id = next(savepoint_seq)
        self.cr.execute('SAVEPOINT test_%d' % self._savepoint_id)
    def tearDown(self):
        # undo everything the test did, then drop cached state
        self.cr.execute('ROLLBACK TO SAVEPOINT test_%d' % self._savepoint_id)
        self.env.clear()
        self.registry.clear_caches()
class RedirectHandler(urllib2.HTTPRedirectHandler):
    """
    HTTPRedirectHandler is predicated upon HTTPErrorProcessor being used and
    works by intercepting 3xy "errors".
    Inherit from it to handle 3xy non-error responses instead, as we're not
    using the error processor
    """
    def http_response(self, request, response):
        code, msg, hdrs = response.code, response.msg, response.info()
        # non-redirect responses pass through untouched
        if not (300 <= code < 400):
            return response
        # hand the redirect to the opener's error machinery so the
        # inherited redirect handling kicks in
        return self.parent.error(
            'http', request, response, code, msg, hdrs)
    https_response = http_response
class HttpCase(TransactionCase):
""" Transactional HTTP TestCase with url_open and phantomjs helpers.
"""
def __init__(self, methodName='runTest'):
super(HttpCase, self).__init__(methodName)
# v8 api with correct xmlrpc exception handling.
self.xmlrpc_url = url_8 = 'http://%s:%d/xmlrpc/2/' % (HOST, PORT)
self.xmlrpc_common = xmlrpclib.ServerProxy(url_8 + 'common')
self.xmlrpc_db = xmlrpclib.ServerProxy(url_8 + 'db')
self.xmlrp |
jiivan/genoomy | genoome/genoome/wsgi.py | Python | mit | 1,563 | 0.00064 | """
WSGI config for genoome project.
This module contains the WSGI application used by Django's development server
and any produc | tion WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with | an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "genoome.settings.development")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
wainersm/buildbot | master/buildbot/data/builders.py | Python | gpl-2.0 | 4,882 | 0.000615 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from buildbot.data import base
from buildbot.data import types
class BuilderEndpoint(base.Endpoint):
    # Single-builder endpoint, optionally scoped to one master.
    isCollection = False
    pathPatterns = """
        /builders/n:builderid
        /masters/n:masterid/builders/n:builderid
    """
    @defer.inlineCallbacks
    def get(self, resultSpec, kwargs):
        """Return one builder dict, or None when unknown / not on the master."""
        builderid = kwargs['builderid']
        bdict = yield self.master.db.builders.getBuilder(builderid)
        if not bdict:
            # defer.returnValue raises, so the bare return is never reached;
            # it is kept for readability.
            defer.returnValue(None)
            return
        if 'masterid' in kwargs:
            # scoped path: the builder must actually run on that master
            if kwargs['masterid'] not in bdict['masterids']:
                defer.returnValue(None)
                return
        defer.returnValue(
            dict(builderid=builderid,
                 name=bdict['name'],
                 masterids=bdict['masterids'],
                 description=bdict['description'],
                 tags=bdict['tags']))
class BuildersEndpoint(base.Endpoint):
    # Collection endpoint: all builders, optionally filtered by master.
    isCollection = True
    rootLinkName = 'builders'
    pathPatterns = """
        /builders
        /masters/n:masterid/builders
    """
    @defer.inlineCallbacks
    def get(self, resultSpec, kwargs):
        """Return a list of builder dicts (masterid=None means all masters)."""
        bdicts = yield self.master.db.builders.getBuilders(
            masterid=kwargs.get('masterid', None))
        defer.returnValue([
            dict(builderid=bd['id'],
                 name=bd['name'],
                 masterids=bd['masterids'],
                 description=bd['description'],
                 tags=bd['tags'])
            for bd in bdicts])
class Builder(base.ResourceType):
    """Data-API resource type describing builders and their master links."""
    name = "builder"
    plural = "builders"
    endpoints = [BuilderEndpoint, BuildersEndpoint]
    keyFields = ['builderid']

    class EntityType(types.Entity):
        builderid = types.Integer()
        name = types.Identifier(50)
        masterids = types.List(of=types.Integer())
        description = types.NoneOk(types.String())
        tags = types.List(of=types.String())
    entityType = EntityType(name)

    def __init__(self, master):
        base.ResourceType.__init__(self, master)

    @base.updateMethod
    def findBuilderId(self, name):
        # Look up (creating if necessary) the id for the named builder.
        return self.master.db.builders.findBuilderId(name)

    @base.updateMethod
    def updateBuilderInfo(self, builderid, description, tags):
        # Repaired: the decorator above was split by a stray '|' artifact
        # ("@base.updateMeth | od") in the original.
        return self.master.db.builders.updateBuilderInfo(builderid, description, tags)

    @base.updateMethod
    @defer.inlineCallbacks
    def updateBuilderList(self, masterid, builderNames):
        # get the "current" list of builders for this master, so we know what
        # changes to make.  Race conditions here aren't a great worry, as this
        # is the only master inserting or deleting these records.
        builders = yield self.master.db.builders.getBuilders(masterid=masterid)
        # figure out what to remove and remove it
        builderNames_set = set(builderNames)
        for bldr in builders:
            if bldr['name'] not in builderNames_set:
                builderid = bldr['id']
                yield self.master.db.builders.removeBuilderMaster(
                    masterid=masterid, builderid=builderid)
                # announce the detachment on the message queue
                self.master.mq.produce(('builders', str(builderid), 'stopped'),
                                       dict(builderid=builderid, masterid=masterid,
                                            name=bldr['name']))
            else:
                builderNames_set.remove(bldr['name'])
        # now whatever's left in builderNames_set is new
        for name in builderNames_set:
            builderid = yield self.master.db.builders.findBuilderId(name)
            yield self.master.db.builders.addBuilderMaster(
                masterid=masterid, builderid=builderid)
            self.master.mq.produce(('builders', str(builderid), 'started'),
                                   dict(builderid=builderid, masterid=masterid, name=name))

    @defer.inlineCallbacks
    def _masterDeactivated(self, masterid):
        # called from the masters rtype to indicate that the given master is
        # deactivated: detach every builder from it
        yield self.updateBuilderList(masterid, [])
|
cawka/packaging-PyNDN | examples/ndnChat/chat.py | Python | bsd-3-clause | 2,131 | 0.030502 | #
# Copyright (c) 2011, Regents of the University of California
# BSD license, See the COPYING file for more information
# Written by: Derek Kulinski <takeda@takeda.tk>
#
import curses, curses.wrapper, curses.textpad, threading, time, sys
from ChatNet import ChatNet, ChatServer
class ChatGUI(object):
    """Curses front-end for the NDN chat: a scrolling output window above a
    single-line input box, fed by ChatNet/ChatServer."""
    def __init__(self, prefix):
        self.prefix = prefix
        self.stdscr = None
        self.max_size = None
        self.chat_sc_border = None
        self.chat_sc = None
        self.input_sc_border = None
        self.input_sc = None
        self.textbox = None

    def window_setup(self):
        """Create the bordered chat-output and input windows."""
        self.max_size = self.stdscr.getmaxyx()
        max_y, max_x = self.max_size
        # Input: 3-row bordered strip at the bottom, 1-row editable area
        self.input_sc_border = curses.newwin(3, max_x, max_y - 3, 0)
        self.input_sc_border.border()
        self.input_sc_border.noutrefresh()
        self.input_sc = curses.newwin(1, max_x - 2, max_y - 2, 1)
        self.textbox = curses.textpad.Textbox(self.input_sc)
        # Output: everything above the input strip, scrolling
        self.chat_sc_border = curses.newwin(max_y - 3, max_x)
        self.chat_sc_border.border()
        self.chat_sc_border.noutrefresh()
        self.chat_sc = curses.newwin(max_y - 5, max_x - 2, 1, 1)
        self.chat_sc.scrollok(True)
        self.chat_sc.noutrefresh()

    def write(self, text):
        # stage the text; the actual screen update happens at doupdate()
        self.chat_sc.addstr(text + "\n")
        self.chat_sc.noutrefresh()

    def callback(self, nick, text):
        # invoked by ChatNet when a remote message arrives
        self.write("<%s> %s" % (nick, text))
        curses.doupdate()

    def input_thread(self):
        # Repaired: stray '|' artifacts split 'ChatServer' and
        # 'server.listen' in the original.
        server = ChatServer(self.prefix)
        thread = threading.Thread(target=server.listen)
        thread.start()
        while True:
            text = self.textbox.edit()
            self.input_sc.erase()
            if text == "":
                continue
            #self.write(text)
            server.send_message(text)

    def curses_code(self, stdscr):
        """Main loop run under curses.wrapper: poll the network every second."""
        self.stdscr = stdscr
        self.window_setup()
        curses.doupdate()
        chatnet = ChatNet(self.prefix, self.callback)
        thread = threading.Thread(target=self.input_thread)
        thread.start()
        while True:
            chatnet.pullData()
            time.sleep(1)
def usage():
    """Print a usage message to stderr and exit with status 1."""
    # removed the commented-out Python-3 print() variant (dead code)
    sys.stderr.write("Usage: %s <URI>\n" % sys.argv[0])
    sys.exit(1)
if __name__ == '__main__':
    # Require exactly one argument: the NDN URI prefix of the chat channel.
    if len(sys.argv) != 2:
        usage()
    gui = ChatGUI(sys.argv[1])
    # curses.wrapper restores the terminal state even if curses_code raises.
    curses.wrapper(gui.curses_code)
|
jawilson/home-assistant | tests/components/climate/test_device_condition.py | Python | apache-2.0 | 10,053 | 0.001492 | """The tests for Climate device conditions."""
import pytest
import voluptuous_serialize
import homeassistant.components.automation as automation
from homeassistant.components.climate import DOMAIN, const, device_condition
from homeassistant.helpers import config_validation as cv, device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
    """Return an empty, loaded, device registry bound to *hass*."""
    return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
    """Return an empty, loaded, entity registry bound to *hass*."""
    return mock_registry(hass)
@pytest.fixture
def calls(hass):
    """Track calls to a mock service (test.automation)."""
    return async_mock_service(hass, "test", "automation")
@pytest.mark.parametrize(
    "set_state,features_reg,features_state,expected_condition_types",
    [
        (False, 0, 0, ["is_hvac_mode"]),
        (False, const.SUPPORT_PRESET_MODE, 0, ["is_hvac_mode", "is_preset_mode"]),
        (True, 0, 0, ["is_hvac_mode"]),
        (True, 0, const.SUPPORT_PRESET_MODE, ["is_hvac_mode", "is_preset_mode"]),
    ],
)
async def test_get_conditions(
    hass,
    device_reg,
    entity_reg,
    set_state,
    features_reg,
    features_state,
    expected_condition_types,
):
    """Test we get the expected conditions from a climate."""
    # Register a config entry + device + entity so device automations resolve.
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(
        DOMAIN,
        "test",
        "5678",
        device_id=device_entry.id,
        supported_features=features_reg,
    )
    if set_state:
        # supported features may come from the live state instead of the registry
        hass.states.async_set(
            f"{DOMAIN}.test_5678", "attributes", {"supported_features": features_state}
        )
    expected_conditions = []
    # Repaired: stray '|' artifacts broke 'expected_conditions' and
    # '"type": condition' in the original.
    expected_conditions += [
        {
            "condition": "device",
            "domain": DOMAIN,
            "type": condition,
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_5678",
        }
        for condition in expected_condition_types
    ]
    conditions = await async_get_device_automations(hass, "condition", device_entry.id)
    assert_lists_same(conditions, expected_conditions)
async def test_if_state(hass, calls):
    """Test for turn_on and turn_off conditions."""
    # Start in HVAC mode "cool" with preset "away" so both conditions hold.
    hass.states.async_set(
        "climate.entity",
        const.HVAC_MODE_COOL,
        {
            const.ATTR_HVAC_MODE: const.HVAC_MODE_COOL,
            const.ATTR_PRESET_MODE: const.PRESET_AWAY,
        },
    )
    # Two automations: one gated on is_hvac_mode, one on is_preset_mode.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {"platform": "event", "event_type": "test_event1"},
                    "condition": [
                        {
                            "condition": "device",
                            "domain": DOMAIN,
                            "device_id": "",
                            "entity_id": "climate.entity",
                            "type": "is_hvac_mode",
                            "hvac_mode": "cool",
                        }
                    ],
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "is_hvac_mode - {{ trigger.platform }} - {{ trigger.event.event_type }}"
                        },
                    },
                },
                {
                    "trigger": {"platform": "event", "event_type": "test_event2"},
                    "condition": [
                        {
                            "condition": "device",
                            "domain": DOMAIN,
                            "device_id": "",
                            "entity_id": "climate.entity",
                            "type": "is_preset_mode",
                            "preset_mode": "away",
                        }
                    ],
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "is_preset_mode - {{ trigger.platform }} - {{ trigger.event.event_type }}"
                        },
                    },
                },
            ]
        },
    )
    # hvac_mode is "cool": the first automation fires.
    hass.bus.async_fire("test_event1")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "is_hvac_mode - event - test_event1"
    # Switch hvac_mode to "auto": is_hvac_mode no longer satisfied.
    hass.states.async_set(
        "climate.entity",
        const.HVAC_MODE_AUTO,
        {
            const.ATTR_HVAC_MODE: const.HVAC_MODE_AUTO,
            const.ATTR_PRESET_MODE: const.PRESET_AWAY,
        },
    )
    # Should not fire
    hass.bus.async_fire("test_event1")
    await hass.async_block_till_done()
    assert len(calls) == 1
    # Preset is still "away": the second automation fires.
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["some"] == "is_preset_mode - event - test_event2"
    # Switch preset to "home": is_preset_mode no longer satisfied.
    hass.states.async_set(
        "climate.entity",
        const.HVAC_MODE_AUTO,
        {
            const.ATTR_HVAC_MODE: const.HVAC_MODE_AUTO,
            const.ATTR_PRESET_MODE: const.PRESET_HOME,
        },
    )
    # Should not fire
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert len(calls) == 2
@pytest.mark.parametrize(
"set_state,capabilities_reg,capabilities_state,condition,expected_capabilities",
[
(
False,
{const.ATTR_HVAC_MODES: [const.HVAC_MODE_COOL, const.HVAC_MODE_OFF]},
{},
"is_hvac_mode",
[
{
"name": "hvac_mode",
"options": [("cool", "cool"), ("off", "off")],
"required": True,
"type": "select",
}
],
),
(
False,
{const.ATTR_PRESET_MODES: [const.PRESET_HOME, const.PRESET_AWAY]},
{},
"is_preset_mode",
[
{
"name": "preset_mode",
"options": [("home", "home"), ("away", "away")],
"required": True,
"type": "select",
}
],
),
(
True,
{},
{const.ATTR_HVAC_MODES: [const.HVAC_MODE_COOL, const.HVAC_MODE_OFF]},
"is_hvac_mode",
[
{
"name": "hvac_mode",
"options": [("cool", "cool"), ("off", "off")],
"required": True,
"type": "select",
}
],
),
(
True,
{},
{const.ATTR_PRESET_MODES: [const.PRESET_HOME, const.PRESET_AWAY]},
"is_preset_mode",
[
{
"name": "preset_mode",
"options": [("home", "home"), ("away", "away")],
"required": True,
"type": "select",
}
],
),
],
)
async def test_capabilities(
hass,
device_reg,
entity_reg,
set_state,
capabilities_reg,
capabilities_state,
condition,
expected_capabilities,
):
"""Test getting capabilities."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
"5678",
|
josch149/pyjs-seminar | website/code/scalign.py | Python | gpl-2.0 | 4,124 | 0.008734 | # author : Johann-Mattis List
# email : mattis.list@uni-marburg.de
# created : 2015-07-11 11:27
# modified : 2015-07-11 11:27
"""
Carry out Sound-Class Based Alignment Analyses.
"""
__author__="Johann-Mattis List"
__date__="2015-07-11"
import json
def segment2class(segment, converter):
    """
    Convert a segment to its sound class.

    Lookup order: the full segment first, then its first character;
    '0' is returned when neither is known to *converter*.
    """
    _missing = object()
    sound_class = converter.get(segment, _missing)
    if sound_class is _missing:
        # fall back to the first character, '0' marks "unknown"
        sound_class = converter.get(segment[0], '0')
    return sound_class
def load_model(model):
    """
    Load the converter for a sound-class model.

    :param model: base name of the model; data is read from '<model>.json'
    :return: dict mapping segments to sound-class symbols
    """
    # BUG FIX: use a context manager so the file handle is closed
    # (the original leaked the handle returned by open()).
    with open(model + '.json') as handle:
        return json.load(handle)
def segments2classes(segments, converter):
    """
    Convert a sound string to a sound-class string.

    :param segments: space-separated string or list of segments
    :param converter: dict mapping segments to sound classes
    :return: list of sound-class symbols
    """
    # idiom fix: isinstance() instead of comparing type() objects
    if isinstance(segments, str):
        segments = segments.split(' ')
    # convert the segments one by one
    return [segment2class(token, converter) for token in segments]
def wf_align(seqA, seqB):
    """
    Align two sequences using the Wagner-Fischer algorithm.

    :param seqA: first sequence (list of symbols)
    :param seqB: second sequence (list of symbols)
    :return: (almA, almB, ED) where almA/almB are the aligned sequences with
        "-" marking gaps and ED is the edit distance, or None when either
        input sequence is empty.
    """
    # check for empty seqs
    if not seqA or not seqB:
        return
    # store length of sequences (+1 for the initial gap row/column)
    m = len(seqA) + 1
    n = len(seqB) + 1
    # create score matrix and traceback (0=diag, 1=gap in B, 2=gap in A)
    M = [[0 for i in range(n)] for j in range(m)]
    T = [[0 for i in range(n)] for j in range(m)]
    # initialize M and T: aligning against the empty prefix costs its length
    for i in range(m):
        M[i][0] = i
    for i in range(n):
        M[0][i] = i
    for i in range(1, m):
        T[i][0] = 1
    for i in range(1, n):
        T[0][i] = 2
    # start the main loop
    for i in range(1, m):
        for j in range(1, n):
            # get the chars
            charA = seqA[i - 1]
            charB = seqB[j - 1]
            # match costs 0, substitution costs 1
            if charA == charB:
                match = M[i - 1][j - 1]
            else:
                match = M[i - 1][j - 1] + 1
            # each gap costs 1
            gapA = M[i - 1][j] + 1
            gapB = M[i][j - 1] + 1
            # ties prefer match/substitution, then a gap in seqB
            # (repaired: this branch carried a stray '|' artifact)
            if match <= gapA and match <= gapB:
                M[i][j] = match
            elif gapA <= gapB:
                M[i][j] = gapA
                T[i][j] = 1  # don't forget the traceback
            else:
                M[i][j] = gapB
                T[i][j] = 2  # don't forget the traceback
    # the edit distance is the bottom-right cell (the original read the
    # leftover loop variables, which is equivalent but fragile)
    ED = M[m - 1][n - 1]
    # start the traceback from the bottom-right corner
    i, j = m - 1, n - 1
    almA, almB = [], []
    while i > 0 or j > 0:
        if T[i][j] == 0:
            almA += [seqA[i - 1]]
            almB += [seqB[j - 1]]
            i -= 1
            j -= 1
        elif T[i][j] == 1:
            almA += [seqA[i - 1]]
            almB += ["-"]
            i -= 1
        else:
            almA += ["-"]
            almB += [seqB[j - 1]]
            j -= 1
    # the traceback was collected back-to-front: reverse
    almA = almA[::-1]
    almB = almB[::-1]
    return almA, almB, ED
def classes2segments(classes, segments):
    """
    Convert an aligned string of sound classes back to the string of segments.

    :param classes: aligned sound-class sequence; "-" marks gaps
    :param segments: original (ungapped) segment sequence
    :return: segments aligned like *classes*, with "-" at gap positions
    """
    # walk both sequences from the back, consuming one segment per
    # non-gap class symbol
    idx = len(segments) - 1
    out = []
    # BUG FIX: removed a leftover debug print() inside the loop
    for i in range(len(classes) - 1, -1, -1):
        if classes[i] == '-':
            out += ['-']
        else:
            out += [segments[idx]]
            idx -= 1
    return out[::-1]
def sca_align(stringA, stringB, model="dolgo"):
    """
    Carry out sound-class based alignment analysis.

    :param stringA: first sequence (space-separated string or list of segments)
    :param stringB: second sequence (same convention as *stringA*)
    :param model: name of the sound-class model; '<model>.json' must exist
    :return: (alignedA, alignedB, edit_distance)
    """
    # idiom fix: isinstance() instead of comparing type() objects.
    # NOTE(review): only stringA's type decides whether BOTH inputs are
    # split, preserving the original behaviour — confirm mixed str/list
    # input is not expected.
    if isinstance(stringA, str):
        stringA = stringA.split(' ')
        stringB = stringB.split(' ')
    # load the converter
    converter = load_model(model)
    # conversion to sound classes
    seqA = segments2classes(stringA, converter)
    seqB = segments2classes(stringB, converter)
    # alignment (wf_align returns None for empty input, which would raise here)
    almA, almB, ED = wf_align(seqA, seqB)
    # back-conversion to the original segments
    outA = classes2segments(almA, stringA)
    outB = classes2segments(almB, stringB)
    return outA, outB, ED
|
joostvdg/jenkins-job-builder | tests/general/test_general.py | Python | apache-2.0 | 1,118 | 0 | # Joint copyright:
# - Copyright 2012,2013 Wikimedia Foundation
# - Copyright 2012,2013 Antoine "hashar" Musso
# - Copyright 2013 Arnaud Fabre
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from testscenarios.testcase import TestWithScenarios
from testtools import TestCase
from jenkins_jobs.modules import general
from tests.base import BaseTestCase
from tests.base import get_scenarios
class TestCaseModuleGeneral(TestWithScenarios, BaseTestCase, TestCase):
    """Scenario-driven fixture tests for the jenkins_jobs 'general' module."""
    # Repaired: a stray '|' artifact split 'fixtures_path' in the original.
    fixtures_path = os.path.join(os.path.dirname(__file__), 'fixtures')
    scenarios = get_scenarios(fixtures_path)
    klass = general.General
|
fishilico/shared | python/clang_cfi_typeid.py | Python | mit | 16,141 | 0.002416 | #!/usr/bin/env python3
"""Compute the CallSiteTypeId of some function types for clang CFI (Control Flow Integrity).
When calling a function indirectly, clang could introduce a call to a function
which checks the type of the called function:
void __cfi_check(uint64 CallSiteTypeId, void *TargetAddr, void *DiagData)
void __cfi_slowpath(uint64 CallSiteTypeId, void *TargetAddr)
void __cfi_slowpath_diag(uint64 CallSiteTypeId, void *TargetAddr, void *DiagData)
The CallSiteTypeId is the MD5 of the typeinfo string, truncated to a 64-bit
integer.
Documentation:
* https://clang.llvm.org/docs/ControlFlowIntegrityDesign.html
* https://struct.github.io/cross_dso_cfi.html
* https://github.com/llvm-mirror/clang/blob/release_80/lib/CodeGen/CodeGenModule.cpp#L1159-L1164
Computation: llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
"""
import hashlib
import subprocess
import sys
KNOWN_TYPEINFO_NAMES = (
# Basic types
("_ZTSa", 0xc4adbb09d853c709, "signed char"),
("_ZTSb", 0x0419a17afc467250, "bool"),
("_ZTSc", 0xab9b79bbaecc55c1, "char"),
("_ZTSd", 0x8a08189bc493b04a, "double"),
("_ZTSe", 0x3f854339f3ac20f9, "long double"),
("_ZTSf", 0x23c0f1ed28f3cc03, "float"),
("_ZTSg", 0x5f17fe5918364716, "__float128"),
("_ZTSh", 0xbf7dfd2059d35aa4, "unsigned char"),
("_ZTSi", 0xf683aa7bca520998, "int"),
("_ZTSj", 0xaf45dda182130b19, "unsigned int"),
("_ZTSl", 0xcfb829a70b269ec2, "long"),
("_ZTSm", 0xca1f38b788aa7176, "unsigned long"),
("_ZTSn", 0x0c7feb997636baa9, "__int128"),
("_ZTSo", 0xf1d24b5b970dbef0, "unsigned __int128"),
("_ZTSs", 0x52e8e3b548517584, "short"),
("_ZTSt", 0xaceb8bc3ab2fb3b1, "unsigned short"),
("_ZTSv", 0xb22fd0e46e167541, "void"),
("_ZTSw", 0xa15acd2d4b3ba9f1, "wchar_t"),
("_ZTSx", 0xdc031bdfaff3779f, "long long"),
| ("_ZTSy", 0x1639a2a5e21b1916, "unsigned long long"),
("_ZTSz", 0xfd | 5f5dc16053a2a4, "..."),
# Two letters, with type modifiers. There are also possible three-letter combinations
("_ZTSra", 0x88f6d4d6eab1df2e, "signed char restrict"),
("_ZTSrb", 0xad687ff06e782ded, "bool restrict"),
("_ZTSrc", 0xdba4d1ceedf018fb, "char restrict"),
("_ZTSrd", 0x0430abdad346705c, "double restrict"),
("_ZTSre", 0x8dc39e9e89741fce, "long double restrict"),
("_ZTSrf", 0x957e6ed91e3c376a, "float restrict"),
("_ZTSrg", 0x075bdd5ca07eed4b, "__float128 restrict"),
("_ZTSrh", 0x789e71a51827091f, "unsigned char restrict"),
("_ZTSri", 0xe2595d5c10100d92, "int restrict"),
("_ZTSrj", 0xa24f106305a9cbf1, "unsigned int restrict"),
("_ZTSrl", 0x4d1bbc1c9df5e7e0, "long restrict"),
("_ZTSrm", 0x284a24a02095fd7d, "unsigned long restrict"),
("_ZTSrn", 0x72428e9cb7967f30, "__int128 restrict"),
("_ZTSro", 0x86a3183f853ace33, "unsigned __int128 restrict"),
("_ZTSrs", 0x73de5486e596e42b, "short restrict"),
("_ZTSrt", 0x54a1ef2e905ff10a, "unsigned short restrict"),
("_ZTSrv", 0x51b28ed0942b3fce, "void restrict"),
("_ZTSrw", 0xd23b046f37e877e5, "wchar_t restrict"),
("_ZTSrx", 0xb728bad8284b61a0, "long long restrict"),
("_ZTSry", 0x38d32f23243bcd4a, "unsigned long long restrict"),
("_ZTSrz", 0xbb41cedf5e8065bf, "... restrict"),
("_ZTSCa", 0x4ff5d986e474fad1, "signed char _Complex", "signed charcomplex"),
("_ZTSCb", 0x4816f8ad6979290e, "bool _Complex", "boolcomplex"),
("_ZTSCc", 0x7fa8f59b01ebf074, "char _Complex", "charcomplex"),
("_ZTSCd", 0x845c45c213cadb6a, "double _Complex", "doublecomplex", "doublecomplex"),
("_ZTSCe", 0x64acad2a49d24cc5, "long double _Complex", "long doublecomplex"),
("_ZTSCf", 0x60a787870523ccec, "float _Complex", "floatcomplex"),
("_ZTSCg", 0x3f4c7013309344bf, "__float128 _Complex", "__float128complex"),
("_ZTSCh", 0x569fad76e8daf47e, "unsigned char _Complex", "unsigned charcomplex"),
("_ZTSCi", 0x2361d577e2e092ee, "int _Complex", "intcomplex"),
("_ZTSCj", 0xbb363c22fb04e032, "unsigned int _Complex", "unsigned intcomplex"),
("_ZTSCl", 0xdd3fc3e7a36ca4b9, "long _Complex", "longcomplex"),
("_ZTSCm", 0x689f36a7b3b0d664, "unsigned long _Complex", "unsigned longcomplex"),
("_ZTSCn", 0xcf583a5324ef7dae, "__int128 _Complex", "__int128complex"),
("_ZTSCo", 0x219cb01a36368fb7, "unsigned __int128 _Complex", "unsigned __int128complex"),
("_ZTSCs", 0xfe00a35d68300ff3, "short _Complex", "shortcomplex"),
("_ZTSCt", 0x251b44bbe7a85f0b, "unsigned short _Complex", "unsigned shortcomplex"),
("_ZTSCv", 0x03ff01e73a793c5c, "void _Complex", "voidcomplex"),
("_ZTSCw", 0x298f4f7f9542de9a, "wchar_t _Complex", "wchar_tcomplex"),
("_ZTSCx", 0xc57f64d471fe4abe, "long long _Complex", "long longcomplex"),
("_ZTSCy", 0xd1c0634e6902a868, "unsigned long long _Complex", "unsigned long longcomplex"),
("_ZTSCz", 0xe38311a225a60dba, "... _Complex", "...complex"),
("_ZTSDa", 0xfb10158115c295e9, "auto"),
("_ZTSDc", 0x5d8409bc536008a9, "decltype(auto)", None),
("_ZTSDd", 0xd083f9d47886cafd, "decimal64"),
("_ZTSDe", 0xb5cf4b4111bdd3d3, "decimal128"),
("_ZTSDf", 0xe1e952e08c0ad611, "decimal32"),
("_ZTSDh", 0x3f7ef17cc32b0dd4, "half"),
("_ZTSDi", 0x503a7dc8daab6211, "char32_t"),
("_ZTSDn", 0xb22d832571e7f23f, "decltype(nullptr)"),
("_ZTSDs", 0xcc352155d790d4ca, "char16_t"),
("_ZTSDu", 0x6b9922ed202cc991, "char8_t", None),
("_ZTSGa", 0x002501a685a95653, "signed char _Imaginary", "signed charimaginary"),
("_ZTSGb", 0x6ab5cd51f02c10c7, "bool _Imaginary", "boolimaginary"),
("_ZTSGc", 0xc204b0de682e3c32, "char _Imaginary", "charimaginary"),
("_ZTSGd", 0xe100c2d34d960fb2, "double _Imaginary", "doubleimaginary"),
("_ZTSGe", 0x157cb03cb1192149, "long double _Imaginary", "long doubleimaginary"),
("_ZTSGf", 0xac7f2b45e7f73f1d, "float _Imaginary", "floatimaginary"),
("_ZTSGg", 0x2d9d5727947746cf, "__float128 _Imaginary", "__float128imaginary"),
("_ZTSGh", 0x5dc5f758dd6a03c0, "unsigned char _Imaginary", "unsigned charimaginary"),
("_ZTSGi", 0xdcdf69d9389c2c4e, "int _Imaginary", "intimaginary"),
("_ZTSGj", 0xd3e11d44de988bd7, "unsigned int _Imaginary", "unsigned intimaginary"),
("_ZTSGl", 0x8a88d8c9f3c559a5, "long _Imaginary", "longimaginary"),
("_ZTSGm", 0x585ea9eab5c9db23, "unsigned long _Imaginary", "unsigned longimaginary"),
("_ZTSGn", 0x7d180229bea09e6c, "__int128 _Imaginary", "__int128imaginary"),
("_ZTSGo", 0x32096bfc85fd2c21, "unsigned __int128 _Imaginary", "unsigned __int128imaginary"),
("_ZTSGs", 0x5cb4f3abbb9d93ed, "short _Imaginary", "shortimaginary"),
("_ZTSGt", 0x726b1738c858485d, "unsigned short _Imaginary", "unsigned shortimaginary"),
("_ZTSGv", 0x0367259a0ed71ac3, "void _Imaginary", "voidimaginary"),
("_ZTSGw", 0xf0269ef5f72cdcd3, "wchar_t _Imaginary", "wchar_timaginary"),
("_ZTSGx", 0xf4e5046d5c0fa4da, "long long _Imaginary", "long longimaginary"),
("_ZTSGy", 0xe6de9a2c91f3e165, "unsigned long long _Imaginary", "unsigned long longimaginary"),
("_ZTSGz", 0x01f833e8ffbe3fc6, "... _Imaginary", "...imaginary"),
("_ZTSKa", 0xabc0ed19f744a038, "signed char const"),
("_ZTSKb", 0x7ffbb567d1d339df, "bool const"),
("_ZTSKc", 0x85276d0a7ead5d42, "char const"),
("_ZTSKd", 0x0e1778364c39409a, "double const"),
("_ZTSKe", 0xa582913a0d15d618, "long double const"),
("_ZTSKf", 0xd4037182ba82510e, "float const"),
("_ZTSKg", 0x39c4f4d9e1fbbc1c, "__float128 const"),
("_ZTSKh", 0x2edf4cf792b50b63, "unsigned char const"),
("_ZTSKi", 0x39f908a442235703, "int const"),
("_ZTSKj", 0x6c17cf52f410f643, "unsigned int const"),
("_ZTSKl", 0x92248e035d6df962, "long const"),
("_ZTSKm", 0xabcc875caef95524, "unsigned long const"),
("_ZTSKn", 0xe365174c6c12b68d, "__int128 const"),
("_ZTSKo", 0xeba7a5232d519954, "unsigned __int128 const"),
("_ZTSKs", 0xc112ecde455d0d53, "short const"),
("_ZTSKt", 0xaf75564e4dabb2fd, "unsigned short const"),
("_ZTSKv", 0x9a1f80bc01aa1992, "void const"),
("_ZTSKw", 0x46e4b3e37c328aac, "wchar_t const"),
("_ZTSKx", 0xe18a330148ba17a0, "long long const"),
("_ZTSKy", 0x6b13850dd23c6414, "unsi |
NaturalEcon/RDb | NatEcon/urls.py | Python | gpl-3.0 | 433 | 0.004619 | from django.conf.urls import patterns, include, url
from RDb import views
from django.contrib i | mport admin
from django.conf import settings
from django.conf.urls.static import static
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'views.index', name='index'),
url(r'^RDb/', include('RDb.urls')),
url(r'^admin/', include(admin.site.urls)),
) + static(settings.STATIC_URL, document_root=settings.STATIC_RO | OT)
|
weigj/django-multidb | tests/regressiontests/model_fields/tests.py | Python | bsd-3-clause | 1,604 | 0 | """
>>> from django.db.models.fields import *
>>> try:
... from decimal import Decimal
... except ImportError:
... from django.utils._decimal import Decimal
# DecimalField
>>> f = DecimalField(max_digits=4, decimal_places=2)
>>> f.to_python(3) == Decimal("3")
True
>>> f.to_python("3.14") == Decimal("3.14")
True
>>> f.to_python("abc")
Traceback (most recent call last):
...
ValidationError: This value must be a decimal number.
>>> f = DecimalField(max_digits=5, decimal_places=1)
>>> x = f.to_python(2)
>>> y = f.to_python('2.6')
>>> f._format(x)
u'2.0'
>>> f._format(y)
u'2.6'
>>> f._format(None)
>>> f.get_db_prep_lookup('exact', None)
[None]
# DateTimeField and TimeField to_python should support usecs:
>>> f = DateTimeField()
>>> f.to_python('2001-01-02 03:04:05.000006')
datetime.datetime(2001, 1, 2, 3, 4, 5, 6)
>>> f.to_python('2001-01-02 03:04:05.999999')
datetime.datetime(2001, 1, 2, 3, 4, 5, 999999)
>>> f = TimeField()
>>> f.to_python('01:02:03.000004')
datetime.time(1, 2, 3, 4)
>>> f.to_python('01:02:03.999999')
datetime.time(1, 2, 3, 999999)
# Boolean and null boolean fields
>>> f = BooleanField()
>>> for val in (True, '1', 1):
... f.get_db_prep_lookup('exact', val)
[True]
[True]
[True]
>>> for val in (False, '0', 0):
.. | . f.get_db_prep_lookup('exact', val)
[False]
[False]
[False]
>>> f = NullBooleanField()
>>> for val in (True, '1', 1):
... f.get_db_prep_lookup('exact', val)
[True]
[True]
[True]
>>> for val in (False, '0', 0):
... f.get_db_prep_lookup('exact', val)
[False]
[False]
[False]
>>> f.get_db_prep_lookup('exact', No | ne)
[None]
"""
|
Gehn/JustAChatBot | sleekxmpp/plugins/xep_0079/amp.py | Python | mit | 2,578 | 0.001552 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2013 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
    See the file LICENSE for copying permission
"""
import logging
from sleekxmpp.stanza import Message, Error, StreamFeatures
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.xmlstream.matcher import StanzaPath, MatchMany
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.plugins.xep_0079 import stanza
log = logging.getLogger(__name__)
class XEP_0079(BasePlugin):

    """
    XEP-0079 Advanced Message Processing
    """

    name = 'xep_0079'
    description = 'XEP-0079: Advanced Message Processing'
    dependencies = set(['xep_0030'])
    stanza = stanza

    def plugin_init(self):
        """Register the AMP stanza extensions and the response handler."""
        register_stanza_plugin(Message, stanza.AMP)
        register_stanza_plugin(Error, stanza.InvalidRules)
        register_stanza_plugin(Error, stanza.UnsupportedConditions)
        register_stanza_plugin(Error, stanza.UnsupportedActions)
        register_stanza_plugin(Error, stanza.FailedRules)

        # One callback matches both AMP error replies and AMP status
        # messages so _handle_amp_response can dispatch on content.
        self.xmpp.register_handler(
            Callback('AMP Response',
                MatchMany([
                    StanzaPath('message/error/failed_rules'),
                    StanzaPath('message/amp')
                ]),
                self._handle_amp_response))

        if not self.xmpp.is_component:
            # Clients advertise/negotiate AMP as a stream feature.
            self.xmpp.register_feature('amp',
                    self._handle_amp_feature,
                    restart=False,
                    order=9000)
            register_stanza_plugin(StreamFeatures, stanza.AMPFeature)

    def plugin_end(self):
        """Unregister the AMP response handler on plugin shutdown."""
        self.xmpp.remove_handler('AMP Response')

    def _handle_amp_response(self, msg):
        """Raise an event for an incoming AMP response.

        Error replies fire ``amp_error``; 'alert' and 'notify' statuses
        fire ``amp_alert`` / ``amp_notify`` respectively.
        """
        if msg['type'] == 'error':
            self.xmpp.event('amp_error', msg)
        elif msg['amp']['status'] in ('alert', 'notify'):
            self.xmpp.event('amp_%s' % msg['amp']['status'], msg)

    def _handle_amp_feature(self, features):
        """Record that the server stream advertised AMP support."""
        log.debug('Advanced Message Processing is available.')
        self.xmpp.features.add('amp')

    def discover_support(self, jid=None, **iqargs):
        """Query *jid* (our server by default) for AMP support via disco#info."""
        if jid is None:
            if self.xmpp.is_component:
                jid = self.xmpp.server_host
            else:
                jid = self.xmpp.boundjid.host
        return self.xmpp['xep_0030'].get_info(
            jid=jid,
            node='http://jabber.org/protocol/amp',
            **iqargs)
|
ashwinm76/alienfx | alienfx/core/controller_m15x.py | Python | gpl-3.0 | 4,057 | 0.003451 | #
# controller_m15x.py
#
# Copyright (C) 2013-2014 Ashwin Menon <ashwin.menon@gmail.com>
# Copyright (C) 2015-2018 Track Master Steve <trackmastersteve@gmail.com>
#
# Alienfx is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# Alienfx is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with alienfx. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
""" Specialization of the AlienFxController class for the M15x controller.
This module provides the following classes:
AlienFXControllerM15x : M15x controller
"""
import alienfx.core.controller as alienfx_controller
class AlienFXControllerM15x(alienfx_controller.AlienFXController):
    """ Specialization of the AlienFxController class for the M15x controller.
    """

    # Speed capabilities. The higher the number, the slower the speed of
    # blink/morph actions. The min speed is selected by trial and error as
    # the lowest value that will not result in strange blink/morph behaviour.
    DEFAULT_SPEED = 200
    MIN_SPEED = 50

    # Zone codes: bitmask values identifying each addressable light zone.
    LEFT_KEYBOARD = 0x0001
    MIDDLE_LEFT_KEYBOARD = 0x0002
    MIDDLE_RIGHT_KEYBOARD = 0x0004
    RIGHT_KEYBOARD = 0x0008

    # Both speakers change together
    RIGHT_SPEAKER = 0x0020
    LEFT_SPEAKER = 0x0040

    ALIEN_HEAD = 0x0080
    LOGO = 0x0100
    TOUCH_PAD = 0x0200
    MEDIA_BAR = 0x0800
    POWER_BUTTON = 0x2000
    HDD_LEDS = 0x4000

    # Reset codes
    RESET_ALL_LIGHTS_OFF = 3
    RESET_ALL_LIGHTS_ON = 4

    # State codes: power states the controller can apply lighting for.
    BOOT = 1
    AC_SLEEP = 2
    AC_CHARGED = 5
    AC_CHARGING = 6
    BATTERY_SLEEP = 7
    BATTERY_ON = 8
    BATTERY_CRITICAL = 9

    def __init__(self):
        """Initialise the base controller and fill in the M15x-specific
        USB IDs, zone/reset/state name-to-code maps and power zones."""
        alienfx_controller.AlienFXController.__init__(self)
        self.name = "Alienware M15x"

        # USB VID and PID
        self.vendor_id = 0x187c
        self.product_id = 0x0512

        # map the zone names to their codes
        self.zone_map = {
            self.ZONE_LEFT_KEYBOARD: self.LEFT_KEYBOARD,
            self.ZONE_MIDDLE_LEFT_KEYBOARD: self.MIDDLE_LEFT_KEYBOARD,
            self.ZONE_MIDDLE_RIGHT_KEYBOARD: self.MIDDLE_RIGHT_KEYBOARD,
            self.ZONE_RIGHT_KEYBOARD: self.RIGHT_KEYBOARD,
            self.ZONE_RIGHT_SPEAKER: self.RIGHT_SPEAKER,
            self.ZONE_LEFT_SPEAKER: self.LEFT_SPEAKER,
            self.ZONE_ALIEN_HEAD: self.ALIEN_HEAD,
            self.ZONE_LOGO: self.LOGO,
            self.ZONE_TOUCH_PAD: self.TOUCH_PAD,
            self.ZONE_MEDIA_BAR: self.MEDIA_BAR,
            self.ZONE_POWER_BUTTON: self.POWER_BUTTON,
            self.ZONE_HDD_LEDS: self.HDD_LEDS,
        }

        # zones that have special behaviour in the different power states
        self.power_zones = [
            self.ZONE_POWER_BUTTON,
            self.ZONE_HDD_LEDS
        ]

        # map the reset names to their codes
        self.reset_types = {
            self.RESET_ALL_LIGHTS_OFF: "all-lights-off",
            self.RESET_ALL_LIGHTS_ON: "all-lights-on"
        }

        # map the state names to their codes
        self.state_map = {
            self.STATE_BOOT: self.BOOT,
            self.STATE_AC_SLEEP: self.AC_SLEEP,
            self.STATE_AC_CHARGED: self.AC_CHARGED,
            self.STATE_AC_CHARGING: self.AC_CHARGING,
            self.STATE_BATTERY_SLEEP: self.BATTERY_SLEEP,
            self.STATE_BATTERY_ON: self.BATTERY_ON,
            self.STATE_BATTERY_CRITICAL: self.BATTERY_CRITICAL
        }
alienfx_controller.AlienFXController.supported_controllers.append(
AlienFXControllerM15 | x())
|
kmcginn/advent-of-code | 2016/day04/security.py | Python | mit | 922 | 0.003254 | from collections import defaultdict
from operator import itemgetter
import re
def isRealRoom(name, checksum):
    """Return True if *checksum* is the correct checksum for room *name*.

    The expected checksum is the five most common letters of *name*
    (dashes ignored), ordered by decreasing frequency with ties broken
    alphabetically (Advent of Code 2016, day 4).

    Raises:
        ValueError: if *checksum* is not exactly five characters long.
                    (ValueError subclasses Exception, so existing callers
                    catching the old bare Exception still work.)
    """
    if len(checksum) != 5:
        raise ValueError("checksum must be exactly 5 characters: %r" % checksum)
    totals = defaultdict(int)
    for c in name:
        if c != '-':
            totals[c] += 1
    # Single sort with a composite key replaces the original two stable
    # sorts: highest count first, alphabetical on ties.
    ranked = sorted(totals, key=lambda letter: (-totals[letter], letter))
    return ''.join(ranked[:5]) == checksum
def main():
    """Sum the sector IDs of every real room listed in input.txt and print it.

    Each input line has the form "encrypted-name-123[abcde]": dash-separated
    name parts, a numeric sector ID, and a five-letter checksum in brackets.
    """
    sectorSum = 0
    # "with" guarantees the file handle is closed even if a line fails to
    # parse; the original opened the file and never closed it.
    with open('input.txt', 'r') as f:
        for line in f:
            room, metadata = line.rsplit('-', 1)
            match = re.search(r'(\d+)\[(.{5})\]', metadata)
            sector = int(match.group(1))
            checksum = match.group(2)
            if isRealRoom(room, checksum):
                sectorSum += sector
    print(sectorSum)

if __name__ == "__main__":
    main()
|
jeanmask/opps | opps/core/management/commands/exportcontainerbox.py | Python | mit | 546 | 0 | from django.core.management.base import BaseCommand
from django.core import serializers
from opps.boxes.models import QuerySet
fro | m opps.channels.models import Channel
from opps.containers.models import ContainerBox
class Command(BaseCommand):
    """Export all Channel, ContainerBox and QuerySet rows as JSON files.

    Writes one fixture file per model, named "opps_<ModelName>.json",
    into the current working directory.
    """

    def handle(self, *args, **options):
        models = [Channel, ContainerBox, QuerySet]
        for m in models:
            data = serializers.serialize("json", m.objects.all())
            # "with" guarantees the file is closed even if the write
            # fails; the original leaked the handle on exceptions.
            with open("opps_{0}.json".format(m.__name__), "w") as out:
                out.write(data)
|
factorlibre/odoo-addons-cpo | purchase_compute_order_product_filter_season/models/computed_purchase_order.py | Python | agpl-3.0 | 696 | 0 | # -*- coding: utf-8 -*-
# © 2016 FactorLibre - Hugo Santos <hug | o.santos@factorlibre.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, fields, models
class ComputedPurchaseOrder(models.Model):
    # Extends the computed purchase order with an optional season filter
    # applied when selecting products to restock.
    _inherit = 'computed.purchase.order'

    product_season = fields.Many2one('product.season', 'Product Season')

    @api.multi
    def _active_product_stock_product_domain(self, psi_ids):
        """Append a season clause to the product search domain.

        Delegates to the parent implementation, then narrows the domain
        to products of ``product_season`` when one is set on the order.
        """
        product_domain = super(ComputedPurchaseOrder, self).\
            _active_product_stock_product_domain(psi_ids)
        if self.product_season:
            product_domain.append(('season_id', '=', self.product_season.id))
        return product_domain
|
eufarn7sp/egads-eufar | egads/thirdparty/nappy/nc_interface/na_to_nc.py | Python | bsd-3-clause | 3,191 | 0.008461 | # Copyright (C) 2004 CCLRC & NERC( Natural Environment Research Council ).
# This software may be distributed under the terms of the
# Q Public License, version 1.0 or later. http://ndg.nerc.ac.uk/public_docs/QPublic_license.txt
"""
na_to_nc.py
===========
Contains the NAToNC class for converting a NASA Ames file to a NetCDF file.
"""
# Imports from python standard library
import logging
# Imports from external packages
try:
import cdms2 as cdms
except:
try:
import cdms
except:
raise Exception("Could not import third-party software. Nappy requires the CDMS and Numeric packages to be installed to convert to CDMS and NetCDF.")
# Import from nappy package
import nappy.nc_interface.na_to_cdms
from nappy.na_error import na_error
logging.basicConfig()
log = logging.getLogger(__name__)
class NAToNC(nappy.nc_interface.na_to_cdms.NADictToCdmsObjects):
    """
    Converts a NASA Ames file to a NetCDF file.
    """

    def __init__(self, na_file, variables=None, aux_variables=None,
                 global_attributes=[("Conventions", "CF-1.0")],
                 time_units=None, time_warning=True,
                 rename_variables={}):
        """
        Sets up instance variables. Note that the argument 'na_file' has a relaxed
        definition and can be either a NASA Ames file object or the name of a
        NASA Ames file.

        Typical usage is:
            >>> import nappy.nc_interface.na_to_nc as na_to_nc
            >>> c = na_to_nc.NAToNC("old_file.na")
            >>> c.convert()
            >>> c.writeNCFile("new_file.nc")
        """
        # Accept either a file name or an already-open NASA Ames file object.
        # (A stray debug print statement was removed here; the isinstance
        # check replaces the fragile `type(x) == type("string")` comparison.)
        # NOTE(review): the mutable defaults (list/dict) are kept unchanged
        # for interface compatibility; assumed not mutated downstream.
        na_file_obj = na_file
        if isinstance(na_file_obj, str):
            na_file_obj = nappy.openNAFile(na_file_obj)

        nappy.nc_interface.na_to_cdms.NADictToCdmsObjects.__init__(
            self, na_file_obj, variables=variables,
            aux_variables=aux_variables,
            global_attributes=global_attributes,
            time_units=time_units, time_warning=time_warning,
            rename_variables=rename_variables)

    def writeNCFile(self, file_name, mode="w"):
        """
        Writes the NASA Ames content that has been converted into CDMS objects to a
        NetCDF file of name 'file_name'. Note that mode can be set to append so you
        can add the data to an existing file.
        """
        if not self.converted:
            self.convert()

        # Create CDMS output file object
        fout = cdms.open(file_name, mode=mode)

        # Write main variables
        for var in self.cdms_variables:
            fout.write(var)

        # Write aux variables
        for avar in self.cdms_aux_variables:
            fout.write(avar)

        # Write global attributes
        for (att, value) in self.global_attributes:
            setattr(fout, att, value)

        fout.close()
        log.info("NetCDF file '%s' written successfully." % file_name)
        return True
|
sekikn/ambari | ambari-common/src/main/python/ambari_ws4py/streaming.py | Python | apache-2.0 | 13,075 | 0.002141 | # -*- coding: utf-8 -*-
import struct
from struct import unpack
from ambari_ws4py.utf8validator import Utf8Validator
from ambari_ws4py.messaging import TextMessage, BinaryMessage, CloseControlMessage,\
PingControlMessage, PongControlMessage
from ambari_ws4py.framing import Frame, OPCODE_CONTINUATION, OPCODE_TEXT, \
OPCODE_BINARY, OPCODE_CLOSE, OPCODE_PING, OPCODE_PONG
from ambari_ws4py.exc import FrameTooLargeException, ProtocolException, InvalidBytesError,\
TextFrameEncodingException, UnsupportedFrameTypeException, StreamClosed
from ambari_ws4py.compat import py3k
VALID_CLOSING_CODES = [1000, 1001, 1002, 1003, 1007, 1008, 1009, 1010, 1011]
class Stream(object):
    def __init__(self, always_mask=False, expect_masking=True):
        """ Represents a websocket stream of bytes flowing in and out.

        The stream doesn't know about the data provider itself and
        doesn't even know about sockets. Instead the stream simply
        yields for more bytes whenever it requires them. The stream owner
        is responsible to provide the stream with those bytes until
        a frame can be interpreted.

        .. code-block:: python
           :linenos:

           >>> s = Stream()
           >>> s.parser.send(BYTES)
           >>> s.has_messages
           False
           >>> s.parser.send(MORE_BYTES)
           >>> s.has_messages
           True
           >>> s.message
           <TextMessage ... >

        Set ``always_mask`` to mask all frames built.

        Set ``expect_masking`` to indicate masking will be
        checked on all parsed frames.
        """

        self.message = None
        """
        Parsed test or binary messages. Whenever the parser
        reads more bytes from a fragment message, those bytes
        are appended to the most recent message.
        """

        self.pings = []
        """
        Parsed ping control messages. They are instances of
        :class:`ws4py.messaging.PingControlMessage`
        """

        self.pongs = []
        """
        Parsed pong control messages. They are instances of
        :class:`ws4py.messaging.PongControlMessage`
        """

        self.closing = None
        """
        Parsed close control messsage. Instance of
        :class:`ws4py.messaging.CloseControlMessage`
        """

        self.errors = []
        """
        Detected errors while parsing. Instances of
        :class:`ws4py.messaging.CloseControlMessage`
        """

        self._parser = None
        """
        Parser in charge to process bytes it is fed with.
        """

        # always_mask: mask every outgoing frame we build.
        # expect_masking: reject parsed frames whose masking does not
        # match this expectation (servers expect masked client frames).
        self.always_mask = always_mask
        self.expect_masking = expect_masking
    @property
    def parser(self):
        """Lazily create and prime the frame-parsing generator.

        The generator returned by :meth:`receiver` is created on first
        access and advanced once so it is ready to accept bytes via
        ``send()``.
        """
        if self._parser is None:
            self._parser = self.receiver()
            # Python generators must be initialized once.
            next(self.parser)
        return self._parser
    def _cleanup(self):
        """
        Frees the stream's resources rendering it unusable.
        """
        self.message = None
        if self._parser is not None:
            # Only close the generator if it is not currently executing;
            # closing a running generator raises ValueError.
            if not self._parser.gi_running:
                self._parser.close()
            self._parser = None
        self.errors = None
        self.pings = None
        self.pongs = None
        self.closing = None
def text_message(self, text):
"""
Returns a :class:`ws4py.messaging.TextMessage` instance
ready to be built. Convenience method so
that the caller doesn't need to import the
:class:`ws4py.messaging.TextMessage` class itself.
"""
return TextMessage(text=text)
def binary_message(self, bytes):
"""
Returns a :class:`ws4py.messaging.BinaryMessage` instance
ready to be built. Convenience method so
that the caller doesn't need to import the
:class:`ws4py.messaging.BinaryMessage` class itself.
"""
return BinaryMessage(bytes)
@property
def has_message(self):
"""
Checks if the stream has received any message
which, if fragmented, is now completed.
"""
if self.message is not None:
return self.message.completed
return False
def close(self, code=1000, reason=''):
"""
Returns a close control message built from
a :class:`ws4py.messaging.CloseControlMessage` instance,
using the given status ``code`` and ``reason`` message.
"""
return CloseControlMessage(code=code, reason=reason)
def ping(self, data=''):
"""
Returns a ping control message built from
a :class:`ws4py.messaging.PingControlMessage` instance.
"""
return PingControlMessage(data).single(mask=self.always_mask)
    def pong(self, data=''):
        """
        Returns a pong control message built from
        a :class:`ws4py.messaging.PongControlMessage` instance.
        """
        return PongControlMessage(data).single(mask=self.always_mask)
def receiver(self):
"""
Parser that keeps trying to interpret bytes it is fed with as
incoming frames part of a message.
Control message are single frames only while data messages, like text
and binary, may be fragmented accross frames.
The way it works is by instanciating a :class:`wspy.framing.Frame` object,
then running its parser generator which yields how much bytes
it requires to performs its task. The stream parser yields this value
to its caller and feeds the frame parser.
When the frame parser raises :exc:`StopIteration`, the stream parser
tries to make sense of the parsed frame. It dispatches the frame's bytes
to the most appropriate message type based on the frame's opcode.
Overall this makes the stream parser totally agonstic to
the data provider.
"""
utf8validator = Utf8Validator()
running = True
frame = None
while running:
frame = Frame()
while 1:
try:
some_bytes = (yield next(frame.parser))
frame.parser.send(some_bytes)
except GeneratorExit:
running = False
break
except StopIteration:
frame._cleanup()
some_bytes = frame.body
# Let's avoid unmasking when there is no payload
if some_bytes:
if frame.masking_key and self.expect_masking:
some_bytes = frame.unmask(some_bytes)
elif not frame.masking_key and self.expect_masking:
msg = CloseControlMessage(code=1002, reason='Missing masking when expected')
self.errors.append(msg)
break
elif frame.masking_key and not self.expect_masking:
msg = CloseControlMessage(code=1002, reason='Masked when not expected')
self.errors.append(msg)
break
else:
# If we reach this stage, it's because
# the frame wasn't masked and we didn't expect
# it anyway. Therefore, on py2k, the bytes
# are actually a str object and can't be used
# in the utf8 validator as we need integers
# when we get each byte one by one.
# Our only solution here is to convert our
# string to a bytearray.
some_bytes = bytearray(some_bytes)
if frame.opcode == OPCODE_TEXT:
if self.message and not self.message.completed:
# We got a text frame before we completed the previous one
msg = CloseControlMessage(code=1002, reason='Received a new message before completing previous')
self.errors |
dafrito/trac-mirror | trac/ticket/tests/report.py | Python | bsd-3-clause | 4,474 | 0.001342 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import doctest
from trac.db.mysql_backend import MySQLConnection
from trac.ticket.report import ReportModule
from trac.test import EnvironmentStub, Mock
from trac.web.api import Request, RequestDone
import trac
import unittest
from StringIO import StringIO
class MockMySQLConnection(MySQLConnection):
def __init__(self):
pass
class ReportTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub()
self.report_module = ReportModule(self.env)
def tearDown(self):
self.env.reset_db()
def _make_environ(self, scheme='http', server_name='example.org',
server_port=80, method='GET', script_name='/trac',
**kwargs):
environ = {'wsgi.url_scheme': scheme, 'wsgi.input': StringIO(''),
'REQUEST_METHOD': method, 'SERVER_NAME': server_name,
'SERVER_PORT': server_port, 'SCRIPT_NAME': script_name}
environ.update(kwargs)
return environ
def test_sub_var_no_quotes(self):
sql, values, missing_args = self.report_module.sql_sub_vars(
"$VAR", {'VAR': 'value'})
self.assertEqual("%s", sql)
self.assertEqual(['value'], values)
self.assertEqual([], missing_args)
def test_sub_var_digits_underscore(self):
sql, values, missing_args = self.report_module.sql_sub_vars(
"$_VAR, $VAR2, $2VAR", {'_VAR': 'value1', 'VAR2': 'value2'})
self.assertEqual("%s, %s, $2VAR", sql)
self.assertEqual(['value1', 'value2'], values)
self.assertEqual([], missing_args)
def test_sub_var_quotes(self):
sql, values, missing_args = self.report_module.sql_sub_vars(
"'$VAR'", {'VAR': 'value'})
self.assertEqual(self.env.get_read_db().concat("''", '%s', "''"), sql)
self.assertEqual(['value'], values)
self.assertEqual([], missing_args)
def test_sub_var_missing_args(self):
sql, values, missing_args = self.report_module.sql_sub_vars(
| "$VAR, $PARAM, $MISSING", {'VAR': 'value'})
self.assertEqual("%s, %s, %s", sql)
self.assertEqual(['value', '', ''], values)
self.assertEqual(['PARAM', 'MISSING'], missing_args)
def test_csv_escape(self):
buf = StringIO()
def start_response(status, headers):
return buf.write
environ = self._make_environ()
req = Request(env | iron, start_response)
cols = ['TEST_COL', 'TEST_ZERO']
rows = [('value, needs escaped', 0)]
try:
self.report_module._send_csv(req, cols, rows)
except RequestDone:
pass
self.assertEqual('\xef\xbb\xbfTEST_COL,TEST_ZERO\r\n"value, needs escaped",0\r\n',
buf.getvalue())
def test_saved_custom_query_redirect(self):
query = u'query:?type=résumé'
db = self.env.get_db_cnx()
cursor = db.cursor()
cursor.execute("INSERT INTO report (title,query,description) "
"VALUES (%s,%s,%s)", ('redirect', query, ''))
id = db.get_last_id(cursor, 'report')
db.commit()
headers_sent = {}
def start_response(status, headers):
headers_sent.update(dict(headers))
environ = self._make_environ()
req = Request(environ, start_response)
req.authname = 'anonymous'
req.session = Mock(save=lambda: None)
self.assertRaises(RequestDone,
self.report_module._render_view, req, id)
self.assertEqual('http://example.org/trac/query?' + \
'type=r%C3%A9sum%C3%A9&report=' + str(id),
headers_sent['Location'])
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(trac.ticket.report))
suite.addTest(unittest.makeSuite(ReportTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
jmcarpenter2/swifter | swifter/swifter_tests.py | Python | mit | 31,276 | 0.000831 | import sys
import importlib
import unittest
import subprocess
import time
import logging
import warnings
from psutil import cpu_count
import numpy as np
import numpy.testing as npt
import pandas as pd
import swifter
from tqdm.auto import tqdm
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)-8s.%(msecs)03d %(levelname)-8s %(name)s:%(lineno)-3s %(message)s"
)
ch.setFormatter(formatter)
LOG.addHandler(ch)
def math_vec_square(x):
return x**2
def math_foo(x, compare_to=1):
return x**2 if x < compare_to else x ** (1 / 2)
def math_vec_multiply(row):
return row["x"] * row["y"]
def math_agg_foo(row):
return row.sum() - row.min()
def text_foo(row):
if row["letter"] == "A":
return row["value"] * 3
elif row["letter"] == "B":
return row["value"] ** 3
elif row["letter"] == "C":
return row["value"] / 3
elif row["letter"] == "D":
return row["value"] ** (1 / 3)
elif row["letter"] == "E":
return row["value"]
def clean_text_foo(row):
text = " ".join(row)
text = text.strip()
text = text.replace(" ", "_")
return text
def run_if_modin_installed(cls):
# if modin is installed, run the test/test suite
if importlib.util.find_spec("modin") is not None:
return cls
else: # if modin isnt installed just skip the test(s)
return True
class TestSwifter(unittest.TestCase):
    """Base case wiring pandas/modin equality helpers into unittest's comparisons."""

    def assertSeriesEqual(self, a, b, msg):
        """Equality hook for pd.Series, registered via addTypeEqualityFunc."""
        try:
            pd.testing.assert_series_equal(a, b)
        except AssertionError as exc:
            raise self.failureException(msg) from exc

    def assertDataFrameEqual(self, a, b, msg):
        """Equality hook for pd.DataFrame, registered via addTypeEqualityFunc."""
        try:
            pd.testing.assert_frame_equal(a, b)
        except AssertionError as exc:
            raise self.failureException(msg) from exc

    def assertModinSeriesEqual(self, a, b, msg):
        """Equality hook for modin Series (compared as almost-equal numeric arrays)."""
        try:
            npt.assert_array_almost_equal(a, b)
        except AssertionError as exc:
            raise self.failureException(msg) from exc

    def assertModinDataFrameEqual(self, a, b, msg):
        """Equality hook for modin DataFrames (compared as almost-equal numeric arrays)."""
        try:
            npt.assert_array_almost_equal(a, b)
        except AssertionError as exc:
            raise self.failureException(msg) from exc

    def modinSetUp(self):
        """
        Imports modin before swifter so that we have access to modin functionality
        """
        import modin.pandas as md
        import swifter
        swifter.register_modin()
        self.addTypeEqualityFunc(md.Series, self.assertModinSeriesEqual)
        self.addTypeEqualityFunc(md.DataFrame, self.assertModinDataFrameEqual)
        return md

    def setUp(self):
        """Register pandas equality hooks and record the local core count."""
        LOG.info(f"Version {swifter.__version__}")
        self.addTypeEqualityFunc(pd.Series, self.assertSeriesEqual)
        self.addTypeEqualityFunc(pd.DataFrame, self.assertDataFrameEqual)
        self.ncores = cpu_count()
class TestSetup(TestSwifter):
    """Tests for the swifter accessor's configuration setters and stdout behavior."""

    def test_set_npartitions(self):
        """set_npartitions overrides the default partition count (2 * n_cores)."""
        LOG.info("test_set_npartitions")
        for swifter_df, set_npartitions, expected in zip(
            [
                pd.DataFrame().swifter,
                pd.Series().swifter,
                pd.DataFrame(
                    {"x": np.arange(0, 10)},
                    index=pd.date_range("2019-01-1", "2020-01-1", periods=10),
                ).swifter.rolling("1d"),
                pd.DataFrame(
                    {"x": np.arange(0, 10)},
                    index=pd.date_range("2019-01-1", "2020-01-1", periods=10),
                ).swifter.resample("3T"),
            ],
            [None, 1000, 1001, 1002],
            [cpu_count() * 2, 1000, 1001, 1002],
        ):
            before = swifter_df._npartitions
            swifter_df.set_npartitions(set_npartitions)
            actual = swifter_df._npartitions
            self.assertEqual(actual, expected)
            if set_npartitions is not None:
                # None keeps the default, so only explicit values must change it
                self.assertNotEqual(before, actual)

    def test_set_dask_threshold(self):
        """set_dask_threshold replaces the dask switch-over threshold on every accessor."""
        LOG.info("test_set_dask_threshold")
        expected = 1000
        for swifter_df in [
            pd.DataFrame().swifter,
            pd.Series().swifter,
            pd.DataFrame(
                {"x": np.arange(0, 10)},
                index=pd.date_range("2019-01-1", "2020-01-1", periods=10),
            ).swifter.rolling("1d"),
            pd.DataFrame(
                {"x": np.arange(0, 10)},
                index=pd.date_range("2019-01-1", "2020-01-1", periods=10),
            ).swifter.resample("3T"),
        ]:
            before = swifter_df._dask_threshold
            swifter_df.set_dask_threshold(expected)
            actual = swifter_df._dask_threshold
            self.assertEqual(actual, expected)
            self.assertNotEqual(before, actual)

    def test_set_dask_scheduler(self):
        """set_dask_scheduler stores an arbitrary scheduler name on every accessor."""
        LOG.info("test_set_dask_scheduler")
        expected = "my-scheduler"
        for swifter_df in [
            pd.DataFrame().swifter,
            pd.Series().swifter,
            pd.DataFrame(
                {"x": np.arange(0, 10)},
                index=pd.date_range("2019-01-1", "2020-01-1", periods=10),
            ).swifter.rolling("1d"),
            pd.DataFrame(
                {"x": np.arange(0, 10)},
                index=pd.date_range("2019-01-1", "2020-01-1", periods=10),
            ).swifter.resample("3T"),
        ]:
            before = swifter_df._scheduler
            swifter_df.set_dask_scheduler(expected)
            actual = swifter_df._scheduler
            self.assertEqual(actual, expected)
            self.assertNotEqual(before, actual)

    def test_disable_progress_bar(self):
        """progress_bar(False) disables the progress bar flag on every accessor."""
        LOG.info("test_disable_progress_bar")
        expected = False
        for swifter_df in [
            pd.DataFrame().swifter,
            pd.Series().swifter,
            pd.DataFrame(
                {"x": np.arange(0, 10)},
                index=pd.date_range("2019-01-1", "2020-01-1", periods=10),
            ).swifter.rolling("1d"),
            pd.DataFrame(
                {"x": np.arange(0, 10)},
                index=pd.date_range("2019-01-1", "2020-01-1", periods=10),
            ).swifter.resample("3T"),
        ]:
            before = swifter_df._progress_bar
            swifter_df.progress_bar(expected)
            actual = swifter_df._progress_bar
            self.assertEqual(actual, expected)
            self.assertNotEqual(before, actual)

    def test_allow_dask_on_strings(self):
        """allow_dask_on_strings(True) flips the string-handling flag (DataFrame only)."""
        LOG.info("test_allow_dask_on_strings")
        expected = True
        swifter_df = pd.DataFrame().swifter
        before = swifter_df._allow_dask_on_strings
        swifter_df.allow_dask_on_strings(expected)
        actual = swifter_df._allow_dask_on_strings
        self.assertEqual(actual, expected)
        self.assertNotEqual(before, actual)

    def test_stdout_redirected(self):
        """A swifter apply in a subprocess should emit exactly one line per row batch,
        i.e. swifter itself must not pollute stdout when the progress bar is off."""
        print_messages = subprocess.check_output(
            [
                sys.executable,
                "-c",
                "import pandas as pd; import numpy as np; import swifter; "
                + "df = pd.DataFrame({'x': np.random.normal(size=4)}, dtype='float32'); "
                + "df.swifter.progress_bar(enable=False)"
                + ".apply(lambda x: print(x.values))",
            ],
            stderr=subprocess.STDOUT,
        )
        self.assertEqual(
            len(print_messages.decode("utf-8").rstrip("\n").split("\n")), 1
        )
class TestPandasSeries(TestSwifter):
def test_apply_on_empty_series(self):
LOG.info("test_apply_on_empty_series")
series = pd.Series()
pd_val = series.apply(math_foo, compare_to=1)
swifter_val = series.swifter.apply(math_foo, compare_to=1)
self.assertEqual(pd_val, swifter_val) # equality test
def test_nonvectorized_math_apply_on_small_series(self):
LOG.info("test_nonvectorized_math_apply_on_small_series")
df = pd.DataFrame({"x": np.random.normal(size=1000)})
series = df["x"]
tqdm.pandas(desc="Pandas Vec math apply ~ Series")
pd_val = series.progress_apply(math_foo, compare_to=1)
|
mozilla/zamboni | mkt/tags/utils.py | Python | bsd-3-clause | 2,555 | 0 | from django import forms
from django.utils.translation import ugettext as _, ungettext as ngettext
import mkt
from mkt.access import acl
from mkt.site.utils import slugify
from mkt.tags.models import Tag
def clean_tags(request, tags, max_tags=None):
    """
    Validate and normalize a comma-separated ``tags`` string.

    Blocked tags are not allowed.
    Restricted tags can only be edited by Reviewers and Curators.

    :param request: current request, used for the restricted-tag permission check.
    :param tags: comma-separated tag string supplied by the user.
    :param max_tags: optional cap on the tag count; defaults to mkt.MAX_TAGS.
    :return: set of slugified, lower-cased tag strings.
    :raises forms.ValidationError: for blocked tags, unauthorized restricted
        tags, too many tags, or tags outside the allowed length range.
    """
    # Slugify each piece and drop empties (e.g. from "a,,b" or trailing commas).
    target = [slugify(t, spaces=True, lower=True) for t in tags.split(',')]
    target = set(filter(None, target))

    min_len = mkt.MIN_TAG_LENGTH
    max_len = Tag._meta.get_field('tag_text').max_length
    max_tags = max_tags or mkt.MAX_TAGS
    total = len(target)

    blocked = (Tag.objects.values_list('tag_text', flat=True)
               .filter(tag_text__in=target, blocked=True))
    if blocked:
        # L10n: {0} is a single tag or a comma-separated list of tags.
        msg = ngettext(u'Invalid tag: {0}', 'Invalid tags: {0}',
                       len(blocked)).format(', '.join(blocked))
        raise forms.ValidationError(msg)

    restricted = (Tag.objects.values_list('tag_text', flat=True)
                  .filter(tag_text__in=target, restricted=True))
    if restricted and not can_edit_restricted_tags(request):
        # L10n: {0} is a single tag or a comma-separated list of tags.
        msg = ngettext(u'"{0}" is a reserved tag and cannot be used.',
                       u'"{0}" are reserved tags and cannot be used.',
                       len(restricted)).format('", "'.join(restricted))
        raise forms.ValidationError(msg)
    else:
        # Admin's restricted tags don't count towards the limit.
        total = len(target - set(restricted))

    if total > max_tags:
        num = total - max_tags
        msg = ngettext(u'You have {0} too many tags.',
                       u'You have {0} too many tags.', num).format(num)
        raise forms.ValidationError(msg)

    if any(t for t in target if len(t) > max_len):
        raise forms.ValidationError(
            _(u'All tags must be %s characters '
              u'or less after invalid characters are removed.' % max_len))

    if any(t for t in target if len(t) < min_len):
        msg = ngettext(u'All tags must be at least {0} character.',
                       u'All tags must be at least {0} characters.',
                       min_len).format(min_len)
        raise forms.ValidationError(msg)

    return target
def can_edit_restricted_tags(request):
    """Return a truthy value when the requester may edit restricted/reserved tags.

    Reviewers ('Apps: Edit') and feed curators ('Feed: Curate') qualify; the
    short-circuiting ``or`` avoids the second ACL check when the first passes.
    """
    return (acl.action_allowed(request, 'Apps', 'Edit') or
            acl.action_allowed(request, 'Feed', 'Curate'))
|
teeple/pns_server | work/install/Python-2.7.4/Lib/test/test_set.py | Python | gpl-2.0 | 62,916 | 0.004466 |
import unittest
from test import test_support
import gc
import weakref
import operator
import copy
import pickle
from random import randrange, shuffle
import sys
import collections
class PassThru(Exception):
    """Sentinel exception used to verify that errors propagate through set operations."""
def check_pass_thru():
    """Generator that raises PassThru on first iteration.

    The unreachable ``yield 1`` makes this a generator function, so the
    ``raise`` is deferred until a consumer starts iterating the generator.
    """
    raise PassThru
    yield 1  # never reached; present only to force generator semantics
class BadCmp:
    """Hashable fixture whose (Py2) comparison hook always raises RuntimeError."""

    def __cmp__(self, other):
        # Any classic comparison attempt explodes.
        raise RuntimeError

    def __hash__(self):
        # Constant hash so instances can still live in sets/dicts.
        return 1
class ReprWrapper:
    'Used to test self-referential repr() calls'

    def __repr__(self):
        # Delegate to the wrapped object's repr (may recurse for self-references).
        wrapped = self.value
        return repr(wrapped)
class HashCountingInt(int):
    'int-like object that counts the number of times __hash__ is called'

    def __init__(self, *args):
        # The integer value itself is fixed by int.__new__; only add the counter.
        self.hash_count = 0

    def __hash__(self):
        self.hash_count = self.hash_count + 1
        return super(HashCountingInt, self).__hash__()
class TestJointOps(unittest.TestCase):
# Tests common to both set and frozenset
    def setUp(self):
        """Build the set-under-test from 'simsalabim' plus comparison fixtures."""
        self.word = word = 'simsalabim'
        self.otherword = 'madagascar'
        self.letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
        self.s = self.thetype(word)       # thetype is supplied by concrete subclasses (set/frozenset)
        self.d = dict.fromkeys(word)      # dict whose keys are the same unique letters
    def test_new_or_init(self):
        """Constructor rejects extra positional args; __init__ rejects keyword args."""
        self.assertRaises(TypeError, self.thetype, [], 2)
        self.assertRaises(TypeError, set().__init__, a=1)
    def test_uniquification(self):
        """Duplicates collapse; construction propagates errors and rejects unhashables."""
        actual = sorted(self.s)
        expected = sorted(self.d)
        self.assertEqual(actual, expected)
        self.assertRaises(PassThru, self.thetype, check_pass_thru())
        self.assertRaises(TypeError, self.thetype, [[]])
    def test_len(self):
        """len() of the set equals the number of unique letters."""
        self.assertEqual(len(self.s), len(self.d))
    def test_contains(self):
        """Membership matches the dict fixture; unhashables raise; sets nest via frozenset."""
        for c in self.letters:
            self.assertEqual(c in self.s, c in self.d)
        self.assertRaises(TypeError, self.s.__contains__, [[]])
        s = self.thetype([frozenset(self.letters)])
        # A mutable set is coerced to frozenset for the containment check.
        self.assertIn(self.thetype(self.letters), s)
    def test_union(self):
        """union() accepts any iterable, leaves self untouched, and returns thetype."""
        u = self.s.union(self.otherword)
        for c in self.letters:
            self.assertEqual(c in u, c in self.d or c in self.otherword)
        self.assertEqual(self.s, self.thetype(self.word))
        self.assertEqual(type(u), self.thetype)
        self.assertRaises(PassThru, self.s.union, check_pass_thru())
        self.assertRaises(TypeError, self.s.union, [[]])
        for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
            self.assertEqual(self.thetype('abcba').union(C('cdc')), set('abcd'))
            self.assertEqual(self.thetype('abcba').union(C('efgfe')), set('abcefg'))
            self.assertEqual(self.thetype('abcba').union(C('ccb')), set('abc'))
            self.assertEqual(self.thetype('abcba').union(C('ef')), set('abcef'))
            self.assertEqual(self.thetype('abcba').union(C('ef'), C('fg')), set('abcefg'))
        # Issue #6573
        x = self.thetype()
        self.assertEqual(x.union(set([1]), x, set([2])), self.thetype([1, 2]))
    def test_or(self):
        """The | operator matches union() but rejects non-set iterables."""
        i = self.s.union(self.otherword)
        self.assertEqual(self.s | set(self.otherword), i)
        self.assertEqual(self.s | frozenset(self.otherword), i)
        try:
            self.s | self.otherword
        except TypeError:
            pass
        else:
            self.fail("s|t did not screen-out general iterables")
    def test_intersection(self):
        """intersection() accepts any iterable, preserves self, and returns thetype."""
        i = self.s.intersection(self.otherword)
        for c in self.letters:
            self.assertEqual(c in i, c in self.d and c in self.otherword)
        self.assertEqual(self.s, self.thetype(self.word))
        self.assertEqual(type(i), self.thetype)
        self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
        for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
            self.assertEqual(self.thetype('abcba').intersection(C('cdc')), set('cc'))
            self.assertEqual(self.thetype('abcba').intersection(C('efgfe')), set(''))
            self.assertEqual(self.thetype('abcba').intersection(C('ccb')), set('bc'))
            self.assertEqual(self.thetype('abcba').intersection(C('ef')), set(''))
            self.assertEqual(self.thetype('abcba').intersection(C('cbcf'), C('bag')), set('b'))
        s = self.thetype('abcba')
        z = s.intersection()
        # NOTE(review): this compares the *type* thetype to an empty *instance*
        # frozenset(), which is always False, so the else branch always runs.
        # Looks like it should be `self.thetype == frozenset` — confirm upstream
        # before changing, since behavior here is load-bearing for both subclasses.
        if self.thetype == frozenset():
            self.assertEqual(id(s), id(z))
        else:
            self.assertNotEqual(id(s), id(z))
    def test_isdisjoint(self):
        """isdisjoint() agrees with a pure-python reference for many operand pairs."""
        def f(s1, s2):
            'Pure python equivalent of isdisjoint()'
            return not set(s1).intersection(s2)
        for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
            s1 = self.thetype(larg)
            for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
                for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
                    s2 = C(rarg)
                    actual = s1.isdisjoint(s2)
                    expected = f(s1, s2)
                    self.assertEqual(actual, expected)
                    # The result must be an actual bool, not merely truthy/falsy.
                    self.assertTrue(actual is True or actual is False)
    def test_and(self):
        """The & operator matches intersection() but rejects non-set iterables."""
        i = self.s.intersection(self.otherword)
        self.assertEqual(self.s & set(self.otherword), i)
        self.assertEqual(self.s & frozenset(self.otherword), i)
        try:
            self.s & self.otherword
        except TypeError:
            pass
        else:
            self.fail("s&t did not screen-out general iterables")
    def test_difference(self):
        """difference() accepts any iterable, preserves self, and returns thetype."""
        i = self.s.difference(self.otherword)
        for c in self.letters:
            self.assertEqual(c in i, c in self.d and c not in self.otherword)
        self.assertEqual(self.s, self.thetype(self.word))
        self.assertEqual(type(i), self.thetype)
        self.assertRaises(PassThru, self.s.difference, check_pass_thru())
        self.assertRaises(TypeError, self.s.difference, [[]])
        for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
            self.assertEqual(self.thetype('abcba').difference(C('cdc')), set('ab'))
            self.assertEqual(self.thetype('abcba').difference(C('efgfe')), set('abc'))
            self.assertEqual(self.thetype('abcba').difference(C('ccb')), set('a'))
            self.assertEqual(self.thetype('abcba').difference(C('ef')), set('abc'))
            self.assertEqual(self.thetype('abcba').difference(), set('abc'))
            self.assertEqual(self.thetype('abcba').difference(C('a'), C('b')), set('c'))
    def test_sub(self):
        """The - operator matches difference() but rejects non-set iterables."""
        i = self.s.difference(self.otherword)
        self.assertEqual(self.s - set(self.otherword), i)
        self.assertEqual(self.s - frozenset(self.otherword), i)
        try:
            self.s - self.otherword
        except TypeError:
            pass
        else:
            self.fail("s-t did not screen-out general iterables")
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.otherword))
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.thetype)
self.ass | ertRaises(PassThru, self.s.symmetric_difference, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
self.assertEqu | al(self.thetype('abcba').symmetric_difference(C('cdc')), set('abd'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ef')), set('abcef'))
    def test_xor(self):
        """The ^ operator matches symmetric_difference() but rejects non-set iterables."""
        i = self.s.symmetric_difference(self.otherword)
        self.assertEqual(self.s ^ set(self.otherword), i)
        self.assertEqual(self.s ^ frozenset(self.otherword), i)
        try:
            self.s ^ self.otherword
        except TypeError:
            pass
        else:
            self.fail("s^t did not screen-out general iterables")
def test_equality(self):
self.assertEqual(self.s, set(self.word))
sel |
rmmh/skybot | plugins/weather.py | Python | unlicense | 4,026 | 0.000497 | """Weather, thanks to darksky and google geocoding."""
from __future__ import unicode_literals
from util import hook, http
GEOCODING_URL = "https://maps.googleapis.com/maps/api/geocode/json"
DARKSKY_URL = "https://api.darksky.net/forecast/"
def geocode_location(api_key, loc):
    """Get a geocoded location from google's geocoding api.

    :param api_key: Google geocoding API key.
    :param loc: free-form location string to geocode.
    :return: parsed JSON response dict, or None when the HTTP request fails.
    """
    try:
        parsed_json = http.get_json(GEOCODING_URL, address=loc, key=api_key)
    except IOError:
        # Network/HTTP failure — the caller treats None as "lookup failed".
        return None
    return parsed_json
def get_weather_data(api_key, lat, long):
    """Get weather data from darksky.

    :param api_key: Dark Sky API key.
    :param lat: latitude of the forecast point.
    :param long: longitude (NOTE: shadows the Py2 builtin ``long``; kept for
        backward-compatible keyword calls).
    :return: parsed JSON forecast dict, or None when the HTTP request fails.
    """
    query = "{key}/{lat},{long}".format(key=api_key, lat=lat, long=long)
    url = DARKSKY_URL + query
    try:
        parsed_json = http.get_json(url)
    except IOError:
        # Network/HTTP failure — the caller treats None as "lookup failed".
        return None
    return parsed_json
def f_to_c(temp_f):
    """Convert degrees Fahrenheit to degrees Celsius."""
    delta = temp_f - 32
    return delta * 5 / 9
def mph_to_kph(mph):
    """Convert miles-per-hour to kilometres-per-hour."""
    kph_per_mph = 1.609
    return mph * kph_per_mph
@hook.api_key("google", "darksky")
@hook.command(autohelp=False)
def weather(inp, chan="", nick="", reply=None, db=None, api_key=None):
    """.weather <location> [dontsave] | @<nick> -- Get weather data."""
    # Both API keys are required: google for geocoding, darksky for the forecast.
    if "google" not in api_key and "darksky" not in api_key:
        return None
    # this database is used by other plugins interested in user's locations,
    # like .near in tag.py
    db.execute(
        "create table if not exists "
        "location(chan, nick, loc, lat, lon, primary key(chan, nick))"
    )
    if inp[0:1] == "@":
        # "@nick" form: look up another user's saved location; never persist.
        nick = inp[1:].strip()
        loc = None
        dontsave = True
    else:
        dontsave = inp.endswith(" dontsave")
        # strip off the " dontsave" text if it exists and set it back to `inp`
        # so we don't report it back to the user incorrectly
        if dontsave:
            inp = inp[:-9].strip().lower()
        loc = inp
    if not loc: # blank line
        # No explicit location: fall back to the saved one for this chan/nick.
        loc = db.execute(
            "select loc, lat, lon from location where chan=? and nick=lower(?)",
            (chan, nick),
        ).fetchone()
        if not loc:
            return weather.__doc__
        addr, lat, lng = loc
    else:
        # Fresh location string: geocode it to coordinates first.
        location = geocode_location(api_key["google"], loc)
        if not location or location.get("status") != "OK":
            reply("Failed to determine location for {}".format(inp))
            return
        geo = location.get("results", [{}])[0].get("geometry", {}).get("location", None)
        if not geo or "lat" not in geo or "lng" not in geo:
            reply("Failed to determine location for {}".format(inp))
            return
        addr = location["results"][0]["formatted_address"]
        lat = geo["lat"]
        lng = geo["lng"]
    # NOTE(review): get_weather_data returns None on IOError; the .get below
    # would then raise AttributeError — confirm whether that's acceptable here.
    parsed_json = get_weather_data(api_key["darksky"], lat, lng)
    current = parsed_json.get("currently")
    if not current:
        reply("Failed to get weather data for {}".format(inp))
        return
    forecast = parsed_json["daily"]["data"][0]
    # Collect both imperial and metric values for the reply template.
    info = {
        "city": addr,
        "t_f": current["temperature"],
        "t_c": f_to_c(current["temperature"]),
        "h_f": forecast["temperatureHigh"],
        "h_c": f_to_c(forecast["temperatureHigh"]),
        "l_f": forecast["temperatureLow"],
        "l_c": f_to_c(forecast["temperatureLow"]),
        "weather": current["summary"],
        "humid": int(current["humidity"] * 100),
        "wind": "Wind: {mph:.1f}mph/{kph:.1f}kph".format(
            mph=current["windSpeed"], kph=mph_to_kph(current["windSpeed"])
        ),
        "forecast": parsed_json.get("hourly", {}).get("summary", ""),
    }
    reply(
        "{city}: {weather}, {t_f:.1f}F/{t_c:.1f}C"
        "(H:{h_f:.1f}F/{h_c:.1f}C L:{l_f:.1f}F/{l_c:.1f}C)"
        ", Humidity: {humid}%, {wind} \x02{forecast}\x02".format(**info)
    )
    # Persist the resolved location unless the user opted out with "dontsave".
    if inp and not dontsave:
        db.execute(
            "insert or replace into "
            "location(chan, nick, loc, lat, lon) "
            "values (?, ?, ?, ?, ?)",
            (chan, nick.lower(), addr, lat, lng),
        )
        db.commit()
|
Chuban/moose | python/chigger/tests/chigger/test_chigger.py | Python | lgpl-2.1 | 2,346 | 0.003836 | #!/usr/bin/env python
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import os
import subprocess
import unittest
class TestChiggerCommandUtility(unittest.TestCase):
    """
    Test the chigger command line utility.
    """
    def execute(self, *args):
        """
        Execute the chigger command line utility with the provided arguments.
        """
        script = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..',
                                              'scripts', 'chigger'))
        return subprocess.check_output([script] + list(args))
    def testInfo(self):
        """
        Test the 'info' command for displaying ExodusII file information.
        """
        out = self.execute('info', os.path.join('..', 'input', 'mug_blocks_out.e'))
        # The listing must mention these known variables from the gold file.
        for variable in ('convected', 'aux_elem', 'func_pp'):
            self.assertIn(variable, out)
    def testImg2Mov(self):
        """
        Test 'img2mov' command.
        """
        pattern = os.path.join('..', 'field_data', 'gold', 'plot_current_*.png')
        out = self.execute('img2mov', pattern, '-o', 'output.mov', '--dry-run', '-j', '4', '--duration', '30')
        gold = 'ffmpeg -pattern_type glob -framerate 0 -i ../field_data/gold/plot_current_*.png ' \
               '-c:v mpeg2video -b:v 10M -pix_fmt yuv420p -q:v 1 -threads 4 -framerate 0 output.mov'
        self.assertIn(gold, out)
# Allow running this test module directly with verbose output.
if __name__ == '__main__':
    unittest.main(module=__name__, verbosity=2)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.