repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Johnetordoff/osf.io | osf/management/utils.py | Python | apache-2.0 | 457 | 0 | from django.utils.six.moves import input
# From https://stackoverflo | w.com/a/39257511/1157536
def ask_for_confirmation(question, default=None):
"""Ask for confirmation before proceeding.
"""
result = input('{} '.format(question))
if not result and default is not None:
return default
while len(result) < | 1 or result[0].lower() not in 'yn':
result = input('Please answer yes or no: ')
return result[0].lower() == 'y'
|
co-ment/comt | setup.py | Python | agpl-3.0 | 441 | 0 | from setuptools import setup, find_packages
setup( |
name="comt",
use_scm_version=True,
url='http://www.co-ment.org',
license='AGPL3',
description="Web-based Text Annotation Application.",
long_description=open('ABOUT.r | st').read(),
author='Abilian SAS',
author_email='dev@abilian.com',
packages=find_packages('src'),
package_dir={'': 'src'},
install_requires=['setuptools'],
zip_safe=False,
)
|
gokmen/Rasta | rasta_lib/rasta.py | Python | gpl-2.0 | 16,437 | 0.002799 | #!/usr/bin/python
# -*- coding: utf-8 -*-
''' Rasta RST Editor
2010 - Gökmen Göksel <gokmeng:gmail.com> '''
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as Published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
from utils import *
# i18n Support
import gettext
_ = gettext.translation('rasta', fallback=True).ugettext
from qrstedit import RstTextEdit
class Rasta(QMainWindow):
''' Rasta main class '''
def __init__(self, arguments):
QMainWindow.__init__(self)
self.ui = Ui_Rasta()
self.ui.setupUi(self)
self.ui.textEdit = RstTextEdit(self.ui.splitter)
self.ui.webView = QtWebKit.QWebView(self.ui.splitter)
self.setUnifiedTitleAndToolBarOnMac(True)
self._latest_html = None
# System settings
self.settings = QSettings()
self.readSettings()
self.file_name = TMPFILE
if len(arguments) > 1:
if not unicode(arguments[1]).startswith('--'):
self.loadFile(arguments[1])
self.updateRst(force = True)
else:
self.showHelp()
self.buildToolbar()
if '--hide-source' in arguments:
self.ui.actionShow_Source.toggle()
def updateRst(self, source = None, force = False):
''' Rebuild current source and show it in webview '''
if self.ui.actionLive_Update.isChecked() or\
self.sender() == self.ui.actionUpdate_Now or\
source or force:
self.last_scroll_position = self.ui.webView.page().mainFrame().scrollBarValue(Qt.Vertical)
if not source:
source = unicode(self.ui.textEdit.toPlainText())
PUB.set_source(source)
PUB.set_destination()
PUB.document = PUB.reader.read(PUB.source, PUB.parser, PUB.settings)
PUB.apply_transforms()
logs = []
self.ui.textEdit.clearFlags()
for node in PUB.document.traverse(docutils.nodes.problematic):
node.parent.replace(node, node.children[0])
for node in PUB.document.traverse(docutils.nodes.system_message):
log = clear_log(node)
node.parent.remove(node)
logs.append(log)
line = int(log[0])
self.ui.textEdit.addFlag(line)
html = PUB.writer.write(PUB.document, PUB.destination)
model = LogTableModel(logs, self)
self.ui.logs.setModel(model)
self.ui.logs.resizeColumnsToContents()
self._latest_html = html
self.ui.webView.setHtml(unicode(html, 'UTF-8'))
self.ui.webView.page().mainFrame().setScrollBarValue(Qt.Vertical, self.last_scroll_position)
if len(logs) > 0 and self.ui.actionShow_Logs.isChecked():
self.ui.Logs.show()
else:
self.ui.Logs.hide()
self.ui.textEdit.lineNumber.update()
def addTable(self):
''' Add Rst style table '''
cursor = self.ui.textEdit.textCursor()
cursor.beginEditBlock()
char_format = QTextCharFormat()
char_format.setFontFixedPitch(True)
cursor.setCharFormat(char_format)
row = QInputDialog.getInteger(self,
_('Add Table'), _('Number of rows :'), 1)
if row[1]:
column = QInputDialog.getInteger(self, _('Add Table'),
_('Number of columns :'), 1)
if column[1]:
cell_size = QInputDialog.getInteger(self, _('Add Table'),
_('Cell size :'), 8)
if cell_size[1]:
cursor.insertText('\n')
for times in range(row[0]):
cursor.insertText('%s+\n' % ('+%s' % ('-' * cell_size[0]) * column[0]))
cursor.insertText('%s|\n' % ('|%s' % (' ' * cell_size[0]) * column[0]))
cursor.insertText('%s+\n' % ('+%s' % ('-' * cell_size[0]) * column[0]))
cursor.endEditBlock()
def editTrigger(self):
''' If user clicks some of edit action it calls this method '''
marker = None
header = None
cursor = self.ui.textEdit.textCursor()
if cursor.hasSelection():
selection = cursor.selectedText()
if self.sender() == self.ui.actionBold:
marker = '**'
elif self.sender() == self.ui.actionItalic:
marker = '*'
elif self.sender() == self.ui.actionCode:
marker = '``'
elif self.sender() == self.ui.actionSymbol_1:
header = '~'
elif self.sender() == self.ui.actionSymbol_2:
header = '*'
elif self.sender() == self.ui.actionSymbol_3:
header = '-'
elif self.sender() == self.ui.actionLink:
link, res = QInputDialog.getText(self,
_('Insert Link'), _('Address :'))
if res:
if not unicode(link[0]).startswith('http'):
link = 'http://%s' % link
cursor.beginEditBlock()
cursor.removeSelectedText()
cursor.insertText("`%s <%s>`_" % (selection, link))
cursor.endEditBlock()
if header:
add = '' if cursor.positionInBlock() - len(selection) == 0 else '\n' * 2
cursor.beginEditBlock()
cursor.removeSelectedText()
cursor.insertText("%s%s\n%s\n" | % (add, selection, (header * len(selection))))
cursor.endEditBlock()
if marker:
cursor.beginEditBlock()
cursor.removeSelectedText()
cursor.insertText("%s%s%s" % (marker, selection, marker))
cursor.endEditBlock()
## File Operati | ons
def newFile(self):
''' Create new file '''
if self.checkModified():
self.ui.textEdit.clear()
self.file_name = TMPFILE
self.setWindowTitle('Rasta - %s' % self.file_name)
def fileOpen(self):
''' It shows Open File dialog '''
if self.checkModified():
file_name = QFileDialog.getOpenFileName(self)
if not file_name.isEmpty():
self.loadFile(file_name)
def loadFile(self, file_name, parse_string=False):
''' Load given file and show it in QSci component '''
try:
file_object = file(file_name, 'r')
except Exception, msg:
QMessageBox.warning(self, 'Rasta',
QString(_('Cannot read file %1:\n%2.'))
.arg(file_name)
.arg(msg))
return
self.file_name = file_name
content = unicode(file_object.read())
QApplication.setOverrideCursor(Qt.WaitCursor)
QApplication.restoreOverrideCursor()
if parse_string:
return content
self.ui.textEdit.setPlainText(content)
self.ui.textEdit.document().setModified(False)
self.setWindowTitle('Rasta :: %s' % file_name)
file_object.close()
def resizeEvent(self, event):
self.ui.textEdit.lineNumber.resizeEvent(event)
def saveFile(self):
''' File save operation '''
if self.file_name == TMPFILE or self.sender() == self.ui.actionSave_As:
get_new_file_name = QFileDialog.getSaveFileName(self, _('Save File'))
if not get_new_file_name.isEmpty():
self.file_name = get_new_file_name
else:
return
try:
file_object = file(self.file_name, 'w')
except Exception, msg:
QMessageBox.warning(self, 'Rasta',
QString(_('Cannot write file %1:\n%2.'))
.arg(self.file_name)
.arg(msg))
return False
QAppli |
drcgw/bass | modules/__init__.py | Python | gpl-3.0 | 44 | 0.022727 | __ | all__ = ['pleth_analysis', 'ekg_analysi | s'] |
cernops/neutron | neutron/db/migration/alembic_migrations/versions/4eba2f05c2f4_correct_vxlan_endpoint_primary_key.py | Python | apache-2.0 | 1,254 | 0.000797 | # Copyright (c) 2014 Thales Services SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law | or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""correct Vxlan En | dpoint primary key
Revision ID: 4eba2f05c2f4
Revises: 884573acbf1c
Create Date: 2014-07-07 22:48:38.544323
"""
# revision identifiers, used by Alembic.
revision = '4eba2f05c2f4'
down_revision = '884573acbf1c'
from alembic import op
TABLE_NAME = 'ml2_vxlan_endpoints'
PK_NAME = 'ml2_vxlan_endpoints_pkey'
def upgrade():
op.drop_constraint(PK_NAME, TABLE_NAME, type_='primary')
op.create_primary_key(PK_NAME, TABLE_NAME, cols=['ip_address'])
def downgrade():
op.drop_constraint(PK_NAME, TABLE_NAME, type_='primary')
op.create_primary_key(PK_NAME, TABLE_NAME, cols=['ip_address', 'udp_port'])
|
darkryder/django | django/contrib/admin/sites.py | Python | bsd-3-clause | 19,712 | 0.001268 | from functools import update_wrapper
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import NoReverseMatch, reverse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.i18n import JavaScriptCatalog
system_check_errors = []
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
# Text to put at the end of each page's <title>.
site_title = ugettext_lazy('Django site admin')
# Text to put in each page's <h1>.
site_header = ugettext_lazy('Django administration')
# Text to put at the top of the admin index page.
index_title = ugettext_lazy('Site administration')
# URL for the "View site" link at the top of each admin page.
site_url = '/'
_empty_value_display = '-'
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
def __init__(self, name='admin'):
self._registry = {} # model_class class -> admin_class instance
self.name = name
self._actions = {'delete_selected': actions.delete_selected}
self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
"""
Registers the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, it will use ModelAdmin (the default
admin options). If keyword arguments are given -- e.g., list_display --
they'll be applied as options to the admin class.
If a model is already registered, this will raise AlreadyRegistered.
If a model is abstract, this will raise ImproperlyConfigured.
"""
if not admin_class:
admin_class = ModelAdmin
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured(
'The model %s is abstract, so it cannot be registered with admin.' % model.__name__
)
if model in sel | f._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# Ignore the registration if the model has been
# swapped out.
if not model._meta.swapped:
# If we got **op | tions then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
# Instantiate the admin class to save in the registry
admin_obj = admin_class(model, self)
if admin_class is not ModelAdmin and settings.DEBUG:
system_check_errors.extend(admin_obj.check())
self._registry[model] = admin_obj
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def is_registered(self, model):
"""
Check if a model class is registered with this `AdminSite`.
"""
return model in self._registry
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitly get a registered global action whether it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return six.iteritems(self._actions)
@property
def empty_value_display(self):
return self._empty_value_display
@empty_value_display.setter
def empty_value_display(self, empty_value_display):
self._empty_value_display = empty_value_display
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls import url
urls = super(MyAdminSite, self).get_urls()
urls += [
url(r'^my_view/$', self.admin_view(some_view))
]
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request):
if request.path == reverse('admin:logout', current_app=self.name):
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
# Inner import to prevent django.contrib.admin (app) from
# importing django.contrib.auth.models.User (unrelated model).
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
request.get_full_path(),
reverse('admin:login', current_app=self.name)
)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inn |
artemsok/sockeye | sockeye/rnn_attention.py | Python | apache-2.0 | 36,524 | 0.00345 | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Implementations of different attention mechanisms in sequence-to-sequence models.
"""
import logging
import inspect
from typing import Callable, NamedTuple, Optional, Tuple, Dict, Type
import numpy as np
import mxnet as mx
from . import config
from . import constants as C
from . import coverage
from . import layers
from . import utils
logger = logging.getLogger(__name__)
class AttentionConfig(config.Config):
"""
Attention configuration.
:param type: Attention name.
:param num_hidden: Number of hidden units for attention networks.
:param input_previous_word: Feeds the previous target embedding into the attention mechanism.
:param source_num_hidden: Number of hidden units of the source.
:param query_num_hidden: Number of hidden units of the query.
:param layer_normalization: Apply layer normalization to MLP attention.
:param config_coverage: Optional coverage configuration.
:param num_heads: Number of attention heads. Only used for Multi-head dot attention.
:param is_scaled: If 'dot' attentions should be scaled.
:param dtype: Data type.
"""
def __init__(self,
type: str,
num_hidden: int,
input_previous_word: bool,
source_num_hidden: int,
query_num_hidden: int,
layer_normalization: bool,
config_coverage: Optional[coverage.CoverageConfig] = None,
num_heads: Optional[int] = None,
is_scaled: Optional[bool] = False,
dtype: str = C.DTYPE_FP32) -> None:
super().__init__()
self.type = type
self.num_hidden = num_hidden
self.input_previous_word = input_previous_word
self.source_num_hidden = source_num_hidden
self.query_num_hidden = query_num_hidden
self.layer_normalization = layer_normalization
self.config_coverage = config_coverage
self.num_heads = num_heads
self.is_scaled = is_scaled
self.dtype = dtype
def _instantiate(cls, params):
"""
Helper to instantiate Attention classes from parameters. Warns in log if parameter is not supported
by class constructor.
:param cls: Attention class.
:param params: configuration parameters.
:return: instance of `cls` type.
"""
sig_params = inspect.signature(cls.__init__).parameters
valid_params = dict()
for key, value in params.items():
if key in sig_params:
valid_params[key] = value
else:
logger.debug('Type %s does not support parameter \'%s\'' % (cls.__name__, key))
return cls(**valid_params)
def get_attention(config: AttentionConfig, max_seq_len: int, prefix: str = C.ATTENTION_PREFIX) -> 'Attention':
"""
Returns an Attention instance based on attention_type.
:param config: Attention configuration.
:param max_seq_len: Maximum length of source sequences.
:param prefix: Name prefix.
:return: Instance of Attention.
"""
att_cls = Attention.get_attention_cls(config.type)
params = config.__dict__.copy()
params.pop('_frozen')
params['max_seq_len'] = max_seq_len
params['prefix'] = prefix
return _instantiate(att_cls, params)
AttentionInput = NamedTuple('AttentionInput', [('seq_idx', int), ('query', mx.sym.Symbol)])
"""
Input to attention callables.
:param seq_idx: Decoder time step / sequence index.
:param query: Query input to attention mechanism, e.g. decoder hidden state (plus previous word).
"""
AttentionState = NamedTuple('AttentionState', [
('context', mx.sym.Symbol),
('probs', mx.sym.Symbol),
('dynamic_source', mx.sym.Symbol),
])
"""
Results returned from attention callables.
:param context: Context vector (Bahdanau et al, 15). Shape: (batch_size, encoder_num_hidden)
:param probs: Attention distribution over source encoder states. Shape: (batch_size, source_seq_len).
:param dynamic_source: Dynamically updated source encoding.
Shape: (batch_size, source_seq_len, dynamic_source_num_hidden)
"""
class Attention(object):
"""
Generic attention interface that returns a callable for attending to source states.
:param input_previous_word: Feed the previous target embedding into the attention mechanism.
:param dynamic_source_num_hidden: Number of hidden units of dynamic source encoding update mechanism.
:param dtype: Data type.
"""
__registry = {} # type: Dict[str, Type['Attention']]
@classmethod
def register(cls, att_type: str):
def wrapper(target_cls):
cls.__registry[att_type] = target_cls
return target_cls
return wrapper
@classmethod
def get_attention_cls(cls, att_type: str):
if att_type not in cls.__registry:
raise ValueError('Unknown attention type %s' % att_type)
return cls.__registry[att_type]
def __init__(self,
input_previous_word: bool,
dynamic_source_num_hidden: int = 1,
prefix: str = C.ATTENTION_PREFIX,
dtype: str = C.DTYPE_FP32) -> None:
self.dynamic_source_num_hidden = dynamic_source_num_hidden
self._input_previous_word = input_previous_word
self.prefix = prefix
self.dtype = dtype
def on(self, source: mx.sym.Symbol, source_length: mx.sym.Symbol, source_seq_len: int) -> Callable:
"""
Returns callable to be used for recurrent attention in a sequence decoder.
The callable is a recurrent function of the form:
AttentionState = attend(AttentionInput, AttentionState).
:param source: Shape: (batch_size, seq_len, encoder_num_hidden).
:param source_length: Shape: (batch_size,).
:param source_seq_len: Maximum length of source sequences.
:return: Attention callable.
"""
def attend(att_input: AttentionInput, att_state: AttentionState) -> AttentionState:
"""
Returns updated attention state given attention input and current attention state.
:param att_input: Attention input as returned by make_input().
:param att_state: Current attention state
:return: Updated attention state.
"""
raise NotImplementedError()
return attend
def get_initial_state(self, source_length: mx.sym.Symbol, source_seq_len: int) -> AttentionState:
"""
Returns initial attention state. Dynamic source | encoding is initialized with zeros.
:param source_length: Source length. Shape: (batch_size,).
:param source_seq_len: Maximum leng | th of source sequences.
"""
dynamic_source = mx.sym.reshape(mx.sym.zeros_like(source_length), shape=(-1, 1, 1))
# dynamic_source: (batch_size, source_seq_len, num_hidden_dynamic_source)
dynamic_source = mx.sym.broadcast_to(dynamic_source, shape=(0, source_seq_len, self.dynamic_source_num_hidden))
return AttentionState(context=None, probs=None, dynamic_source=dynamic_source)
def make_input(self,
seq_idx: int,
word_vec_prev: mx.sym.Symbol,
decoder_state: mx.sym.Symbol) -> AttentionInput:
"""
Returns AttentionInput to be fed into the attend callable returned by the on() method.
:param seq_idx: Decoder time step.
:param word_vec_prev: Embedding of previously predicted ord
:param decoder_state: Current decoder state
:return: Attention input.
"""
query = decoder_state
if self._input_p |
gemagomez/os-reststack-manager | os_reststack_manager/app/tenant_manager.py | Python | apache-2.0 | 2,958 | 0.000676 | #!/usr/bin/env python
from __future__ import print_function
from flask import Blueprint, Flask, jsonify, json, abort, request, g
from os_reststack_manager.app import credentials, db, Tenant, logging
from lib.setup_tenant import tenant_create, extract_keys
from lib.erase_tenant import tenant_delete
import re
import jwt
import os_reststack_manager.config as CONF
mod = Blueprint('tenant-manager', __name__)
logger = logging.getLogger('tenant_manager')
@mod.before_request
def authenticate():
# logger.debug("endpoint request: %s" % request.endpoint)
if re.search('tenant_provisioned', str(request.endpoint)):
g.user = "phone_home"
logger.info("Authentication bypassed: tenant_provisioned")
return
try:
decoded = jwt.decode(request.headers['X-Auth-Token'], credentials['tenant_secret'], algorithms=['HS256'])
g.user = decoded['user']
except KeyError:
logger.error("Error: key error.")
abort(401)
except jwt.DecodeError:
logger.error("Error: decode error")
abort(401)
@mod.route('/', methods=['GET'])
def test_connection():
logger.info("User " + g.user + " testing connection.")
return 'Ok', 200
@mod.route('/tenant', methods=['POST'])
def create_tenant():
logger.info("User %s requested creation", g.user)
data = request.get_json(force=True)
logger.debug("Request data: %s" % data)
mconf = data['machine_conf'] if 'machine_conf' in data else CONF.MACHINE
cconf = data['cloud_conf'] if 'cloud_conf' in data else CONF.CLOUD_CONFIG
ip, machine_id = tenant_create(tenant_name=data['tenant'],
tenant_keys=extract_keys(data['pub_key']),
image_name_or_id=data['image_id'],
credentials=credentials, cloud_conf=cconf,
machine_conf=mconf)
tenant = Tenant(tenant_name=data['tenant'], machine_id=machine_id, ip=ip)
db.session.add(tenant)
db.session.commit()
return jsonify(tenant=data['tenant'], machine_id=machine_id, ip=ip), 202
@mod.route('/tenant/<tenant>', methods=['GET'])
def get_tenant(tenant):
logger.info("User %s is enquiring about %s" % (g.user, tenant))
tenant = Tenant.query.filter_by(tenant_name=tenant).first_or_404()
return jsonify(tenant_name=tenant.tenant_name, machine_id=tenant.machine_id, ip=tenant.ip, status=tenant.status), 200
@mod.route('/tenant/<tenant>', methods=['DELETE'])
def delete_tenant(tenant):
logger.info("User %s deleting tenant %s", (g.user, tenant))
tenant_delete(tenant, CONF.CREDENTIALS)
return "", 200
@mod.route('/tenant/provisioned/<machine_id>', methods=['POST'])
def tenant_provisioned(machine_id):
logger.info("Tenant provisioned! %s" % machine_id)
tenant = Tenant.qu | ery.fil | ter_by(machine_id=machine_id).first_or_404()
tenant.status = 'READY'
db.session.commit()
return "", 200
|
elbruno/Blog | 20200610 Camera/01Camera.py | Python | gpl-2.0 | 567 | 0.010582 | # Bruno Capuano 2020
# display the camera feed using OpenCV
import time
import cv2
# Camera Settings
camera_Width = 640 # 1024 # 1280 # 640
camera_Heigth = 480 # 780 # 960 # 480
frameSize = (camera_Width, camera_Heigth)
video_capture = cv2.VideoCapture(1)
time.sleep(1.0)
while True | :
ret, frameOrig = video_capture.read()
frame = cv2.resize(frameOrig, frameSize)
cv2.imshow('@elbruno - Camera', frame)
# key controller
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
video_capture.release()
cv2.destroyAl | lWindows() |
thedespicableknight/web_crawlers | youtube/youtube_videos_2.py | Python | mit | 766 | 0.006527 | import requests
import pafy
from bs4 import BeautifulSoup
def trade_spider(max_video_number):
video_number = 1
url = 'https://w | ww.youtube.com'
source_code = requests.get(url)
plain_text = source_code.text
soup = BeautifulSoup(plain_text, "lxml")
for link in soup.findAll('a', {'class': 'yt-uix-sessionlink yt-ui-ell | ipsis yt-ui-ellipsis-2 spf-link '}):
if link['href']:
href = link['href']
video = pafy.new(url + href)
best_video = video.getbest()
best_video.download(filepath="/home/mihir/PycharmProjects/web_crawlers/")
else:
print(link.string + ': FAILURE')
video_number += 1
if video_number > max_video_number:
break
trade_spider(5) |
anhstudios/swganh | data/scripts/templates/object/static/particle/shared_pt_light_streetlamp_green.py | Python | mit | 454 | 0.048458 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from s | wgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/particle/shared_pt_light_streetlamp_green.iff"
result.attribute_templ | ate_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
lukw00/spaCy | tests/parser/test_base_nps.py | Python | mit | 1,273 | 0.001571 | from __future__ import unicode_literals
import pytest
@pytest.mark.models
def test_nsubj(EN):
sent = EN(u'A base phrase should be recognized.')
base_nps = list(sent.noun_chunks)
assert len(base_nps) == 1
assert base_nps[0].string == 'A base phrase '
@pytest.mark.models
def test_coord(EN):
sent = EN(u'A base phrase and a good phrase are often the same.')
base_nps = list(sent.noun_chunks)
assert len(base_nps) == 2
assert base_nps[0].string == 'A base phrase '
assert base_nps[1].string == 'a good phrase '
@pytest.mark.models
def test_pp(EN):
sent = EN(u'A phrase with another phrase oc | curs')
base_nps = list(sent.noun_chunks)
assert len(base_nps) == 2
assert base_nps[0].string == 'A phrase '
assert base_nps[1].string == 'another phrase '
@pytes | t.mark.models
def test_merge_pp(EN):
sent = EN(u'A phrase with another phrase occurs')
nps = [(np[0].idx, np[-1].idx + len(np[-1]), np.lemma_, np[0].ent_type_) for np in sent.noun_chunks]
for start, end, lemma, ent_type in nps:
sent.merge(start, end, u'NP', lemma, ent_type)
assert sent[0].string == 'A phrase '
assert sent[1].string == 'with '
assert sent[2].string == 'another phrase '
assert sent[3].string == 'occurs'
|
erudit/zenon | eruditorg/apps/public/journal/apps.py | Python | gpl-3.0 | 155 | 0 | # -*- | coding: utf-8 -*-
from django.apps impor | t AppConfig
class JournalConfig(AppConfig):
label = 'public_journal'
name = 'apps.public.journal'
|
ShiZhan/newspapers | utils/import-to-mongodb.py | Python | bsd-3-clause | 637 | 0 | #!/usr/bin/env python
import sys
import pymongo
import json
print "import crawled json file into mongodb 'newspapers' database."
i | f len(sys.argv) < 3:
print "input as [collection] [json_file]"
exit(1)
connection = pymongo.Connection("localhost", 27017)
news_database = connection.newspapers
news_collection = news_database[sys.argv[1]]
json_file_name = sys.argv[2]
try:
with open(json_file_name, mode='r') as json_file:
items = json.loads(json_file.read())
json_file.close()
except Exception, e:
raise e
for item in items:
news_collection.save(item)
print len(items), " items saved to mongodb." | |
arthurio/site_heroku | perso/settings/prod.py | Python | mit | 278 | 0.007194 | from perso.settings import *
# Parse datab | ase configuration from $DATABASE_URL
import dj_database_url
DATABASES['default'] = dj_ | database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
|
RPGOne/Skynet | scikit-learn-0.18.1/examples/cluster/plot_cluster_comparison.py | Python | bsd-3-clause | 4,681 | 0.001282 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that needs this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linka | ge = cluster.AgglomerativeClustering(
linkage="averag | e", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
|
sandeepdsouza93/TensorFlow-15712 | tensorflow/python/kernel_tests/io_ops_test.py | Python | apache-2.0 | 3,819 | 0.007113 | # -*- coding: utf-8 -*-
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import tensorflow as tf
class IoOpsTest(tf.test.Tes | tCase):
def testReadFile(self):
cases = ['', 'Some contents', 'Неки садржаји на српском']
for contents in cases:
contents = tf.compat.as_bytes(contents)
with tempfile.NamedTemporaryFile(prefix='ReadFileTest',
| dir=self.get_temp_dir(),
delete=False) as temp:
temp.write(contents)
with self.test_session():
read = tf.read_file(temp.name)
self.assertEqual([], read.get_shape())
self.assertEqual(read.eval(), contents)
os.remove(temp.name)
def testWriteFile(self):
cases = ['', 'Some contents']
for contents in cases:
contents = tf.compat.as_bytes(contents)
with tempfile.NamedTemporaryFile(prefix='WriteFileTest',
dir=self.get_temp_dir(),
delete=False) as temp:
pass
with self.test_session() as sess:
w = tf.write_file(temp.name, contents)
sess.run(w)
with open(temp.name, 'rb') as f:
file_contents = f.read()
self.assertEqual(file_contents, contents)
os.remove(temp.name)
def _subset(self, files, indices):
return set(tf.compat.as_bytes(files[i].name)
for i in range(len(files)) if i in indices)
def testMatchingFiles(self):
cases = ['ABcDEF.GH', 'ABzDEF.GH', 'ABasdfjklDEF.GH', 'AB3DEF.GH',
'AB4DEF.GH', 'ABDEF.GH', 'XYZ']
files = [tempfile.NamedTemporaryFile(
prefix=c, dir=self.get_temp_dir(), delete=True) for c in cases]
with self.test_session():
# Test exact match without wildcards.
for f in files:
self.assertEqual(tf.matching_files(f.name).eval(),
tf.compat.as_bytes(f.name))
# We will look for files matching "ABxDEF.GH*" where "x" is some wildcard.
pos = files[0].name.find(cases[0])
pattern = files[0].name[:pos] + 'AB%sDEF.GH*'
self.assertEqual(set(tf.matching_files(pattern % 'z').eval()),
self._subset(files, [1]))
self.assertEqual(set(tf.matching_files(pattern % '?').eval()),
self._subset(files, [0, 1, 3, 4]))
self.assertEqual(set(tf.matching_files(pattern % '*').eval()),
self._subset(files, [0, 1, 2, 3, 4, 5]))
# NOTE(mrry): Windows uses PathMatchSpec to match file patterns, which
# does not support the following expressions.
if os.name != 'nt':
self.assertEqual(set(tf.matching_files(pattern % '[cxz]').eval()),
self._subset(files, [0, 1]))
self.assertEqual(set(tf.matching_files(pattern % '[0-9]').eval()),
self._subset(files, [3, 4]))
for f in files:
f.close()
if __name__ == '__main__':
tf.test.main()
|
csira/wallace | tests/cases/config/decos.py | Python | bsd-3-clause | 1,193 | 0 | from tests.utils import should_throw
from tests.utils.registry import register
from wallace.config import App, get_app, get_connection
from wallace.config import GetApp, GetDBConn, GetParameter
from wallace.errors import ConfigError
@register
def test_get_app():
app = App()
class Test(object):
app = GetApp()
assert app == Test.app and app is not None
@register
def test_get_app2():
class Test(object):
app = GetApp()
assert Test.app is None
@register
def test_get_conn():
app = App()
conn = app.add_redis_connection(name='my_redis_conn', host='0.0.0.0')
class Test(object):
db_name = 'my_redis_conn'
db = GetDBConn()
assert Test.db == conn
@register
| @should_throw(ConfigError, 102)
def test_get_conn2():
App()
class Test(object):
| db_name = 'my_redis_conn'
db = GetDBConn()
Test.db
@register
def test_get_param():
App(x=1)
class Test(object):
my_param = GetParameter('x')
assert Test.my_param == 1
@register
@should_throw(ConfigError, 101)
def test_get_param2():
App()
class Test(object):
my_param = GetParameter('x')
Test.my_param
|
noemis-fr/custom | add_button_sale/__openerp__.py | Python | gpl-3.0 | 358 | 0.013966 | {
'name': 'add button on sa | le order',
'version': '7.0',
'category': '',
'author': "Mind And Go",
'website': "http://www.mind-and-go.com",
'summary': ' ',
'sequence':99,
'depends': ['base','sale'],
'data': ['views/sale_order_view.xml',
],
'qweb': [
],
'installable': True,
| 'auto_install': False,
} |
fengbohello/practice | python/func/multi-return.py | Python | lgpl-3.0 | 277 | 0.01083 | #! | /bin/env python
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../libs/")
from var_dump import var_dump
def func(a):
return 1, 2, 3
print func(10)
var_dump(func(10))
x = func(10)
var_dump(x)
a, b, _ = func(10)
var_dump(a, b | )
|
EmreAtes/spack | lib/spack/spack/cmd/use.py | Python | lgpl-2.1 | 1,713 | 0 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
from spack.cmd.common import pr | int_module_placeholder_help
description = "add package to environment using dotkit"
section = "environment"
level = "long"
def setup_parser(subparser):
"""Parser is only constructed so that this prints a nice help
message with -h. """
subparser.add_argument(
'spec', nargs=argparse.REMAINDER,
help='spec of package to use with dotkit')
def use(parser, args):
print_module_pla | ceholder_help()
|
ksmaheshkumar/grr | lib/data_stores/mysql_advanced_data_store_test.py | Python | apache-2.0 | 2,003 | 0.008987 | #!/usr/bin/env python
"""Tests the mysql data store."""
import unittest
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
import logging
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import data_store_test
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.data_stores import mysql_advanced_data_store
class MysqlAdvancedTestMixin(object):
def InitDatastore(self):
self.token = access_control.ACLToken(username="test",
reason="Running tests")
# Use separate tables for benchmarks / tests so | they can be run in parallel.
config_lib.CONFIG.Set("Mysql.database_name", "grr_test_%s" %
| self.__class__.__name__)
try:
data_store.DB = mysql_advanced_data_store.MySQLAdvancedDataStore()
data_store.DB.security_manager = test_lib.MockSecurityManager()
data_store.DB.RecreateTables()
except Exception as e:
logging.debug("Error while connecting to MySQL db: %s.", e)
raise unittest.SkipTest("Skipping since Mysql db is not reachable.")
def DestroyDatastore(self):
data_store.DB.DropTables()
def testCorrectDataStore(self):
self.assertTrue(
isinstance(data_store.DB,
mysql_advanced_data_store.MySQLAdvancedDataStore))
class MysqlAdvancedDataStoreTest(
MysqlAdvancedTestMixin, data_store_test._DataStoreTest):
"""Test the mysql data store abstraction."""
class MysqlAdvancedDataStoreBenchmarks(
MysqlAdvancedTestMixin, data_store_test.DataStoreBenchmarks):
"""Benchmark the mysql data store abstraction."""
class MysqlAdvancedDataStoreCSVBenchmarks(
MysqlAdvancedTestMixin, data_store_test.DataStoreCSVBenchmarks):
"""Benchmark the mysql data store abstraction."""
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
|
laffra/pava | pava/implementation/natives/sun/misc/VMSupport.py | Python | mit | 190 | 0.010526 | def add_native_methods(clazz):
def getVMTemporaryDirecto | ry____():
raise NotImplementedError()
clazz.getVMTemporaryDirec | tory____ = staticmethod(getVMTemporaryDirectory____)
|
lbeltrame/danbooru-client | danbooru/danboorupostwidget.py | Python | gpl-2.0 | 8,348 | 0.000839 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011 Luca Beltrame <einar@heavensinferno.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, under
# version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from functools import partial
import sys
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
import PyKDE4.kdecore as kdecore
import PyKDE4.kdeui as kdeui
import PyKDE4.kio as kio
# import danbooru2nepomuk
_TRANSLATED_RATINGS = dict(
Safe=kdecore.i18nc("Image for all audiences", "Safe"),
Questionable=kdecore.i18nc("Image with suggestive themes", "Questionable"),
Explicit=kdecore.i18nc("Image with explicit content",
"Explicit")
)
if sys.version_info.major > 2:
unicode = str
class DanbooruPostWidget(QtGui.QWidget):
"""Widget that displays a DanbooruPost."""
def __init__(self, danbooru_post, parent=None):
super(DanbooruPostWidget, self).__init__(parent)
self.data = danbooru_post
self.url_label = kdeui.KUrlLabel()
self.__text_label = QtGui.QLabel()
self.url_to_file = self.data.file_url
label_text = self.label_text()
self.url_label.setUrl(self.data.file_url)
self.url_label.setPixmap(self.data.pixmap)
full_url = kdecore.KUrl(self.data.file_url).fileName()
self.url_label.setUseTips(True)
self.url_label.setAlignment(QtCore.Qt.AlignCenter)
self.url_label.setTipText(full_url)
self.layout = QtGui.QVBoxLayout(self)
| self.layout.addStretch()
self.layout.addWidget(self.url_label)
if label_text is not None:
self.__text_label.setText(label_text)
self.layout.addWidget(self.__text_label)
self.checkbox = QtGui.QCheckBox()
self.checkbox.setChecked(False)
self.checkbox.setText(kdecore. | i18n("Select"))
# Remove the accelerator, we don't want it
kdeui.KAcceleratorManager.setNoAccel(self.checkbox)
self.checkbox.setSizePolicy(QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Fixed)
self.layout.addWidget(self.checkbox)
# FIXME: Hack to make sure there's enough space around the image,
# so that things to do not look as cramped
self.layout.setSpacing(6)
self.setup_actions()
def setup_actions(self):
"""Set up the required KActions."""
self.menu = kdeui.KMenu(self)
# self.menu.addTitle("Available actions")
self.action_collection = kdeui.KActionCollection(self)
self.download_action = self.action_collection.addAction(
"download-image")
self.view_action = self.action_collection.addAction(
"view-image")
self.browser_action = self.action_collection.addAction(
"open-browser")
self.copy_link_action = self.action_collection.addAction(
"copy-link")
self.download_action.setText("Download")
self.view_action.setText("View image")
self.browser_action.setText("Open in browser")
self.copy_link_action.setText("Copy image link")
self.download_action.setIcon(kdeui.KIcon("download"))
self.view_action.setIcon(kdeui.KIcon("image-x-generic"))
self.browser_action.setIcon(kdeui.KIcon("internet-web-browser"))
self.menu.addAction(self.view_action)
self.menu.addAction(self.download_action)
self.menu.addAction(self.browser_action)
self.menu.addAction(self.copy_link_action)
self.download_action.triggered.connect(self.download)
self.view_action.triggered.connect(self.view)
self.browser_action.triggered.connect(self.open_browser)
self.copy_link_action.triggered.connect(self.put_in_clipboard)
def contextMenuEvent(self, event):
self.menu.exec_(event.globalPos())
def label_text(self):
"Format the text of the item for display."
height = self.data.height
width = self.data.width
file_size = int(self.data.file_size)
rating = _TRANSLATED_RATINGS[self.data.rating]
# Properly format the strings according to the locale
sizestr = kdecore.ki18np("1 pixel", "%1 pixels")
image_size = kdecore.i18n("Size: %1 x %2",
sizestr.subs(width).toString(),
sizestr.subs(height).toString())
file_size = kdecore.i18n("File size: %1",
kdecore.KGlobal.locale().formatByteSize(file_size))
rating = kdecore.i18n("Rating: %1", rating)
text = image_size + "\n" + file_size + "\n" + rating
return text
def download(self, result):
"""Trigger the download of the image to a user-supplied directory."""
start_name = kdecore.KUrl(self.url_to_file).fileName()
start_url = kdecore.KUrl("kfiledialog:///danbooru/%s" %
unicode(start_name))
# Get the mimetype to be passed to the save dialog
mimetype_job = kio.KIO.mimetype(kdecore.KUrl(self.url_to_file),
kio.KIO.HideProgressInfo)
# Small enough to be synchronous
if kio.KIO.NetAccess.synchronousRun(mimetype_job, self):
mimetype = mimetype_job.mimetype()
caption = kdecore.i18n("Save image file")
enable_previews = kio.KFileDialog.ShowInlinePreview
confirm_overwrite = kio.KFileDialog.ConfirmOverwrite
options = kio.KFileDialog.Option(enable_previews | confirm_overwrite)
filename = kio.KFileDialog.getSaveFileName(start_url,
mimetype, self, caption, options)
if not filename:
return
download_url = kdecore.KUrl(self.url_to_file)
filename = kdecore.KUrl(filename)
download_job = kio.KIO.file_copy(download_url, filename, -1)
download_job.result.connect(self.download_slot)
def download_slot(self, job):
"Slot called by the KJob handling the download."
if job.error():
messagewidget = kdeui.KMessageWidget(self)
messagewidget.setMessageType(kdeui.KMessageWidget.Error)
text = job.errorText()
messagewidget.setText(text)
return
download_name = job.destUrl().toLocalFile()
# Grab a reference to the view
parent_widget = self.parent().parent()
blacklist = parent_widget.preferences.tag_blacklist
tagging = parent_widget.preferences.nepomuk_enabled
if tagging:
# Get the URL of the board for Nepomuk tagging
board_name = kdecore.KUrl(parent_widget.api_data.url)
#danbooru2nepomuk.tag_danbooru_item(download_name, self.data.tags,
# blacklist, board_name)
def view(self):
"""Display the image using the user's default image viewer."""
# Garbage collection ensues if we don't keep a reference around
self.display = kio.KRun(kdecore.KUrl(self.url_to_file), self, 0,
False, True, '')
if self.display.hasError():
messagewidget = kdeui.KMessageWidget(self)
messagewidget.setMessageType(kdeui.KMessageWidget.Error)
text = kdecore.i18n("An error occurred while "
"downloading the image.")
messagewidget.setText(text)
def open_browser(self):
kdecore.KToolInvocation.invokeBrowser(self.url_to_file,
|
Pakketeretet2/lammps | tools/python/pizza/gnu.py | Python | gpl-2.0 | 12,570 | 0.015831 | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# gnu tool
oneline = "Create plots via GnuPlot plotting program"
docstr = """
g = gnu() start up GnuPlot
g.stop() shut down GnuPlot process
g.plot(a) plot vector A against linear index
g.plot(a,b) plot B against A
g.plot(a,b,c,d,...) plot B against A, D against C, etc
g.mplot(M,N,S,"file",a,b,...) multiple plots saved to file0000.eps, etc
each plot argument can be a tuple, list, or Numeric/NumPy vector
mplot loops over range(M,N,S) and create one plot per iteration
last args are same as list of vectors for plot(), e.g. 1, 2, 4 vectors
each plot is made from a portion of the vectors, depending on loop index i
Ith plot is of b[0:i] vs a[0:i], etc
series of plots saved as file0000.eps, file0001.eps, etc
if use xrange(),yrange() then plot axes will be same for all plots
g("plot 'file.dat' using 2:3 with lines") execute string in GnuPlot
g.enter() enter GnuPlot shell
gnuplot> plot sin(x) with lines type commands directly to GnuPlot
gnuplot> exit, quit exit GnuPlot shell
g.export("data",range(100),a,...) create file with columns of numbers
all vectors must be of equal length
could plot from file with GnuPlot command: plot 'data' using 1:2 with lines
g.select(N) figure N becomes the current plot
subsequent commands apply to this plot
g.hide(N) delete window for figure N
g.save("file") save current plot as file.eps
Set attributes for current plot:
g.erase() reset all attributes to default values
g.aspect(1.3) aspect ratio
g.xtitle("Time") x axis text
g.ytitle("Energy") y axis text
g.title("My Plot") title text
g.title("title","x","y") title, x axis, y axis text
g.xrange(xmin,xmax) x axis range
g.xrange() default x axis range
g.yrange(ymin,ymax) y axis range
g.yrange() default y axis range
g.xlog() toggle x axis between linear and log
g.ylog() toggle y axis between linear and log
g.label(x,y,"text") place label at x,y coords
g.curve(N,'r') set color of curve N
colors: 'k' = black, 'r' = red, 'g' = green, 'b' = blue
'm' = magenta, 'c' = cyan, 'y' = yellow
"""
# History
# 8/05, Matt Jones (BYU): original version
# 9/05, Steve Plimpton: added mplot() method
# ToDo list
# allow choice of JPG or PNG or GIF when saving ?
# can this be done from GnuPlot or have to do via ImageMagick convert ?
# way to trim EPS plot that is created ?
# hide does not work on Mac aqua
# select does not pop window to front on Mac aqua
# Variables
# current = index of current figure (1-N)
# figures = list of figure objects with each plot's attributes
# so they aren't lost between replots
# Imports and external programs
import types, os
try: from DEFAULTS import PIZZA_GNUPLOT
except: PIZZA_GNUPLOT = "gnuplot"
try: from DEFAULTS import PIZZA_GNUTERM
except: PIZZA_GNUTERM = "x11"
# Class definition
class gnu:
# --------------------------------------------------------------------
def __init__(self):
self.GNUPLOT = os.popen(PIZZA_GNUPLOT,'w')
self.file = "tmp.gnu"
self.figures = []
self.select(1)
# --------------------------------------------------------------------
def stop(self):
self.__call__("quit")
del self.GNUPLOT
# --------------------------------------------------------------------
def __call__(self,command):
self.GNUPLOT.write(command + '\n')
self.GNUPLOT.flush()
# --------------------------------------------------------------------
def enter(self):
while 1:
command = raw_input("gnuplot> ")
if command == "quit" or command == "exit": return
self.__call__(command)
# --------------------------------------------------------------------
# write plot vectors to files and plot them
def plot(self,*vectors):
if len(vectors) == 1:
file = self.file + ".%d.1" % self.current
linear = range(len(vectors[0]))
self.export(file,linear,vectors[0])
self.figures[self.current-1].ncurves = 1
else:
if len(vectors) % 2: raise StandardError,"vectors must come in pairs"
for i in range(0,len(vectors),2):
file = self.file + ".%d.%d" % (self.current,i/2+1)
self.export(file,vectors[i],vectors[i+1])
self.figures[self.current-1].ncurves = len(vectors)/2
self.draw()
# --------------------------------------------------------------------
# create multiple plots from growing vectors, save to numbered files
# don't plot empty vector, create a [0] instead
def mplot(self,start,stop,skip,file,*vectors):
n = 0
for i in range(start,stop,skip):
partial_vecs = []
for vec in vectors:
if i: partial_vecs.append(vec[:i])
else: partial_vecs.append([0])
self.plot(*partial_vecs)
if n < 10: newfile = file + "000" + str(n)
elif n < 100: newfile = file + "00" + str(n)
elif n < 1000: newfile = file + "0" + str(n)
else: newfile = file + str(n)
self.save(newfile)
n += 1
# --------------------------------------------------------------------
# write list of equal-length vectors to filename
def export(self,filename,*vectors):
n = len(vectors[0])
for vector in vectors:
if len(vector) != n: raise StandardError,"vectors must be same length"
f = open(filename,'w')
nvec = len(vectors)
for i in xrange(n):
for j in xrange(nvec):
print >>f,vectors[j][i],
print >>f
f.close()
# --------------------------------------------------------------------
# select plot N as current plot
def select(self,n):
self.current = n
if len(self.figures) < n:
for i in range(n - len(self.figures)):
self.figures.append(figure())
cmd = "set term " + PIZZA_GNUTERM + ' ' + str(n)
self.__call__(cmd)
if self.figures[n-1].ncurves: self.draw()
# --------------------------------------------------------------------
# delete window for plot N
def hide(self,n):
cmd = "set term %s close %d" % (PIZZA_GNUTERM,n)
self.__call__(cmd)
# --------------------------------------------------------------------
# save plot to file.eps
# final re-select will reset terminal
# do not continue until plot file is written out
# else script could go forward and change data file
# use tmp.done as semaphore to indicate plot is finished
def save(self,file):
self.__call__("set terminal postscript enhanced solid lw 2 color portrait")
cmd = "set output '%s.eps'" % file
self.__call__(cmd)
if os.path.exists("tmp.done"): os.remove("tmp.done")
self.draw()
self.__call__("!touch tmp.done")
while not os.path.exists("tmp.done"): continue
self.__call__("set output")
self.select(self.current)
# --------------------------------------------------------------------
# restore default attributes by creating a new fig object
def erase(self):
fig = figure()
fig.ncurves = self.figures[self.current-1].ncurves
self.figures[self.current-1] = fig
self.draw()
# --------------------------------------------------------------------
def aspect(self,value):
self.figures[self.current-1].aspect = value
self.draw()
# ------------------------------------- | -------------------------------
def xrange(self,*values):
if len(values) == | 0:
self.figures[self.current-1].xlimit = 0
else:
self.figures[self.current-1].xlimit = (values[0],values[1])
s |
ruymanengithub/vison | vison/datamodel/fpa_dm.py | Python | gpl-3.0 | 11,944 | 0.004689 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
FPA Data Model.
LE1 FITS files.
Created on Thu Aug 1 17:05:12 2019
@author: raf
"""
# IMPORT STUFF
from pdb import set_trace as stop
from astropy.io import fits as fts
import os
import copy
import numpy as np
from collections import OrderedDict
from vison.fpa import fpa as fpamod
from vison.datamodel import ccd as ccdmod
# END IMPORT
Quads = ['E', 'F', 'G', 'H']
NSLICES = fpamod.NSLICES
NCOLS = fpamod.NCOLS
ext_ids = [None]
for j in range(1, NSLICES + 1):
for i in [1, 2, 3]:
ix = (j - 1) * 6 * 4 + (i - 1) * 4 + 1
ext_ids.append((ix + 0, j, i, 'E', (0, 0)))
ext_ids.append((ix + 1, j, i, 'F', (1, 0)))
ext_ids.append((ix + 2, j, i, 'G', (1, 1)))
ext_ids.append((ix + 3, j, i, 'H', (0, 1)))
for i in [4, 5, 6]:
ix = (j - 1) * 6 * 4 + (i - 1) * 4 + 1
ext_ids.append((ix + 0, j, i, 'G', (0, 0)))
ext_ids.append((ix + 1, j, i, 'H', (1, 0)))
ext_ids.append((ix + 2, j, i, 'E', (1, 1)))
ext_ids.append((ix + 3, j, i, 'F', (0, 1)))
PRIhdr_dict = OrderedDict()
PRIhdr_dict['BUNIT'] = 'ADU'
PRIhdr_dict['RA'] = None
PRIhdr_dict['DEC'] = None
PRIhdr_dict['DATE-OBS'] = None
PRIhdr_dict['SEQID'] = None
EXThdr_dict = OrderedDict()
EXThdr_dict['PCOUNT'] = 0
EXThdr_dict['PCOUNT'] = 1
EXThdr_dict['BZERO'] = 32768
EXThdr_dict['EXPTIME'] = 0
EXThdr_dict['CCDID'] = '0-0'
EXThdr_dict['QUADID'] = 'A'
EXThdr_dict['EXTNAME'] = '0-0.A'
EXThdr_dict['PRESCANX'] = 51
EXThdr_dict['OVRSCANX'] = 29
EXThdr_dict['GAIN'] = 3.5
EXThdr_dict['WCSAXES'] = 2
EXThdr_dict['INSTRUME'] = 'EUCLID-VIS'
class FPA_LE1(object):
"""Class for LE1 fits files built from system-level image data (whole FPA).
This class relies on ccdobj instances to do analysis of the images (one CCD at a time).
"""
Quads = ['E', 'F', 'G', 'H']
def __init__(self, infits=None):
""" """
self.NEXTENSIONS = 145
self.QNAXIS1 = 2128
self.QNAXIS2 = 2086
self.Qshape = (self.QNAXIS1, self.QNAXIS2)
self.ext_ids = copy.deepcopy(ext_ids)
self.extensions = []
self.extnames = []
if infits is not None:
self.loadfromFITS(infits)
self.fpamodel = fpamod.FPA()
self.fillval = 0
def add_extension(self, data, header, label=None, headerdict=None):
"""Adds an extension."""
if data is not None:
assert data.shape == self.Qshape
self.extensions.append(ccdmod.Extension(data, header, label, headerdict))
def set_extension(self, iext, data, header, label=None, headerdict=None):
"""Changes the contents of an extension."""
if data is not None:
assert data.shape == self.Qshape
self.extensions[iext] = ccdmod.Extension(data, header, label, headerdict)
def del_extension(self, ixextension):
"""Deletes an extension."""
self.extensions.pop(ixextension)
def initialise_as_blank(self, fillval=None):
"""Initialises object as a blank (filled with 'fillval') image of the FPA."""
if fillval is None:
fillval = self.fillval
headerdict0 = PRIhdr_dict.copy()
self.add_extension(data=None, header=None, label=None, headerdict=headerdict0)
for iext in range(1, self.NEXTENSIONS):
data = np.zeros((self.QNAXIS1, self.QNAXIS2), dtype='float32')+fillval
header = None
headerdict = EXThdr_dict.copy()
_extid = self.ext_ids[iext]
headerdict['CCDID'] = '%i-%i' % (_extid[1], _extid[2])
headerdict['QUADID'] = _extid[3]
headerdict['EXTNAME'] = '%s.%s' % (headerdict['CCDID'],headerdict['QUADID'])
self.add_extension(data, header, label=None, headerdict=headerdict)
self.extnames.append(None)
def loadfromFITS(self, infits):
"""Loads contents of self from a FITS file."""
hdulist = fts.open(infits)
nextensions = len(hdulist)
assert nextensions == self.NEXTENSIONS
extensions = np.arange(nextensions)
for iext in extensions:
hdu = hdulist[iext]
if hdu.data is not None:
data = hdu.data.transpose().astype('float32').copy()
else:
data = None
header = hdu.header
if 'EXTNAME' in hdu.header:
label = hdu.header['EXTNAME']
else:
label = None
self.extensions.append(ccdmod.Extension(data, header, label))
self.extnames.append(label)
hdulist.close()
def savetoFITS(self, outfits, clobber=True, unsigned16bit=False):
"""Dumps self to a FITS file."""
prihdr = self.extensions[0].header
prihdu = fts.PrimaryHDU(data=None,
header=prihdr)
hdulist = fts.HDUList([prihdu])
for iext in range(1, self.NEXTENSIONS):
idata = self.extensions[iext].data.T.copy()
iheader = self.extensions[iext].header
iname = self.extensions[iext].label
ihdu = fts.ImageHDU(data=idata, header=iheader, name=iname)
if unsigned16bit:
ihdu.scale('int16', '', bzero=32768)
ihdu.header.add_history(
'Scaled to unsigned 16bit integer!')
hdulist.append(ihdu)
hdulist.writeto(outfits, overwrite=clobber)
def get_CCDID_from_BLCCD(self, BLOCK, CCD):
"""
Retrieves CCD ID (e.g. C_11) given BLOCK name and CCD in block.
Parameters
----------
:param BLOCK: block nickname (e.g. 'CURIE')
:param CCD: 1, 2 or 3
"""
return self.fpamodel.get_Ckey_from_BlockCCD(BLOCK, CCD)
def get_extid(self, CCDID, Q):
"""
Retrieves extension ID given CCDID and quadrant.
Parameters
----------
:param CCDID: e.g. 'C_11'
:param Q: 'E', 'F', 'G' or 'H'
"""
jC, iS = int(CCDID[2]), int(CCDID[3])
for ix in range(1, self.NEXTENSIONS):
_extid = self.ext_ids[ix]
if _extid[1] == jC and _extid[2] == iS and _extid[3] == Q:
return _extid
return None
def get_ccdobj(self, CCDID):
"""Returns a CCD Object given a CCDID"""
ccdobj = ccdmod.CCD(withpover=True, overscan=29)
blnk = np.zeros((ccdobj.NAXIS1, ccdobj.NAXIS2), dtype='float32')
ccdobj.add_extension(blnk, header=None, label=None, headerdict=None)
for Q in self.Quads:
extid = self.get_extid(CCDID, Q)
extix = extid[0]
os_coo = extid[4]
flip = os_coo
Qdata = self.extensions[extix].data.copy()
Qdata = self.fpamodel.flip_img(Qdata, flip)
padQdata = np.zeros((ccdobj.NAXIS1 // 2, ccdobj.NAXIS2 // 2), dtype='float32')
padQdata[:, :] = Qdata.copy()
ccdobj.set_quad(padQdata, Q, canonical=True, extension=-1)
return ccdobj
def _padd_extra_soverscan(self, Qdata):
"""Padds extra serial overscan with sel | f.fillval.
Used when building synthetic data of LE1 format using as input
images from the VGCC that only h | ave 20 columns of serial
overscan (FPA images have 29)."""
pQdata = np.zeros((self.QNAXIS1, self.QNAXIS2), dtype=Qdata.dtype)+self.fillval
pQdata[0:Qdata.shape[0], 0:Qdata.shape[1]] = Qdata.copy()
return pQdata
def set_ccdobj(self, ccdobj, CCDID, inextension=-1):
"""Sets the image contents of input ccdobj to the image in self, at CCDID."""
for Q in self.Quads:
extid = self.get_extid(CCDID, Q)
extix = extid[0]
os_coo = extid[4]
flip = os_coo
Qdata = ccdobj.get_quad(Q, canonical=True, extension=inextension)
if Qdata.shape[0] < self.QNAXIS1:
Qdata = self._padd_extra_soverscan(Qdata)
Qdata = self.fpamodel.flip_img(Qdata, flip)
_extid = self.get_extid(CCDID,Q)
|
climapulse/dj-labeler | runtests.py | Python | bsd-3-clause | 999 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from django.conf import settings
from django.core.management import | execu | te_from_command_line
import sys
if not settings.configured:
    # Configure a minimal, self-contained Django environment so the test
    # suite can run without a project-level settings module.
    test_runners_args = {}
    settings.configure(
        # In-memory SQLite: fast and throwaway.
        DATABASES={
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
                'NAME': ':memory:',
            },
        },
        INSTALLED_APPS=(
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'tests',
        ),
        # SHA1 is cryptographically weak but fast; acceptable for
        # throwaway test passwords only.
        PASSWORD_HASHERS=(
            'django.contrib.auth.hashers.SHA1PasswordHasher',
        ),
        ROOT_URLCONF=None,
        USE_TZ=True,
        SECRET_KEY='foobar',
        SILENCED_SYSTEM_CHECKS=['1_7.W001'],
        **test_runners_args
    )
def runtests():
    """Run the package's test suite via Django's ``test`` management
    command, forwarding any extra command-line arguments."""
    execute_from_command_line(sys.argv[:1] + ['test'] + sys.argv[1:])


if __name__ == '__main__':
    runtests()
|
sauloal/PiCastPy | werkzeug/contrib/lint.py | Python | mit | 12,238 | 0.000817 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.lint
~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module provides a middleware that performs sanity checks of the WSGI
application. It checks that :pep:`333` is properly implemented and warns
on some common HTTP errors such as non-empty responses for 304 status
codes.
This module provides a middleware, the :class:`LintMiddleware`. Wrap your
application with it and it will warn about common problems with WSGI and
HTTP while your application is running.
It's strongly recommended to use it during development.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from urlparse import urlparse
from warnings import warn
from werkzeug.datastructures import Headers
from werkzeug.http import is_entity_header
from werkzeug.wsgi import FileWrapper
class WSGIWarning(Warning):
    """Warning class for WSGI warnings.

    Emitted (via ``warnings.warn``) when the wrapped application or server
    violates the WSGI specification.
    """
class HTTPWarning(Warning):
    """Warning class for HTTP warnings.

    Emitted for HTTP protocol mistakes (e.g. a body in a 304 response)
    rather than WSGI interface violations.
    """
def check_string(context, obj, stacklevel=3):
    """Emit a WSGIWarning unless *obj* is exactly of type ``str``.

    Deliberately uses an exact type check (not ``isinstance``) so that
    ``str`` subclasses are flagged as well.  *stacklevel* is currently
    unused.
    """
    if type(obj) is str:
        return
    warn(WSGIWarning('%s requires bytestrings, got %s' %
                     (context, obj.__class__.__name__)))
class InputStream(object):
    """Wrapper for ``wsgi.input`` that warns about unsafe or non-portable
    usage patterns (unbounded reads, size-hinted readline, premature
    close) while delegating to the real stream."""

    def __init__(self, stream):
        self._stream = stream

    def read(self, *args):
        # A read() without a size may never return: WSGI does not
        # guarantee an EOF marker on the input stream.
        if len(args) == 0:
            warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
                             'input stream, thus making calls to '
                             'wsgi.input.read() unsafe. Conforming servers '
                             'may never return from this call.'),
                 stacklevel=2)
        elif len(args) != 1:
            warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
                 stacklevel=2)
        return self._stream.read(*args)

    def readline(self, *args):
        if len(args) == 0:
            warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
                             ' are unsafe. Use wsgi.input.read() instead.'),
                 stacklevel=2)
        elif len(args) == 1:
            # Size hints are a common server extension but not part of WSGI.
            warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
                             'WSGI does not support this, although it\'s available '
                             'on all major servers.'),
                 stacklevel=2)
        else:
            raise TypeError('too many arguments passed to wsgi.input.readline()')
        return self._stream.readline(*args)

    def __iter__(self):
        try:
            return iter(self._stream)
        except TypeError:
            warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
            return iter(())

    def close(self):
        # Per WSGI the server, not the application, owns the input stream.
        warn(WSGIWarning('application closed the input stream!'),
             stacklevel=2)
        self._stream.close()
class ErrorStream(object):
    """Wrapper for ``wsgi.errors`` that validates written chunks and warns
    if the application closes the error stream (which it must not do)."""

    def __init__(self, stream):
        self._stream = stream

    def write(self, s):
        check_string('wsgi.error.write()', s)
        self._stream.write(s)

    def flush(self):
        self._stream.flush()

    def writelines(self, seq):
        # Bug fix: write each individual line.  Previously the whole *seq*
        # was passed to write() once per element, duplicating the entire
        # sequence len(seq) times.
        for line in seq:
            self.write(line)

    def close(self):
        warn(WSGIWarning('application closed the error stream!'),
             stacklevel=2)
        self._stream.close()
class GuardedWrite(object):
    """Wraps the ``write`` callable returned by ``start_response``,
    validating each chunk's type and recording chunk lengths in the
    shared *chunks* list for the later Content-Length check."""

    def __init__(self, write, chunks):
        self._write = write
        self._chunks = chunks

    def __call__(self, s):
        check_string('write()', s)
        # Bug fix: _write is the start_response write *callable*, not a
        # stream object, so it must be called directly.
        # (Was: self._write.write(s), which raises AttributeError.)
        self._write(s)
        self._chunks.append(len(s))
class GuardedIterator(object):
    """Wraps the application's response iterator to detect protocol
    violations: iteration after close(), output before start_response,
    non-bytestring chunks, missing close(), and HTTP mistakes such as a
    body/entity headers in 304 responses or a Content-Length mismatch.

    Note: this is Python 2 style iteration (``next`` method)."""

    def __init__(self, iterator, headers_set, chunks):
        self._iterator = iterator
        self._next = iter(iterator).next
        self.closed = False
        # Shared, mutable state filled in by the start_response wrapper.
        self.headers_set = headers_set
        self.chunks = chunks

    def __iter__(self):
        return self

    def next(self):
        if self.closed:
            warn(WSGIWarning('iterated over closed app_iter'),
                 stacklevel=2)
        rv = self._next()
        if not self.headers_set:
            warn(WSGIWarning('Application returned before it '
                             'started the response'), stacklevel=2)
        check_string('application iterator items', rv)
        self.chunks.append(len(rv))
        return rv

    def close(self):
        self.closed = True
        if hasattr(self._iterator, 'close'):
            self._iterator.close()
        # Once the response is over, cross-check what was actually sent
        # against the declared status/headers.
        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get('content-length', type=int)
            if status_code == 304:
                # 304 Not Modified must not carry entity headers or a body.
                for key, value in headers:
                    key = key.lower()
                    if key not in ('expires', 'content-location') and \
                       is_entity_header(key):
                        warn(HTTPWarning('entity header %r found in 304 '
                                         'response' % key))
                if bytes_sent:
                    warn(HTTPWarning('304 responses must not have a body'))
            elif 100 <= status_code < 200 or status_code == 204:
                if content_length != 0:
                    # Bug fix: the '%' was previously applied to the
                    # HTTPWarning instance instead of the format string,
                    # raising TypeError whenever this branch fired.
                    warn(HTTPWarning('%r responses must have an empty '
                                     'content length' % status_code))
                if bytes_sent:
                    warn(HTTPWarning('%r responses must not have a body' %
                                     status_code))
            elif content_length is not None and content_length != bytes_sent:
                warn(WSGIWarning('Content-Length and the number of bytes '
                                 'sent to the client do not match.'))

    def __del__(self):
        if not self.closed:
            try:
                warn(WSGIWarning('Iterator was garbage collected before '
                                 'it was closed.'))
            except Exception:
                # Warning machinery may be gone during interpreter shutdown.
                pass
class LintMiddleware(object):
"""This middleware wraps an application and warns on common errors.
Among other thing it currently checks for the following problems:
- invalid status codes
- non-bytestrings sent to the WSGI server
- strings returned from the WSGI application
- non-empty conditional responses
- unquoted etags
- relative URLs in the Location header
- unsafe calls to wsgi.input
- unclosed iterators
Detected errors are emitted using the standard Python :mod:`warnings`
system and usually end up on :data:`stderr`.
::
from werkzeug.contrib.lint import LintMiddleware
app = LintMiddleware(app)
:param app: the application to wrap
"""
    def __init__(self, app):
        # The wrapped WSGI application; every request is proxied through it.
        self.app = app
    def check_environ(self, environ):
        """Sanity-check a WSGI environ: dict type, required keys, WSGI
        version, and the leading-slash rules for SCRIPT_NAME/PATH_INFO."""
        if type(environ) is not dict:
            warn(WSGIWarning('WSGI environment is not a standard python dict.'),
                 stacklevel=4)
        for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
                    'wsgi.version', 'wsgi.input', 'wsgi.errors',
                    'wsgi.multithread', 'wsgi.multiprocess',
                    'wsgi.run_once'):
            if key not in environ:
                warn(WSGIWarning('required environment key %r not found'
                                 % key), stacklevel=3)
        if environ['wsgi.version'] != (1, 0):
            warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
                 stacklevel=3)
        script_name = environ.get('SCRIPT_NAME', '')
        # SCRIPT_NAME may be empty, but when present it must start with '/'.
        if script_name and script_name[:1] != '/':
            warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
                             % script_name), stacklevel=3)
        path_info = environ.get('PATH_INFO', '')
        if path_info[:1] != '/':
            warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
                             % path_info), stacklevel=3)
def check_start_response(self, status, headers, exc_info):
check_string('status', status)
status_code = status.split(None, 1)[0]
if |
droidsec-cn/FuzzLabs | requests/network_TEST.py | Python | gpl-2.0 | 288 | 0 | # ============================= | ================================================
# Basic TEST
# This file is part of the FuzzLabs Fuzzing Framework
# =============================================================================
from sulley import *
s_initializ | e("TEST")
s_string("TEST")
|
teejaydub/khet | simpleSound.py | Python | gpl-2.0 | 1,290 | 0.007752 | # simpleSound.py
# Plays audio files on Linux and Windows.
# Written Jan-2008 by Timothy Weber.
# Based on (reconstituted) co | de posted by Bill Dandreta at <http://www.velocityreviews.com/forums/t337346-how-to-play-sound-in-python.html>.
import platform

if platform.system().startswith('Win'):
    from winsound import PlaySound, SND_FILENAME, SND_ASYNC
elif platform.system().startswith('Linux'):
    from wave import open as waveOpen
    from ossaudiodev import open as ossOpen
    try:
        from ossaudiodev import AFMT_S16_NE
    except ImportError:
        # Older Pythons lack AFMT_S16_NE; derive the native-endian 16-bit
        # format from the host byte order.
        # Bug fix: `byteorder` and the `ossaudiodev` module name were
        # previously referenced without being imported, so this fallback
        # raised NameError instead of computing the format.
        from sys import byteorder
        import ossaudiodev
        if byteorder == "little":
            AFMT_S16_NE = ossaudiodev.AFMT_S16_LE
        else:
            AFMT_S16_NE = ossaudiodev.AFMT_S16_BE
def Play(filename):
    """Plays the sound in the given filename, asynchronously.

    On Windows this delegates to ``winsound``; on Linux the WAV file is
    decoded with ``wave`` and streamed to the OSS device ``/dev/dsp``.
    Errors on the Linux path are deliberately swallowed (best-effort
    playback); other platforms are silently unsupported.

    NOTE(review): the Linux branch writes the whole file synchronously,
    so playback is not actually asynchronous there.
    """
    if platform.system().startswith('Win'):
        PlaySound(filename, SND_FILENAME|SND_ASYNC)
    elif platform.system().startswith('Linux'):
        try:
            s = waveOpen(filename,'rb')
            (nc,sw,fr,nf,comptype, compname) = s.getparams( )
            dsp = ossOpen('/dev/dsp','w')
            # Configure the device to match the WAV: 16-bit native-endian,
            # nc channels, fr frames per second.
            dsp.setparameters(AFMT_S16_NE, nc, fr)
            data = s.readframes(nf)
            s.close()
            dsp.write(data)
            dsp.close()
        except:
            pass
ssssam/calliope | tests/conftest.py | Python | gpl-2.0 | 872 | 0 | # Calliope
# Copyright (C) 2017, 2018 Sam Thursfield <sam@afuera.me.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License | for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http | ://www.gnu.org/licenses/>.
import pytest
import testutils
@pytest.fixture()
def cli():
    '''Fixture for testing through the `cpe` commandline interface.

    Returns a fresh ``testutils.Cli`` helper per test.
    '''
    return testutils.Cli()
|
asterix24/GestionaleCaldaie | main/views.py | Python | gpl-2.0 | 9,759 | 0.0083 | #!/usr/bmport settings
# -*- coding: utf-8 -*-
from django import http
from django.shortcuts import render
from main import models
from main import myforms
from main import cfg
from main import tools
from main import data_render
from main import database_manager
from main import scripts
from main import errors
from main import user_settings
import logging
logger = logging.getLogger(__name__)
import csv
import datetime
def __getIds(raw_items, item_id):
    """Extract the *item_id*-th field from every comma-separated string
    in *raw_items* (used on the '<cliente>,<impianto>,...' row values
    posted by the home-view checkboxes)."""
    return [entry.split(',')[item_id] for entry in raw_items]
import xlwt
def __export_xls(data_table, filename="tabella"):
    """Render *data_table* as an .xls attachment HttpResponse.

    Column set and order come from the user's 'export_table' settings;
    the download is named '<filename>_<dd-mm-YYYY>.xls'.
    """
    # Build the response with the Excel content type and an attachment
    # disposition carrying a dated filename.
    response = http.HttpResponse(mimetype='application/ms-excel; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename="%s_%s.xls"' % (filename, datetime.datetime.today().strftime("%d-%m-%Y"))
    book = xlwt.Workbook(encoding='utf-8')
    sheet = book.add_sheet('Elenco')
    # Header row: humanized column names.
    export_list = user_settings.settings_columView('export_table')
    for colum,j in enumerate(export_list):
        sheet.write(0, colum, "%s" % j.replace('_', ' ').capitalize())
    # Data rows (offset by 1 to skip the header row).
    for row,i in enumerate(data_table):
        for colum,j in enumerate(export_list):
            sheet.write(row + 1, colum, data_render.formatFields(i,j, default_text="-"))
    book.save(response)
    return response
def __export_csv(data_table, filename="tabella"):
    """Render *data_table* as a ';'-separated CSV attachment HttpResponse,
    using the same 'export_table' column settings as the XLS export."""
    # Build the response with the CSV content type and a dated filename.
    response = http.HttpResponse(mimetype='text/csv')
    response['Content-Disposition'] = 'attachment; filename="%s_%s.csv"' % (filename, datetime.datetime.today().strftime("%d-%m-%Y"))
    export_list = user_settings.settings_columView('export_table')
    # UTF-8 BOM so Excel auto-detects the encoding.
    response.write("\xEF\xBB\xBF")
    writer = tools.UnicodeWriter(response, delimiter=';')
    writer.writerow(["%s" % j.replace('_', ' ').capitalize() for j in export_list])
    for item_dict in data_table:
        l = []
        for i in export_list:
            l.append(data_render.formatFields(item_dict, i, default_text="-"))
        writer.writerow(l)
    return response
def export_table(request):
    """Export the full-text search result for ?search_keys=... as an XLS
    attachment named 'Anagrafe_<date>.xls'."""
    search_string = request.GET.get('search_keys','')
    data_table = database_manager.search_fullText(search_string)
    return __export_xls(data_table, "Anagrafe")
def home(request, d={}):
    """Main dashboard view: search/filter interventions, bulk-change their
    state, and export or print selected rows.

    POST handles the toolbar actions ('Apri'/'Chiudi'/'Sospendi' state
    updates, 'Lettera' report generation, 'Scarica Tabella' XLS export);
    GET applies the search form.  *d* optionally carries a notification
    dict ('message_hdr', 'message', 'message_type') to display.

    NOTE(review): mutable default ``d={}`` -- it is only read, never
    mutated, so harmless here, but ``d=None`` would be safer.
    """
    form = myforms.RangeDataSelect()
    data = ''
    notification = ''
    # Use default at first time when the home page is never loaded
    form_dict = {
        'search_keys' : "",
        'filter_type' : None,
        'ref_month' : None,
        'ref_year' : None,
        'order_by_field' : "",
        'ordering' : "",
    }
    if request.method == 'POST':
        # Each selected row value is '<cliente>,<impianto>,<verifica>,<intervento>'.
        selected_rows = request.POST.getlist('row_select', [])
        action = request.POST.get('button_action', '')
        if action == 'Lettera':
            ids = __getIds(selected_rows, data_render.CLIENTE_ID)
            data_to_render = database_manager.search_ids('main_cliente.id', ids)
            return generate_report(data_to_render)
        elif action == 'Scarica Tabella':
            ids = __getIds(selected_rows, data_render.CLIENTE_ID)
            data_to_render = database_manager.search_ids('main_cliente.id', ids)
            return __export_xls(data_to_render, "Elenco")
        else:
            # State-change actions apply only to rows that have a Verifica.
            for i in selected_rows:
                ids = i.split(',')
                verifica_id = ids[data_render.VERIFICA_ID]
                if verifica_id != 'None':
                    _id = int(verifica_id)
                    if action == 'Apri':
                        models.Verifica.objects.filter(id=_id).update(stato_verifica='A')
                    if action == 'Chiudi':
                        models.Verifica.objects.filter(id=_id).update(stato_verifica='C')
                    if action == 'Sospendi':
                        models.Verifica.objects.filter(id=_id).update(stato_verifica='S')
    if request.method == 'GET' and request.GET != {}:
        form = myforms.RangeDataSelect(request.GET)
        if form.is_valid():
            form_dict['search_keys'] = form.cleaned_data['search_keys']
            form_dict['filter_type'] = form.cleaned_data['filter_type']
            form_dict['ref_month'] = form.cleaned_data['ref_month']
            form_dict['ref_year'] = form.cleaned_data['ref_year']
            form_dict['order_by_field'] = form.cleaned_data['order_by_field']
            form_dict['ordering'] = form.cleaned_data['ordering']
    # First table: interventions due in the selected month.
    data_to_render = database_manager.search_inMonth(**form_dict)
    dr = data_render.DataRender(data_to_render)
    dr.selectColums(user_settings.settings_columView('home_view'))
    tb_top = [
        "<button class=\"btn btn-info dropdown-toggle\" data-toggle=\"dropdown\">Seleziona \
        <span class=\"caret\"></span></button> \
        <ul class=\"dropdown-menu\"> \
        <li><a id=\"action\" href=\"#\">Aperti</a></li> \
        <li><a id=\"action\" href=\"#\">Sospesi</a></li> \
        <li><a id=\"action\" href=\"#\">Chiusi</a></li> \
        <li class=\"divider\"></li> \
        <li><a id=\"action\" href=\"#\">Tutti</a></li> \
        <li><a id=\"action\" href=\"#\">Nessuno</a></li> \
        </ul>",
        "<input class=\"btn btn-info\" type=\"submit\" name=\"button_action\" value=\"Apri\">",
        "<input class=\"btn btn-info\" type=\"submit\" name=\"button_action\" value=\"Chiudi\">",
        "<input class=\"btn btn-info\" type=\"submit\" name=\"button_action\" value=\"Sospendi\">",
        "<input class=\"btn btn-info\" type=\"submit\" name=\"button_action\" value=\"Lettera\">",
        "<input class=\"btn btn-info\" type=\"submit\" name=\"button_action\" value=\"Scarica Tabella\">",
    ]
    tb_left = [
        "<input type=\"checkbox\" name=\"row_select\" id=\"{stato_verifica}\" value=\"{cliente_id},{impianto_id},{verifica_id},{intervento_id}\">"
    ]
    dr.toolbar(top=tb_top, left=tb_left)
    dr.msgItemsEmpty("<br><h3>La ricerca non ha prodotto risultati.</h3>")
    dr.msgStatistics(("<br><h2>Nel mese di %s " % myforms.monthStr(form_dict['ref_month'])) + "COUNT interventi in scadenza.</h2><br>")
    dr.showStatistics()
    dr.orderUrl('home', form_dict)
    data += dr.toTable()
    # Second table: interventions already closed in the same month.
    form_dict['status'] = True
    data_to_render = database_manager.search_inMonth(**form_dict)
    dr = data_render.DataRender(data_to_render)
    dr.selectColums(user_settings.settings_columView('home_view'))
    dr.toolbar(top=tb_top, left=tb_left)
    dr.msgItemsEmpty("")
    dr.msgStatistics(("<br><h2>N.COUNT interventi chiusi nel mese di %s" % myforms.monthStr(form_dict['ref_month'])) + ".</h2><br>")
    dr.showStatistics()
    data += dr.toTable()
    if d:
        notification = data_render.notification(d['message_hdr'], d['message'], d['message_type'])
    return render(request, 'home.html',{'query_path':request.get_full_path(),
                                        'notification': notification,
                                        'data': data,
                                        'data_form': form,
                                        'scripts': scripts.HOME_ADD_JS,
                                        })
def populatedb(request):
    """One-off maintenance view: load customer records from a CSV dump.

    NOTE(review): the CSV path is hard-coded to a developer machine.
    """
    #data = tools.insert_csv_files(cli_on=False)
    data = tools.load_csv('/home/asterix/gestionale_www/main/elenco2011.csv')
    return _display_ok(request, "DB aggiornato con sucesso\n" + data)
def test(request):
    """Debug view for experimenting with the column show/hide template."""
    print request.POST.getlist('or', [])
    show = cfg.HOME_STD_VIEW
    hide = ["Vuota"]
    return render(request, 'test.html', {'items_show': show, 'items_hide':hide })
from functools import partial
import tempfile
import re
import os,sys
import gestionale
def tag_replace(m, item_dict):
    """Regex-substitution callback: expand a template tag (match *m*,
    e.g. '<NOME>') to the formatted value of the corresponding field of
    *item_dict*.

    Non-ASCII characters are rewritten as '\\u<decimal>?' escapes --
    presumably the RTF unicode escape form used by generate_report
    (TODO confirm).
    """
    # Strip the surrounding tag delimiters to get the field name.
    k = m.group()
    field_name = k[1:-1].lower()
    field = data_render.formatFields(item_dict, field_name, default_text="-")
    return ''.join([c if ord(c) < 128 else u'\\u' + unicode(ord(c)) + u'?' for c in unicode(field)])
def gen |
Farbod909/parking-app-server | parking/apps.py | Python | mit | 89 | 0 | from d | jango.apps import AppConfig
class ParkingConfig(AppConfig):
    """Django application configuration for the ``parking`` app."""
    name = 'parking'
bethesirius/ChosunTruck | linux/tensorbox/utils/train_utils.py | Python | gpl-3.0 | 10,836 | 0.00526 | import numpy as np
import random
import json
import os
import cv2
import itertools
from scipy.misc import imread, imresize
import tensorflow as tf
from data_utils import (annotation_jitter, annotation_to_h5)
from utils.annolist import AnnotationLib as al
from rect import Rect
from utils import tf_concat
def rescale_boxes(current_shape, anno, target_height, target_width):
    """Scale every rect of *anno* in place from *current_shape* (h, w, ...)
    to the target image size, and return the (mutated) annotation."""
    scale_x = target_width / float(current_shape[1])
    scale_y = target_height / float(current_shape[0])
    for rect in anno.rects:
        # Sanity-check box orientation before scaling each axis pair.
        assert rect.x1 < rect.x2
        rect.x1, rect.x2 = rect.x1 * scale_x, rect.x2 * scale_x
        assert rect.y1 < rect.y2
        rect.y1, rect.y2 = rect.y1 * scale_y, rect.y2 * scale_y
    return anno
def load_idl_tf(idlfile, H, jitter):
    """Take the idlfile and net configuration and create a generator
    that outputs a jittered version of a random image from the annolist
    that is mean corrected.

    Loops over the annotation list forever (reshuffling each epoch) and
    yields dicts with keys 'image', 'boxes', 'flags'.
    """
    annolist = al.parse(idlfile)
    annos = []
    for anno in annolist:
        # Image paths in the idl file are relative to the idl file itself.
        anno.imageName = os.path.join(
            os.path.dirname(os.path.realpath(idlfile)), anno.imageName)
        annos.append(anno)
    random.seed(0)
    if H['data']['truncate_data']:
        annos = annos[:10]
    for epoch in itertools.count():
        random.shuffle(annos)
        for anno in annos:
            I = imread(anno.imageName)
            #Skip Greyscale images
            if len(I.shape) < 3:
                continue
            # Drop the alpha channel of RGBA images.
            if I.shape[2] == 4:
                I = I[:, :, :3]
            if I.shape[0] != H["image_height"] or I.shape[1] != H["image_width"]:
                # Boxes need rescaling only once: the anno objects are
                # mutated in place and reused across epochs.
                if epoch == 0:
                    anno = rescale_boxes(I.shape, anno, H["image_height"], H["image_width"])
                I = imresize(I, (H["image_height"], H["image_width"]), interp='cubic')
            if jitter:
                jitter_scale_min=0.9
                jitter_scale_max=1.1
                jitter_offset=16
                I, anno = annotation_jitter(I,
                                            anno, target_width=H["image_width"],
                                            target_height=H["image_height"],
                                            jitter_scale_min=jitter_scale_min,
                                            jitter_scale_max=jitter_scale_max,
                                            jitter_offset=jitter_offset)
            boxes, flags = annotation_to_h5(H,
                                            anno,
                                            H["grid_width"],
                                            H["grid_height"],
                                            H["rnn_len"])
            yield {"image": I, "boxes": boxes, "flags": flags}
def make_sparse(n, d):
    """One-hot encode class index *n* as a float32 vector of length *d*."""
    encoded = np.zeros((d,), dtype=np.float32)
    encoded[n] = 1.0
    return encoded
def load_data_gen(H, phase, jitter):
    """Generator over training/test examples with dense per-cell targets.

    Reshapes the box/flag tensors produced by load_idl_tf into
    (grid_size, rnn_len, ...) arrays and one-hot class confidences.

    :param H: hyperparameter dict.
    :param phase: 'train' or 'test' (selects the idl file; jitter is only
        ever applied in the train phase).
    :param jitter: whether to jitter training images.
    """
    grid_size = H['grid_width'] * H['grid_height']
    data = load_idl_tf(H["data"]['%s_idl' % phase], H, jitter={'train': jitter, 'test': False}[phase])
    for d in data:
        output = {}
        rnn_len = H["rnn_len"]
        flags = d['flags'][0, :, 0, 0:rnn_len, 0]
        boxes = np.transpose(d['boxes'][0, :, :, 0:rnn_len, 0], (0, 2, 1))
        assert(flags.shape == (grid_size, rnn_len))
        assert(boxes.shape == (grid_size, rnn_len, 4))
        output['image'] = d['image']
        # One-hot confidences per cell and per RNN slot.
        output['confs'] = np.array([[make_sparse(int(detection), d=H['num_classes']) for detection in cell] for cell in flags])
        output['boxes'] = boxes
        output['flags'] = flags
        yield output
def add_rectangles(H, orig_image, confidences, boxes, use_stitching=False, rnn_len=1, min_conf=0.1, show_removed=True, tau=0.25, show_suppressed=True):
    """Draw predicted boxes onto a copy of the image and build AnnoRects.

    :param H: hyperparameter dict (grid size, region size, num classes).
    :param orig_image: batch of images; only orig_image[0] is used.
    :param confidences: per-cell class scores, reshaped to
        (batch, grid_h, grid_w, rnn_len, num_classes).
    :param boxes: per-cell (cx, cy, w, h) offsets relative to cell centers.
    :param use_stitching: merge per-cell candidates with stitch_rects(tau).
    :param min_conf: only rectangles above this confidence are drawn.
    :param show_suppressed: also draw the raw candidates in red.
    :return: (annotated image, list of AnnoRect with .score set).
    """
    image = np.copy(orig_image[0])
    num_cells = H["grid_height"] * H["grid_width"]
    boxes_r = np.reshape(boxes, (-1,
                                 H["grid_height"],
                                 H["grid_width"],
                                 rnn_len,
                                 4))
    confidences_r = np.reshape(confidences, (-1,
                                             H["grid_height"],
                                             H["grid_width"],
                                             rnn_len,
                                             H['num_classes']))
    cell_pix_size = H['region_size']
    all_rects = [[[] for _ in range(H["grid_width"])] for _ in range(H["grid_height"])]
    for n in range(rnn_len):
        for y in range(H["grid_height"]):
            for x in range(H["grid_width"]):
                bbox = boxes_r[0, y, x, n, :]
                # Cell-relative offset -> absolute pixel center.
                abs_cx = int(bbox[0]) + cell_pix_size/2 + cell_pix_size * x
                abs_cy = int(bbox[1]) + cell_pix_size/2 + cell_pix_size * y
                w = bbox[2]
                h = bbox[3]
                # Confidence of the best non-background class (index 0 is
                # presumably background -- TODO confirm).
                conf = np.max(confidences_r[0, y, x, n, 1:])
                all_rects[y][x].append(Rect(abs_cx,abs_cy,w,h,conf))
    all_rects_r = [r for row in all_rects for cell in row for r in cell]
    if use_stitching:
        from stitch_wrapper import stitch_rects
        acc_rects = stitch_rects(all_rects, tau)
    else:
        acc_rects = all_rects_r
    if show_suppressed:
        pairs = [(all_rects_r, (255, 0, 0))]
    else:
        pairs = []
    pairs.append((acc_rects, (0, 255, 0)))
    for rect_set, color in pairs:
        for rect in rect_set:
            if rect.confidence > min_conf:
                cv2.rectangle(image,
                              (rect.cx-int(rect.width/2), rect.cy-int(rect.height/2)),
                              (rect.cx+int(rect.width/2), rect.cy+int(rect.height/2)),
                              color,
                              2)
    rects = []
    for rect in acc_rects:
        r = al.AnnoRect()
        r.x1 = rect.cx - rect.width/2.
        r.x2 = rect.cx + rect.width/2.
        r.y1 = rect.cy - rect.height/2.
        r.y2 = rect.cy + rect.height/2.
        r.score = rect.true_confidence
        rects.append(r)
    return image, rects
def to_x1y1x2y2(box):
    """Convert (cx, cy, w, h) box rows to corner form (x1, y1, x2, y2).

    Width and height are clamped to at least 1 before the conversion.
    """
    w = tf.maximum(box[:, 2:3], 1)
    h = tf.maximum(box[:, 3:4], 1)
    x1 = box[:, 0:1] - w / 2
    x2 = box[:, 0:1] + w / 2
    y1 = box[:, 1:2] - h / 2
    y2 = box[:, 1:2] + h / 2
    return tf_concat(1, [x1, y1, x2, y2])
def intersection(box1, box2):
    """Per-row intersection area of (x1, y1, x2, y2) boxes; 0 if disjoint."""
    x1_max = tf.maximum(box1[:, 0], box2[:, 0])
    y1_max = tf.maximum(box1[:, 1], box2[:, 1])
    x2_min = tf.minimum(box1[:, 2], box2[:, 2])
    y2_min = tf.minimum(box1[:, 3], box2[:, 3])
    # Clamp at zero so non-overlapping boxes contribute no area.
    x_diff = tf.maximum(x2_min - x1_max, 0)
    y_diff = tf.maximum(y2_min - y1_max, 0)
    return x_diff * y_diff
def area(box):
    """Area of each (x1, y1, x2, y2) box row, clamped at zero for
    degenerate (inverted) boxes."""
    width = tf.maximum(box[:, 2] - box[:, 0], 0)
    height = tf.maximum(box[:, 3] - box[:, 1], 0)
    return width * height
def union(box1, box2):
    """Per-row union area: area(A) + area(B) - intersection(A, B)."""
    overlap = intersection(box1, box2)
    return area(box1) + area(box2) - overlap
def iou(box1, box2):
    """Per-row intersection-over-union of (x1, y1, x2, y2) boxes.

    NOTE(review): no epsilon in the denominator, so degenerate box pairs
    (union area 0) produce NaN/inf.
    """
    return intersection(box1, box2) / union(box1, box2)
def to_idx(vec, w_shape):
    '''
    Flatten (idn, idh, idw) index triples into scalar row offsets for a
    tensor of shape w_shape = [n, h, w, c] viewed as (n*h*w, c).

    vec = (idn, idh, idw)
    w_shape = [n, h, w, c]
    '''
    return vec[:, 2] + w_shape[2] * (vec[:, 1] + w_shape[1] * vec[:, 0])
def interp(w, i, channel_dim):
'''
Input:
w: A 4D block tensor of shape (n, h, w, c)
i: A list of 3-tuples [(x_1, y_1, z_1), (x_2, y_2, z_2), ...],
each having type (int, float, float)
The 4D block represents a batch of 3D image feature volumes with c channels.
The input i is a list of points to index into w via interpolation. Direct
indexing is not possible due to y_1 and z_1 being float values.
Output:
A list of the values: [
w[x_1, y_1, z_1, :]
w[x_2, y_2, z_2, :]
...
w[x_k, y_k, z_k, :]
]
of the same length == len(i)
'''
w_as_vector = tf.reshape(w, [-1, channel_dim]) # gather expects w to be 1-d
upper_l = tf.to_int32(tf_concat(1, [i[:, 0:1], tf.floor(i[:, 1:2]), tf.floor(i[:, 2:3])]))
upper_r = tf.to_int32(tf_concat(1, [i[:, 0:1], tf.floor(i[:, 1:2]), tf.ceil(i[:, 2:3])]))
lower_l = tf.to_int32(tf_concat(1, [i[:, 0:1], tf.ceil(i[:, 1:2]), tf.floor(i[:, 2:3])]))
lower_r = tf.to_int32(tf_concat(1, [i[:, 0:1], tf.ceil(i[:, 1:2]), tf.ceil(i[:, 2:3])]))
upper_l_idx = to_idx(upper_l, tf.shape(w))
upper_r_idx = |
arruda/bgarena_analysis | bgarena_gatherer/bgarena_gatherer/spiders/bgarena_race_for_galaxy_moves.py | Python | mit | 9,774 | 0.003172 | # -*- coding: utf-8 -*-
import os
import datetime
from functools import partial
import json
import scrapy
from scrapy_splash import SplashRequest
from sqlalchemy.orm import sessionmaker
from bgarena_gatherer.items import GameTableItem
from bgarena_gatherer.models import GameTable, GameTableMoveAction, db_connect
# BGA login credentials come from the environment so they never live in
# the source tree; both must be set or the spider refuses to start.
ACC = os.environ.get('ACC', None)
PASS = os.environ.get('PASS', None)
if ACC is None or PASS is None:
    raise Exception('Need ACC and PASS envvars')
script_test_using_cookies = """
function main(splash)
local phpsessid = splash.args.phpsessid
local tournoienligne_sso_user = splash.args.tournoienligne_sso_user
local tournoienligne_sso_id = splash.args.tournoienligne_sso_id
local tournoienligneuser = splash.args.tournoienligneuser
local tournoienligneauth = splash.args.tournoienligneauth
local io = splash.args.io
splash:init_cookies(
{
{ domain="en.boardgamearena.com",
secure=false,
value=phpsessid,
path="/",
httpOnly=false,
name="PHPSESSID",
},
{ domain=".boardgamearena.com",
secure=false,
value=tournoienligne_sso_user,
path="/",
httpOnly=false,
name="TournoiEnLigne_sso_user",
},
{ domain=".boardgamearena.com",
secure=false,
value=tournoienligne_sso_id,
path="/",
httpOnly=false,
name="TournoiEnLigne_sso_id",
},
{ domain=".boardgamearena.com",
name="TournoiEnLigneuser",
expires="2026-09-05T16:10:34Z",
value=tournoienligneuser,
path="/",
httpOnly=false,
secure=false,
},
{ domain=".boardgamearena.com",
name="TournoiEnLigneauth",
expires="2026-09-05T16:10:34Z",
value=tournoienligneauth,
path="/",
httpOnly=false,
secure=
false,
},
{ domain="c.boardgamearena.net",
secure=false,
value=io,
path="/socket.io/",
httpOnly=false,
name="io",
},
}
)
local url = splash.args.url
assert(splash:go(url))
assert(splash:wait(6.0))
re | turn splash:html()
end
"""
script_login_cookies = """
function main(splash)
local url = splash.args.url
assert(splash:go(url))
assert(splash:wait(5.0)) |
assert(splash:runjs("document.getElementById('username_input').value = '%s';"))
assert(splash:runjs("document.getElementById('password_input').value = '%s';"))
local get_dimensions = splash:jsfunc([[
function () {
var rect = document.getElementById('login_button').getBoundingClientRect();
return {"x": rect.left, "y": rect.top}
}
]])
splash:set_viewport_full()
splash:wait(0.1)
local dimensions = get_dimensions()
splash:mouse_click(dimensions.x, dimensions.y)
splash:wait(5.0)
return splash:get_cookies()
end
""" % (ACC, PASS)
class LastCheckedTableMoveManager(object):
    """
    Manage what was the last crawled table move of a given game.

    On construction it opens a DB session, computes the list of finished
    tables whose moves have not been crawled yet, and exposes it as
    ``final_list`` ([table_id, gamereview_url] pairs, newest first).
    """
    def __init__(self, game):
        super(LastCheckedTableMoveManager, self).__init__()
        self.game = game
        self.session = self.start_session()
        self.final_list = []
        self.load_list()
        print ">>>>> Current crawling list len: %d" % len(self.final_list)
    def start_session(self):
        """Open a new SQLAlchemy session bound to the project engine."""
        engine = db_connect()
        Session = sessionmaker(bind=engine)
        session = Session()
        return session
    def get_table_moves_for_game(self):
        """Query of every move-action row recorded for this game."""
        return self.session.query(GameTableMoveAction).filter(GameTableMoveAction.game==self.game)
    def get_last_crawled_move(self):
        """Most recently stored move action, or None when none exist yet."""
        last_move = list(self.get_table_moves_for_game().order_by(GameTableMoveAction.id.desc()).limit(1))
        if len(last_move) == 0:
            return None
        return last_move[0]
    def get_current_tables_to_craw_list(self):
        """(id, table_link) pairs of finished tables that have no crawled
        moves yet, newest first."""
        already_crawled_table_ids = self.session.query(GameTableMoveAction.game_table_id).distinct()
        tables_not_crawled = self.get_all_finished_tables_ids().filter(~GameTable.id.in_(already_crawled_table_ids))
        tables_not_crawled = tables_not_crawled.order_by(GameTable.id.desc()).with_entities(GameTable.id, GameTable.table_link)
        return tables_not_crawled
    def get_all_finished_tables_ids(self):
        """Finished tables of this game recent enough to still have logs."""
        return self.session.query(GameTable)\
            .filter(GameTable.game==self.game)\
            .filter(GameTable.game_status == GameTableItem.GAMESTATUS_OPTS['finished'])\
            .filter(GameTable.id > 277298) # ids smaller then this are of old tables and so with no logs
    def get_tables_with_id_smaller_than_given_id(self, table_id):
        """Finished tables older than *table_id*, newest first, as
        (id, table_link) pairs."""
        return self.session.query(GameTable)\
            .filter(GameTable.game==self.game)\
            .filter(GameTable.game_status == GameTableItem.GAMESTATUS_OPTS['finished'])\
            .filter(GameTable.id < table_id)\
            .order_by(GameTable.id.desc()).with_entities(GameTable.id, GameTable.table_link)
    def load_list(self):
        """Populate ``final_list`` with [table_id, gamereview_url] pairs,
        then close the session (the list is fully materialized first)."""
        table_list = list(self.get_current_tables_to_craw_list())
        self.session.close()
        self.final_list = [
            [tid, self.get_game_review_from_table_link(table_link)] for tid, table_link in table_list
        ]
        return self.final_list
    def get_game_review_from_table_link(self, table_link):
        """Turn a '...?table=<id>' link into the '#!gamereview' URL."""
        table_id = table_link.split('?table=')[-1]
        base_game_review_url = "https://en.boardgamearena.com/#!gamereview?table=%s"
        return base_game_review_url % table_id
# NOTE(review): instantiating at import time opens a DB session as a module
# side effect; consider deferring until the spider actually starts.
last_checked_manager = LastCheckedTableMoveManager("Race for the Galaxy")
class BGRaceMovesSpider(scrapy.Spider):
name = "bgracemoves"
allowed_domains = ["en.boardgamearena.com"]
game_reviews_to_craw = last_checked_manager.final_list
start_urls = ["https://en.boardgamearena.com/#!account",]
game_name = "Race for the Galaxy"
    def start_requests(self):
        """Seed the crawl with SplashRequests that execute the login Lua
        script, capturing the authenticated session cookies for reuse."""
        for url in self.start_urls:
            yield SplashRequest(
                url,
                self.after_login,
                endpoint='execute',
                args={'lua_source': script_login_cookies},
            )
    def get_player_name_to_number_mapping(self, response):
        """Map each player name on the results page to a 1-based ordinal
        (order of appearance in the #game_result block)."""
        name_to_number = {}
        players_names = response.xpath('//*[@id="game_result"]/div/div[@class="name"]/text()')
        for i, name in enumerate(players_names):
            name_to_number[name.extract()] = i+1
        return name_to_number
def replace_players_name_in_action(self, action, name_to_number):
players_names = name_to_number.keys()
order_by_len_names = sorted(players_names, key=len)
for p_name in order_by_len_names:
action = action.replace(p_name, 'player-%d' % name_to_number[p_name])
return action
    def parse_move_info(self, div):
        """Extract {'move_number', 'move_date'} from one game-log move div.

        NOTE(review): when the div has no direct text node,
        ``move_number_txt`` is None and the ``.replace`` chain raises
        AttributeError -- confirm the selector always yields text.
        """
        move_number_txt = div.xpath('text()').extract()
        move_number_txt = move_number_txt[0] if move_number_txt else None
        # 'Move 12 :' -> '12'
        move_number = move_number_txt.replace('Move', '').replace(':', '').replace(' ', '')
        time = div.xpath('span/text()').extract()
        time = time[0] if time else None
        return {'move_number': move_number, 'move_date': time}
    def parse_action_info(self, div, name_to_number):
        """Return the text of one log action with player names anonymized
        to 'player-<n>' placeholders.

        NOTE(review): if the div has no text node, *action* is None and
        the replacement call will fail -- confirm against real pages.
        """
        action = div.xpath('text()').extract()
        action = action[0] if action else None
        action = self.replace_players_name_in_action(action, name_to_number)
        return action
def parse_real_request(self, tablemodel_id, response):
name_to_number = self.get_player_name_to_number_mapping(response)
divs = response.xpath('//*[@id="gamelogs"]/div')
base_last_move = {
'game': self.game_name,
'game_table_id': tablemodel_id,
}
moves = []
for div in divs:
div_class = div.xpath('@class').extract()
div_class = div_class[0] |
Minkov/site | judge/templatetags/counter.py | Python | agpl-3.0 | 575 | 0.001739 | from itertools import count
from django import temp | late
register = template.Library()
class GetCounterNode(template.Node):
    """Template node that binds an incrementing counter callable to a
    context variable; each call in the template yields the next integer.

    NOTE(review): Python 2 only -- relies on the ``.next`` method of the
    ``itertools.count`` iterator.  Also, a *start* coming from the
    template parser is a string and is passed to ``count()`` unconverted;
    confirm whether a non-default start is ever used.
    """
    def __init__(self, var_name, start=1):
        self.var_name = var_name
        self.start = start
    def render(self, context):
        # Store the bound .next method so the template can call it.
        context[self.var_name] = count(self.start).next
        return ''
@register.tag
def get_counter(parser, token):
try:
return GetCounterNode(*token.contents.split()[1:])
except Valu | eError:
raise template.TemplateSyntaxError('%r tag requires arguments' % token.contents.split()[0])
|
mzdaniel/oh-mainline | vendor/packages/mechanize/test-tools/unittest/__init__.py | Python | agpl-3.0 | 2,508 | 0.002392 | """
Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
Smalltalk testing framework.
This module contains the core framework classes that form the basis of
specific test cases and suites (TestCase, TestSuite etc.), and also a
text-based utility class for running the tests and reporting the results
(TextTestRunner).
Simple usage:
import unittest
class IntegerArithmenticTestCase(unittest.TestCase):
def testAdd(self): ## test method names begin 'test*'
self.assertEqual((1 + 2), 3)
self.assertEqual(0 + 1, 1)
def testMultiply(self):
self.assertEqual((0 * 10), 0)
self.assertEqual((5 * 8), 40)
if __name__ == '__main__':
unittest.main()
Further information is available in the bundled documentation, and from
http://docs.python.org/library/unittest.html
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__all__ = ['TestResult', 'TestCase', 'TestSuite',
'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
'defaultTestLo | ader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
'expectedFailure']
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
from unittest.result import TestResult
from unittest.case import (TestCase, FunctionTestCase, SkipTest, skip, skipIf,
skipUnless, expectedFailure)
from unittest.suite import TestSuite
from unittest.loader import (TestLoader, defaultTestLoader, makeSuite,
| getTestCaseNames, findTestCases)
from unittest.main import TestProgram, main
from unittest.runner import TextTestRunner
|
mscuthbert/abjad | abjad/tools/developerscripttools/RenameModulesScript.py | Python | gpl-3.0 | 13,078 | 0.003594 | # -*- encoding: utf-8 -*-
import os
from abjad.tools import documentationtools
from abjad.tools import systemtools
from abjad.tools.developerscripttools.DeveloperScript import DeveloperScript
from abjad.tools.developerscripttools.ReplaceInFilesScript \
import ReplaceInFilesScript
class RenameModulesScript(DeveloperScript):
r'''Renames classes and functions.
Handle renaming the module and package, as well as any tests,
documentation or mentions of the class throughout the Abjad codebase:
.. shell::
ajv rename --help
'''
### PUBLIC PROPERTIES ###
    @property
    def alias(self):
        r'''Alias of script.

        Returns ``'rename'`` (the short name used on the command line).
        '''
        return 'rename'

    @property
    def long_description(self):
        r'''Long description of script.

        Returns string or none.  Always none for this script.
        '''
        return None

    @property
    def scripting_group(self):
        r'''Scripting group of script.

        Returns none; this script is not grouped.
        '''
        return None

    @property
    def short_description(self):
        r'''Short description of script.

        Returns string.
        '''
        return 'Rename public modules.'

    @property
    def version(self):
        r'''Version of script.

        Returns float.
        '''
        return 1.0
### PRIVATE METHODS ###
def _codebase_name_to_codebase_docs_path(self, codebase):
from abjad import abjad_configuration
if codebase == 'mainline':
return os.path.join(
abjad_configuration.abjad_directory,
'docs',
'source',
'api',
'tools',
)
elif codebase == 'experimental':
return os.path.join(
abjad_configuration.abjad_experimental_directory,
'docs',
'source',
'tools',
)
message = 'bad codebase name: {!r}.'
message = message.format(codebase)
raise Exception(message)
def _codebase_name_to_codebase_tools_path(self, codebase):
from abjad import abjad_configuration
if codebase == 'mainline':
return os.path.join(
abjad_configuration.abjad_directory, 'tools')
elif codebase == 'experimental':
return os.path.join(
abjad_configuration.abjad_experimental_directory, 'tools')
message = 'bad codebase name: {!r}.'
message = message.format(codebase)
raise Exception(message)
    def _confirm_name_changes(self,
        old_codebase,
        old_tools_package_name,
        old_module_name,
        new_codebase,
        new_tools_package_name,
        new_module_name,
        ):
        '''Interactively show the pending rename and ask for confirmation.

        Returns True for yes, False for no, and None for any unrecognized
        answer; raises SystemExit on "a"/"abort"/"q"/"quit".

        NOTE(review): uses raw_input(), so this method is Python-2-only as
        written.
        '''
        # Pad both codebase labels to the same width so the before/after
        # lines align vertically.
        max_codebase = max(len(old_codebase), len(new_codebase))
        old_codebase = old_codebase.ljust(max_codebase)
        new_codebase = new_codebase.ljust(max_codebase)
        print('')
        print('Is ...')
        print('')
        print(' [{}] {}.{}()'.format(
            old_codebase, old_tools_package_name, old_module_name))
        print(' ===>')
        print(' [{}] {}.{}()'.format(
            new_codebase, new_tools_package_name, new_module_name))
        print('')
        string = raw_input('... correct [yes, no, abort]? ').lower()
        print('')
        if string in ('y', 'yes'):
            return True
        elif string in ('a', 'abort', 'q', 'quit'):
            raise SystemExit
        elif string in ('n', 'no'):
            return False
def _get_object_names(self, kind, codebase, tools_package_name):
assert kind in ('class', 'function')
tools_path = self._codebase_name_to_codeba | se_tools_path(codebase)
path = os.path.join(tools_path, tools_package_name)
if kind == 'class':
| generator = documentationtools.yield_all_classes(
code_root=path,
include_private_objects=True,
)
elif kind == 'function':
generator = documentationtools.yield_all_functions(
code_root=path,
include_private_objects=True,
)
return tuple(sorted(generator, key=lambda x: x.__name__))
def _get_tools_package_names(self, codebase):
tools_path = self._codebase_name_to_codebase_tools_path(codebase)
names = []
for x in os.listdir(tools_path):
if os.path.isdir(os.path.join(tools_path, x)):
if not x.startswith(('_', '.')):
names.append(x)
return tuple(sorted(names))
def _parse_tools_package_path(self, path):
from abjad import abjad_configuration
if '.' not in path:
raise SystemExit
tools_package_name, module_name = path.split('.')
mainline_tools_directory = os.path.join(
abjad_configuration.abjad_directory,
'tools',
)
for directory_name in os.listdir(mainline_tools_directory):
directory = os.path.join(
mainline_tools_directory, directory_name)
if not os.path.isdir(directory):
continue
elif directory_name != tools_package_name:
continue
return 'mainline', tools_package_name, module_name
experimental_tools_directory = os.path.join(
abjad_configuration.abjad_experimental_directory,
'tools',
)
for directory_name in os.listdir(mainline_tools_directory):
directory = os.path.join(
experimental_tools_directory, directory_name)
if not os.path.isdir(directory):
continue
elif directory_name != tools_package_name:
continue
return 'experimental', tools_package_name, module_name
raise SystemExit
def _rename_old_api_page(self,
old_codebase,
old_tools_package_name,
old_module_name,
new_codebase,
new_tools_package_name,
new_module_name,
):
print('Renaming old API page ...')
old_docs_path = self._codebase_name_to_codebase_docs_path(old_codebase)
new_docs_path = self._codebase_name_to_codebase_docs_path(new_codebase)
old_rst_file_name = old_module_name + '.rst'
new_rst_file_name = new_module_name + '.rst'
old_api_path = os.path.join(
old_docs_path, old_tools_package_name, old_rst_file_name)
new_api_path = os.path.join(
new_docs_path, new_tools_package_name, new_rst_file_name)
command = 'mv {} {}'.format(
old_api_path, new_api_path)
systemtools.IOManager.spawn_subprocess(command)
print('')
def _rename_old_module(self,
old_codebase,
old_tools_package_name,
old_module_name,
new_codebase,
new_tools_package_name,
new_module_name,
):
print('Renaming old module ...')
old_tools_path = self._codebase_name_to_codebase_tools_path(
old_codebase)
new_tools_path = self._codebase_name_to_codebase_tools_path(
new_codebase)
old_module = old_module_name + '.py'
old_path = os.path.join(
old_tools_path, old_tools_package_name, old_module)
new_module = new_module_name + '.py'
new_path = os.path.join(
new_tools_path, new_tools_package_name, new_module)
command = 'git mv -f {} {}'.format(
old_path, new_path)
systemtools.IOManager.spawn_subprocess(command)
print('')
def _rename_old_test_files(self,
old_codebase,
old_tools_package_name,
old_module_name,
new_codebase,
new_tools_package_name,
new_module_name,
):
print('Renaming old test file(s) ...')
old_tools_path = self._codebase_name_to_codebase_tools_path(
old_codebase)
old_test_path = os.path.join(
old_tools_path, old_tools_package_name, 'test')
if not os.path.exists(old_test_path):
return
new_tools_path = self._codebase_na |
zacharydenton/bard | tests/test_generators_markov.py | Python | gpl-3.0 | 1,503 | 0.005323 | import unittest
import nltk
from bard.generators import markov
class TestMarkov(unittest.TestCase):
    """Exercise MarkovGenerator over plain and tagged Brown-corpus tokens."""

    def setUp(self):
        category = 'science_fiction'
        self.tokens = nltk.corpus.brown.words(categories=category)
        self.tagged_tokens = nltk.corpus.brown.tagged_words(categories=category)
        self.generator = markov.MarkovGenerator(self.tokens)
        self.tagged_generator = markov.MarkovGenerator(self.tagged_tokens)

    def test_generator(self):
        generated = self.generator.generate(length=100)
        self.assertIsInstance(generated[0], str)

    def test_tagged_generator(self):
        generated = self.tagged_generator.generate(length=100)
        self.assertIsInstance(generated[0], tuple)
class TestIntelligentMarkov(TestMarkov):
    """Re-run the TestMarkov assertions against IntelligentMarkovGenerator."""

    def setUp(self):
        category = 'science_fiction'
        self.tokens = nltk.corpus.brown.words(categories=category)
        self.tagged_tokens = nltk.corpus.brown.tagged_words(categories=category)
        self.generator = markov.IntelligentMarkovGenerator(self.tokens)
        self.tagged_generator = markov.IntelligentMarkovGenerator(self.tagged_tokens)
class TestGeneratorFunctions(unittest.TestCase):
    # NOTE(review): this fixture defines no test methods, so setUp runs for
    # nothing.  Presumably tests of the module-level generator functions were
    # meant to live here -- confirm intent or remove the class.
    def setUp(self):
        self.tokens = nltk.corpus.brown.words(categories='science_fiction')
        self.tagged_tokens = nltk.corpus.brown.tagged_words(categories='science_fiction')
        self.generator = markov.MarkovGenerator(self.tokens)
        self.tagged_generator = markov.MarkovGenerator(self.tagged_tokens)
|
hahnicity/mesclan | mesclan/controllers.py | Python | unlicense | 3,754 | 0.000799 | """
mesclan.controllers
~~~~~~~~~~~~~~~~~
"""
from functools import wraps
from hashlib import sha256
import logging
from urlparse import urljoin
from flask import request, Response
import requests
from sqlalchemy.orm.exc import NoResultFound
from ujson import dumps
from mesclan import exceptions
from mesclan.cache import build_cache
from mesclan.constants import DEBUG_TOKEN, FACEBOOK_URL, GET_BOTTLE_FIELDS
from mesclan.handlers import handle_bottle_info
def handle_request(func):
    """Decorator translating domain exceptions from *func* into HTTP replies.

    Maps bad-request errors to 400, invalid tokens to 401, underage users
    and missing rows to 404, and conflicts to 409; anything else propagates.
    """
    def _error_reply(error, status):
        # JSON body carrying the exception message, plus the status code.
        return dumps({"response": error.message}), status

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except exceptions.BAD_REQUEST_ERRORS as error:
            return _error_reply(error, 400)
        except exceptions.InvalidTokenError as error:
            return _error_reply(error, 401)
        except exceptions.UserUnderageError as error:
            return _error_reply(error, 404)
        except NoResultFound:
            return Response(status=404)
        except exceptions.CONFLICT_ERRORS as error:
            return _error_reply(error, 409)
    return wrapper
def create_routes(app):
    """Register the application's routes and their helper closures on *app*."""

    @app.route("/bottle", methods=["POST"])
    @handle_request
    def get_bottle_info():
        """
        Get information for a bottle
        """
        logging.debug("A request was generated for bottle id: {}".format(request.form["id"]))
        _validate_fields(GET_BOTTLE_FIELDS)
        _validate_token()
        return handle_bottle_info(request.form["id"])

    # XXX Trigger only with admin authentication
    @app.route("/trigger-task/update-cache", methods=["GET"])
    def update_cache():
        """
        Update our cache
        """
        logging.info("Updating the cache per request")
        build_cache()
        return Response(status=200)

    def _validate_fields(fields):
        """Raise FieldNotFoundError unless every required POST field is set."""
        for field in fields:
            if field not in request.form:
                raise exceptions.FieldNotFoundError(field)

    def _validate_response(response):
        """Raise StatusCodeError unless the Facebook API answered 200."""
        if response.status_code != 200:
            raise exceptions.StatusCodeError(response)
        else:
            return response

    def _validate_token():
        """
        Validate the token we get for requests to get information for a bottle
        """
        try:
            from mesclan import oauth
        except ImportError:
            # BUG FIX: no OAuth credentials module available -- run the debug
            # flow and *stop*.  The original fell through and then hit a
            # NameError on ``oauth`` whenever the app was not in debug mode.
            _debug_flow()
            return
        if app.debug or app.testing:
            _debug_flow()
        else:
            _facebook_flow(oauth)

    def _debug_flow():
        """
        Run through the debug flow for validating a user's token
        """
        if request.form["token"] != DEBUG_TOKEN:
            raise exceptions.InvalidTokenError()

    def _facebook_flow(oauth):
        """
        Run through the actual facebook flow for validating a user's token.

        1. Construct a hash of the appsecret_proof
        2. Call the facebook Graph API, validate the age range
        """
        appsecret_proof = sha256()
        appsecret_proof.update(request.form["token"])
        appsecret_proof.update(oauth.SECRET)
        hash_ = appsecret_proof.digest()
        # BUG FIX: ``headers`` must be a mapping; the original passed a *set*
        # ({"appsecret_proof", hash_}), which requests rejects at call time.
        response = _validate_response(requests.get(
            "{}/age_range?access_token={}".format(
                urljoin(FACEBOOK_URL, request.form["user_id"]), request.form["token"]
            ),
            headers={"appsecret_proof": hash_}
        ))
        # validate age, must be 21 in USA. Because we are making a liquor app...
        if response.json()["min"] != "21":
            raise exceptions.UserUnderageError()
|
halonsecurity/halonctl | halonctl/models.py | Python | bsd-3-clause | 6,898 | 0.042621 | from __future__ import print_function
import six
import socket
import keyring
import requests
from threading import Lock
from suds.client import Client
from suds.transport.http import HttpAuthenticated
from .proxies import *
from .util import async_dispatch, nodesort, to_base64, from_base64
from . import cache
@six.python_2_unicode_compatible
class Node(object):
    '''A single Halon node.

    :ivar str name: The configured name of the node.
    :ivar halon.models.NodeList cluster: The cluster the node belongs to.
    :ivar str scheme: The scheme the node should be accessed over, either http or https
    :ivar str host: The hostname of the node
    :ivar str username: The effective username; the node's, if any, otherwise the cluster's
    :ivar str password: The effective password; the node's or keychain's, if any, otherwise the cluster's
    '''

    name = u"noname"
    cluster = None
    scheme = 'http'
    host = None
    # NOTE(review): no_verify is never consulted in this class; presumably
    # read elsewhere to skip TLS verification -- confirm before removing.
    no_verify = False
    local_username = None
    local_password = None
    # Class-level, so the HTTP connection pool is shared by every Node.
    session = requests.Session()

    @property
    def service(self):
        '''A proxy that can be used to make SOAP calls to the node.

        :rtype: :class:`halon.proxies.NodeSoapProxy`
        '''
        return NodeSoapProxy(self)

    @property
    def url(self):
        '''The base URL for the node.'''
        return "{scheme}://{host}/remote/".format(scheme=self.scheme, host=self.host)

    @property
    def username(self):
        # Effective username: this node's own, falling back to the cluster's.
        return self.local_username or self.cluster.username

    @username.setter
    def username(self, val):
        self.local_username = val

    @property
    def password(self):
        # Effective password: node's own, then system keyring, then cluster.
        return self.local_password or self.keyring_password or self.cluster.password

    @password.setter
    def password(self, val):
        self.local_password = val

    @property
    def keyring_password(self):
        # Lazily fetched from the system keyring on first access and cached
        # in _keyring_password; requires both host and username to be known.
        if not hasattr(self, '_keyring_password') and self.host and self.username:
            self._keyring_password = keyring.get_password(self.host, self.username)
        return getattr(self, '_keyring_password', None)

    def __init__(self, data=None, name=None, cluster=None, load_wsdl=False):
        '''Initializes a Node with the given configuration data and name.

        When no cluster is given, the node becomes the sole member of a new
        NodeList.  ``data`` is a "[scheme://][user[:pass]@]host" string; when
        ``load_wsdl`` is true the cached WSDL is parsed eagerly.
        '''
        self.name = name
        self.cluster = cluster if not cluster is None else NodeList([self])
        if data:
            self.load_data(data)
        if load_wsdl:
            self.load_wsdl()

    def load_data(self, s):
        '''Updates the node's data from the given configuration string,
        overwriting any existing data.

        Accepted forms: "host", "scheme://host", "user@host",
        "user:pass@host", and combinations thereof.
        '''
        remainder = s
        # Split out any scheme
        parts = remainder.split('://', 1)
        if len(parts) == 2:
            self.scheme = parts[0]
            remainder = parts[1]
        # Split the host from the credentials
        parts = remainder.split('@', 1)
        if len(parts) == 2:
            remainder = parts[0]
            self.host = parts[1]
            # Credentials may or may not include the password
            parts = remainder.split(':', 1)
            if len(parts) == 2:
                self.username = parts[0]
                self.password = parts[1]
            else:
                self.username = parts[0]
        else:
            self.host = parts[0]

    def load_wsdl(self):
        '''Loads the cached WSDL file.

        This is called automatically the first time a SOAP call is attempted,
        or you may call it yourself on startup to e.g. create a bunch of
        clients at once over a bunch of threads.'''
        # Idempotent: the suds Client is created once and kept on _client.
        # nosend=True makes service calls return request contexts instead of
        # performing I/O (see make_request).
        if not hasattr(self, '_client'):
            self._client = Client("file:{0}".format(cache.get_path('wsdl.xml')), location=self.url, faults=False, nosend=True)
            self._client.set_options(cache=None)

    def make_request(self, name_, *args, **kwargs):
        '''Convenience function that creates a SOAP request context from a
        function name and a set of parameters.

        The first call to this function is blocking, as the node's WSDL file
        will be downloaded synchronously.'''
        self.load_wsdl()
        return getattr(self._client.service, name_)(*args, **kwargs)

    def command(self, command, *args, **kwargs):
        '''Convenience function that executes a command on the node, and returns
        a CommandProxy that can be used to iterate the command's output, or interact
        with the running process.

        Note that ``args`` are the command's arguments (first one is the
        command name), while ``kwargs`` controls how it's executed, specified
        by the following flags:

        * ``size`` - the viewport size as (cols, rows), defaults to (80,24)
        * ``cols``, ``rows`` - individual components of ``size``
        '''
        # Allow calls as command("cmd", "arg1", "arg2") or command("cmd arg1 arg2")
        parts = [command] + list(args) if args else command.split(' ')
        # Allow size to be specified as size=(cols,rows) or cols=,rows=
        size = kwargs.get('size', (80, 24))
        size = (kwargs.get('cols', size[0]), kwargs.get('rows', size[1]))
        # Arguments are base64-encoded for transport; 200 means started.
        code, cid = self.service.commandRun(argv={'item': [to_base64(part) for part in parts]}, cols=size[0], rows=size[1])
        return (200, CommandProxy(self, cid)) if code == 200 else (code, None)

    def __str__(self):
        # "cluster/name (host)" when the cluster is named, else "name (host)".
        s = u"{name} ({host})".format(name=self.name, host=self.host)
        if self.cluster.name:
            s = u"{cluster}/{s}".format(cluster=self.cluster.name, s=s)
        return s

    def __repr__(self):
        return "Node(name={name}, cluster=<{cluster}>)".format(name=self.name, cluster=self.cluster.name if self.cluster else None)
@six.python_2_unicode_compatible
class NodeList(list):
    '''A list of Halon nodes.

    It's a regular list for all intents and purposes, but with the added
    benefit of keeping track of credentials, and the ability to execute SOAP
    calls, either synchronously on one node at a time, or asynchronously on all
    of them at once.
    '''

    name = None
    local_username = None
    local_password = None

    @property
    def username(self):
        # Cluster-wide username; when unset, fall back to the first member
        # node that has one of its own.
        if not self.local_username:
            for node in [node for node in self if node.local_username]:
                return node.local_username
        return self.local_username

    @property
    def password(self):
        # Cluster-wide password; when unset, fall back to the first member
        # node that has a local or keyring-stored password.
        if not self.local_password:
            for node in [node for node in self if node.local_password or node.keyring_password]:
                return node.password or node.keyring_password
        return self.local_password

    @property
    def service(self):
        '''An asynchronous SOAP proxy.

        This is the recommended way to target multiple nodes with a call, as it
        will only take as long as the slowest node takes to respond, rather
        than taking longer and longer the mode nodes you're targeting.

        :rtype: :class:`halon.proxies.NodeListSoapProxy`
        '''
        return NodeListSoapProxy(self)

    def command(self, command, *args):
        '''Executes a command across all contained nodes.

        Dispatches Node.command to every member concurrently and returns the
        node-sorted mapping of results.
        '''
        return nodesort(async_dispatch({ node: (node.command, (command,) + args) for node in self }))

    def load_data(self, data):
        '''Updates the nodelist's data from the given configuration dictionary,
        overwriting any existing data.

        Recognized keys: 'username' and 'password'.
        '''
        if 'username' in data:
            self.local_username = data['username']
        if 'password' in data:
            self.local_password = data['password']

    def __str__(self):
        return u"{name} -> [{nodes}]".format(name=self.name, nodes=', '.join([node.name for node in self]))
|
ZoranPavlovic/kombu | t/unit/test_messaging.py | Python | bsd-3-clause | 24,456 | 0 | import pickle
import pytest
import sys
from collections import defaultdict
from unittest.mock import Mock, patch
from kombu import Connection, Consumer, Producer, Exchange, Queue
from kombu.exceptions import MessageStateError
from kombu.utils import json
from kombu.utils.functional import ChannelPromise
from t.mocks import Transport
class test_Producer:
def setup(self):
self.exchange = Exchange('foo', 'direct')
self.connection = Connection(transport=Transport)
self.connection.connect()
assert self.connection.connection.connected
assert not self.exchange.is_bound
def test_repr(self):
p = Producer(self.connection)
assert repr(p)
def test_pickle(self):
chan = Mock()
producer = Producer(chan, serializer='pickle')
p2 = pickle.loads(pickle.dumps(producer))
assert p2.serializer == producer.serializer
def test_no_channel(self):
p = Producer(None)
assert not p._channel
@patch('kombu.messaging.maybe_declare')
def test_maybe_declare(self, maybe_declare):
p = self.connectio | n.Producer()
q = Queue('foo')
p.maybe_declare(q)
maybe_declare.assert_called_with(q, p.channel, False)
@patch('kombu.common.maybe_declare')
def test_maybe_declare_when_entity_false(self, maybe_declare):
p = self.connection.Producer()
p.maybe_declare(None)
maybe_declare. | assert_not_called()
def test_auto_declare(self):
channel = self.connection.channel()
p = Producer(channel, self.exchange, auto_declare=True)
# creates Exchange clone at bind
assert p.exchange is not self.exchange
assert p.exchange.is_bound
# auto_declare declares exchange'
assert 'exchange_declare' not in channel
p.publish('foo')
assert 'exchange_declare' in channel
def test_manual_declare(self):
channel = self.connection.channel()
p = Producer(channel, self.exchange, auto_declare=False)
assert p.exchange.is_bound
# auto_declare=False does not declare exchange
assert 'exchange_declare' not in channel
# p.declare() declares exchange')
p.declare()
assert 'exchange_declare' in channel
def test_prepare(self):
message = {'the quick brown fox': 'jumps over the lazy dog'}
channel = self.connection.channel()
p = Producer(channel, self.exchange, serializer='json')
m, ctype, cencoding = p._prepare(message, headers={})
assert json.loads(m) == message
assert ctype == 'application/json'
assert cencoding == 'utf-8'
def test_prepare_compression(self):
message = {'the quick brown fox': 'jumps over the lazy dog'}
channel = self.connection.channel()
p = Producer(channel, self.exchange, serializer='json')
headers = {}
m, ctype, cencoding = p._prepare(message, compression='zlib',
headers=headers)
assert ctype == 'application/json'
assert cencoding == 'utf-8'
assert headers['compression'] == 'application/x-gzip'
import zlib
assert json.loads(zlib.decompress(m).decode('utf-8')) == message
def test_prepare_custom_content_type(self):
message = b'the quick brown fox'
channel = self.connection.channel()
p = Producer(channel, self.exchange, serializer='json')
m, ctype, cencoding = p._prepare(message, content_type='custom')
assert m == message
assert ctype == 'custom'
assert cencoding == 'binary'
m, ctype, cencoding = p._prepare(message, content_type='custom',
content_encoding='alien')
assert m == message
assert ctype == 'custom'
assert cencoding == 'alien'
def test_prepare_is_already_unicode(self):
message = 'the quick brown fox'
channel = self.connection.channel()
p = Producer(channel, self.exchange, serializer='json')
m, ctype, cencoding = p._prepare(message, content_type='text/plain')
assert m == message.encode('utf-8')
assert ctype == 'text/plain'
assert cencoding == 'utf-8'
m, ctype, cencoding = p._prepare(message, content_type='text/plain',
content_encoding='utf-8')
assert m == message.encode('utf-8')
assert ctype == 'text/plain'
assert cencoding == 'utf-8'
def test_publish_with_Exchange_instance(self):
p = self.connection.Producer()
p.channel = Mock()
p.channel.connection.client.declared_entities = set()
p.publish('hello', exchange=Exchange('foo'), delivery_mode='transient')
assert p._channel.basic_publish.call_args[1]['exchange'] == 'foo'
def test_publish_with_expiration(self):
p = self.connection.Producer()
p.channel = Mock()
p.channel.connection.client.declared_entities = set()
p.publish('hello', exchange=Exchange('foo'), expiration=10)
properties = p._channel.prepare_message.call_args[0][5]
assert properties['expiration'] == '10000'
def test_publish_with_timeout(self):
p = self.connection.Producer()
p.channel = Mock()
p.channel.connection.client.declared_entities = set()
p.publish('test_timeout', exchange=Exchange('foo'), timeout=1)
timeout = p._channel.basic_publish.call_args[1]['timeout']
assert timeout == 1
def test_publish_with_reply_to(self):
p = self.connection.Producer()
p.channel = Mock()
p.channel.connection.client.declared_entities = set()
assert not p.exchange.name
p.publish('hello', exchange=Exchange('foo'), reply_to=Queue('foo'))
properties = p._channel.prepare_message.call_args[0][5]
assert properties['reply_to'] == 'foo'
def test_set_on_return(self):
chan = Mock()
chan.events = defaultdict(Mock)
p = Producer(ChannelPromise(lambda: chan), on_return='on_return')
p.channel
chan.events['basic_return'].add.assert_called_with('on_return')
def test_publish_retry_calls_ensure(self):
p = Producer(Mock())
p._connection = Mock()
p._connection.declared_entities = set()
ensure = p.connection.ensure = Mock()
p.publish('foo', exchange='foo', retry=True)
ensure.assert_called()
def test_publish_retry_with_declare(self):
p = self.connection.Producer()
p.maybe_declare = Mock()
p.connection.ensure = Mock()
ex = Exchange('foo')
p._publish('hello', 0, '', '', {}, {}, 'rk', 0, 0, ex, declare=[ex])
p.maybe_declare.assert_called_with(ex)
def test_revive_when_channel_is_connection(self):
p = self.connection.Producer()
p.exchange = Mock()
new_conn = Connection('memory://')
defchan = new_conn.default_channel
p.revive(new_conn)
assert p.channel is defchan
p.exchange.revive.assert_called_with(defchan)
def test_enter_exit(self):
p = self.connection.Producer()
p.release = Mock()
assert p.__enter__() is p
p.__exit__()
p.release.assert_called_with()
def test_connection_property_handles_AttributeError(self):
p = self.connection.Producer()
p.channel = object()
p.__connection__ = None
assert p.connection is None
def test_publish(self):
channel = self.connection.channel()
p = Producer(channel, self.exchange, serializer='json')
message = {'the quick brown fox': 'jumps over the lazy dog'}
ret = p.publish(message, routing_key='process')
assert 'prepare_message' in channel
assert 'basic_publish' in channel
m, exc, rkey = ret
assert json.loads(m['body']) == message
assert m['content_type'] == 'application/json'
assert m['content_encoding'] == 'utf-8'
assert m['priority'] == 0
assert m['properties']['delivery_mode'] == 2
assert exc == p.exchange.name
|
magic0704/oslo.messaging | oslo_messaging/_drivers/protocols/amqp/opts.py | Python | apache-2.0 | 3,495 | 0 | # Copyright 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# Configuration options for the AMQP 1.0 protocol driver.  Every option keeps
# deprecated_group='amqp1' so values set under the legacy [amqp1] section
# still take effect.
amqp1_opts = [
    cfg.StrOpt('server_request_prefix',
               default='exclusive',
               deprecated_group='amqp1',
               help="address prefix used when sending to a specific server"),

    cfg.StrOpt('broadcast_prefix',
               default='broadcast',
               deprecated_group='amqp1',
               help="address prefix used when broadcasting to all servers"),

    cfg.StrOpt('group_request_prefix',
               default='unicast',
               deprecated_group='amqp1',
               help="address prefix when sending to any server in group"),

    cfg.StrOpt('container_name',
               default=None,
               deprecated_group='amqp1',
               help='Name for the AMQP container'),

    cfg.IntOpt('idle_timeout',
               default=0,  # disabled
               deprecated_group='amqp1',
               help='Timeout for inactive connections (in seconds)'),

    cfg.BoolOpt('trace',
                default=False,
                deprecated_group='amqp1',
                help='Debug: dump AMQP frames to stdout'),

    cfg.StrOpt('ssl_ca_file',
               default='',
               deprecated_group='amqp1',
               help="CA certificate PEM file to verify server certificate"),

    cfg.StrOpt('ssl_cert_file',
               default='',
               deprecated_group='amqp1',
               help='Identifying certificate PEM file to present to clients'),

    cfg.StrOpt('ssl_key_file',
               default='',
               deprecated_group='amqp1',
               help='Private key PEM file used to sign cert_file certificate'),

    cfg.StrOpt('ssl_key_password',
               default=None,
               deprecated_group='amqp1',
               help='Password for decrypting ssl_key_file (if encrypted)'),

    cfg.BoolOpt('allow_insecure_clients',
                default=False,
                deprecated_group='amqp1',
                help='Accept clients using either SSL or plain TCP'),

    cfg.StrOpt('sasl_mechanisms',
               default='',
               deprecated_group='amqp1',
               help='Space separated list of acceptable SASL mechanisms'),

    cfg.StrOpt('sasl_config_dir',
               default='',
               deprecated_group='amqp1',
               help='Path to directory that contains the SASL configuration'),

    cfg.StrOpt('sasl_config_name',
               default='',
               deprecated_group='amqp1',
               help='Name of configuration file (without .conf suffix)'),

    cfg.StrOpt('username',
               default='',
               deprecated_group='amqp1',
               help='User name for message broker authentication'),

    cfg.StrOpt('password',
               default='',
               deprecated_group='amqp1',
               help='Password for message broker authentication')
]
|
DjangoQuilla/pollstutorial | polls/models.py | Python | mit | 767 | 0.023468 | import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Question(models.Model):
    """A poll question with its publication timestamp."""

    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __unicode__(self):
        return self.question_text

    def was_published_recently(self):
        """Return True only for questions published within the last day.

        BUG FIX: the original one-sided comparison also returned True for
        questions whose pub_date lies in the future; the window is now
        clamped to [now - 1 day, now].
        """
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
    """One selectable answer for a Question, with its running vote tally."""

    # Deleting the Question cascades to its Choices (Django default).
    question = models.ForeignKey(Question)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)

    def __unicode__(self):
        return self.choice_text
sdss/marvin | python/marvin/contrib/vacs/hi.py | Python | bsd-3-clause | 9,230 | 0.006934 | # !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2018-10-11 17:51:43
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-11-29 17:23:15
from __future__ import print_function, division, absolute_import
import numpy as np
import astropy
import astropy.units as u
import marvin.tools
from marvin.tools.quantities.spectrum import Spectrum
from marvin.utils.general.general import get_drpall_table
from marvin.utils.plot.scatter import plot as scatplot
from marvin import log
from .base import VACMixIn, VACTarget
def choose_best_spectrum(par1, par2, conf_thresh=0.1):
    '''Pick the preferred HI spectrum between two observations.

    Selection rules (following Stark+21):
      1. both detected, neither confused  -> higher integrated SNR
      2. both detected, both confused     -> lower confusion probability
      3. both detected, one confused      -> the unconfused one
      4. one clean detection vs none      -> the detection
      5. one confused detection vs none   -> the non-detection
      6. neither detected                 -> lower rms noise

    Each of ``par1``/``par2`` is a dict with keys ``program`` ('gbt' or
    'alfalfa'), ``snr`` (integrated SNR; <= 0 means non-detection), ``rms``
    (noise level) and ``conf_prob`` (confusion probability).  ``conf_thresh``
    is the confusion probability at or below which a detection counts as
    essentially unconfused (default 0.1).

    Returns the ``program`` string of the chosen observation.
    '''
    pars = (par1, par2)
    detected = [p['snr'] > 0 for p in pars]
    confused = [p['conf_prob'] > conf_thresh for p in pars]
    if all(detected):
        if not any(confused):
            # rule 1: both clean -- highest SNR wins
            pick = int(np.argmax([p['snr'] for p in pars]))
        elif all(confused):
            # rule 2: both confused -- least confused wins
            pick = int(np.argmin([p['conf_prob'] for p in pars]))
        else:
            # rule 3: exactly one confused -- take the clean one
            pick = confused.index(False)
    elif not any(detected):
        # rule 6: no detection at all -- lowest noise wins
        pick = int(np.argmin([p['rms'] for p in pars]))
    else:
        # rules 4 & 5: a single detection; note the *strict* '<' here,
        # matching the original's boundary behavior for this branch.
        det = detected.index(True)
        other = 1 - det
        pick = det if pars[det]['conf_prob'] < conf_thresh else other
    return pars[pick]['program']
class HIVAC(VACMixIn):
    """Provides access to the MaNGA-HI VAC.

    VAC name: HI

    URL: https://www.sdss.org/dr17/data_access/value-added-catalogs/?vac_id=hi-manga-data-release-1

    Description: Returns HI summary data and spectra

    Authors: David Stark and Karen Masters
    """

    # Required parameters
    name = 'HI'
    description = 'Returns HI summary data and spectra'
    version = {'MPL-7': 'v1_0_1', 'DR15': 'v1_0_1', 'DR16': 'v1_0_2', 'DR17': 'v2_0_1', 'MPL-11': 'v2_0_1'}
    display_name = 'HI'
    url = 'https://www.sdss.org/dr17/data_access/value-added-catalogs/?vac_id=hi-manga-data-release-1'

    # optional Marvin Tools to attach your vac to
    include = (marvin.tools.cube.Cube, marvin.tools.maps.Maps, marvin.tools.modelcube.ModelCube)

    # optional methods to attach to your main VAC tool in ~marvin.tools.vacs.VACs
    add_methods = ['plot_mass_fraction']

    # Required method
    def set_summary_file(self, release):
        ''' Sets the path to the HI summary file '''
        # define the variables to build a unique path to your VAC file
        self.path_params = {'ver': self.version[release], 'type': 'all', 'program': 'GBT16A_095'}
        # get_path returns False if the files do not exist locally
        self.summary_file = self.get_path("mangahisum", path_params=self.path_params)

    def set_program(self, plateifu):
        ''' Selects the survey program (gbt or alfalfa) for a target.

        Looks up every summary-file entry for this plate-ifu, picks the
        best spectrum between GBT/ALFALFA based on depth and confusion,
        and updates ``self.path_params['program']`` accordingly.
        '''
        # download the vac from the SAS if it does not already exist locally
        if not self.file_exists(self.summary_file):
            self.summary_file = self.download_vac('mangahisum', path_params=self.path_params)
        # Find all entries in summary file with this plate-ifu.
        # Need the full summary file data.
        summary = HITarget(plateifu, vacfile=self.summary_file)._data
        galinfo = summary[summary['plateifu'] == plateifu]
        if len(galinfo) == 1 and galinfo['session'] == 'ALFALFA':
            program = 'alfalfa'
        elif len(galinfo) in [0, 1]:
            # if no entry found or session is GBT, default program to gbt
            program = 'gbt'
        else:
            # two entries: compare them, computing SNR where a flux exists
            par1 = {'program': 'gbt', 'snr': 0., 'rms': galinfo[0]['rms'], 'conf_prob': galinfo[0]['conf_prob']}
            par2 = {'program': 'gbt', 'snr': 0., 'rms': galinfo[1]['rms'], 'conf_prob': galinfo[1]['conf_prob']}
            if galinfo[0]['session'] == 'ALFALFA':
                par1['program'] = 'alfalfa'
            if galinfo[1]['session'] == 'ALFALFA':
                par2['program'] = 'alfalfa'
            if galinfo[0]['fhi'] > 0:
                par1['snr'] = galinfo[0]['fhi'] / galinfo[0]['efhi']
            if galinfo[1]['fhi'] > 0:
                par2['snr'] = galinfo[1]['fhi'] / galinfo[1]['efhi']
            program = choose_best_spectrum(par1, par2)
        log.info('Using HI data from {0}'.format(program))
        # get path to ancillary VAC file for target HI spectra
        self.update_path_params({'program': program})

    # Required method
    def get_target(self, parent_object):
        ''' Accesses VAC data for a specific target from a Marvin Tool object '''
        # get any parameters you need from the parent object
        plateifu = parent_object.plateifu
        self.update_path_params({'plateifu': plateifu})
        # DR17/MPL-11 releases carry both GBT and ALFALFA data; pick one
        if parent_object.release in ['DR17', 'MPL-11']:
            self.set_program(plateifu)
        specfile = self.get_path('mangahispectra', path_params=self.path_params)
        # create container for more complex return data
        hidata = HITarget(plateifu, vacfile=self.summary_file, specfile=specfile)
        # get the spectral data for that row if it exists
        if hidata._indata and not self.file_exists(specfile):
            hidata._specfile = self.download_vac('mangahispectra', path_params=self.path_params)
        return hidata
class HITarget(VACTarget):
    ''' A customized target class to also display HI spectra

    This class handles data from both the HI summary file and the
    individual spectral files. Row data from the summary file for the given target
    is returned via the `data` property. Spectral data can be displayed via
    the `plot_spectrum` method.

    Parameters:
        targetid (str):
            The plateifu or mangaid designation
        vacfile (str):
            The path of the VAC summary file
        specfile (str):
            The path to the HI spectra

    Attributes:
        data:
            The target row data from the main VAC file
        targetid (str):
            The target identifier
    '''

    def __init__(self, targetid, vacfile, specfile=None):
        super(HITarget, self).__init__(targetid, vacfile)
        self._specfile = specfile
        self._specdata = None  # lazily-loaded cache of the spectral data

    def plot_spectrum(self):
        ''' Plot the HI spectrum

        Returns the matplotlib axes of the plot, or None when no spectral
        file is associated with this target.
        '''
        if self._specfile:
            # Load the spectral file only once.  Compare against None
            # explicitly: the loaded data is array-like, and the truth
            # value of a non-empty array is ambiguous (``not data`` can
            # raise or misbehave), whereas ``is None`` is always safe.
            if self._specdata is None:
                self._specdata = self._get_data(self._specfile)

            vel = self._specdata['VHI'][0]
            flux = self._specdata['FHI'][0]
            spec = Spectrum(flux, unit=u.Jy, wavelength=vel,
                            wavelength_unit=u.km / u.s)
            ax = spec.plot(
                ylabel='HI\ Flux\ Density', xlabel='Velocity', title=self.targetid, ytrim='minmax'
            )
            return ax
        return None
#
# Fun |
jpmml/jpmml-sklearn | pmml-sklearn-extension/src/test/resources/extensions/category_encoders.py | Python | agpl-3.0 | 6,809 | 0.035688 | from sklearn.experimental import enable_hist_gradient_boosting
from category_encoders import BaseNEncoder, BinaryEncoder, CatBoostEncoder, CountEncoder, LeaveOneOutEncoder, OneHotEncoder, OrdinalEncoder, TargetEncoder, WOEEncoder
from mlxtend.preprocessing import DenseTransformer
from pandas import DataFrame
from sklearn.base import clone
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder as SkLearnOneHotEncoder
from sklearn2pmml.pipeline import PMMLPipeline
import numpy
import os
import sys
sys.path.append(os.path.abspath("../../../../pmml-sklearn/src/test/resources/"))
from common import *
def load_filter_audit(name, filter = False):
	"""Load the 'Audit' or 'AuditNA' dataset selected by *name* and split it
	into (X, y).  When *filter* is true, drop the rows whose categorical
	levels are reserved for testing unknown-category handling."""
	if name.endswith("AuditNA"):
		audit_df = load_audit("AuditNA")
	elif name.endswith("Audit"):
		audit_df = load_audit("Audit")
	else:
		raise ValueError()
	audit_X, audit_y = split_csv(audit_df)
	if filter:
		# (column, level) pairs withheld from training so that they appear
		# as unseen categories at prediction time
		excluded_levels = [
			("Employment", "SelfEmp"),
			("Education", "Professional"),
			("Marital", "Divorced"),
			("Occupation", "Service"),
		]
		mask = numpy.ones((audit_X.shape[0], ), dtype = bool)
		for column, level in excluded_levels:
			mask = numpy.logical_and(mask, audit_X[column] != level)
		audit_X = audit_X[mask]
		audit_y = audit_y[mask]
	return (audit_X, audit_y)
def can_handle_unknown(cat_encoder):
	"""Return True when *cat_encoder* tolerates category levels unseen
	during training (rather than raising an error)."""
	encoder = cat_encoder.steps[0][1] if isinstance(cat_encoder, Pipeline) else cat_encoder
	if isinstance(encoder, OrdinalEncoder):
		# For OrdinalEncoder both "error" and "value" count as non-permissive
		return encoder.handle_unknown not in ("error", "value")
	return encoder.handle_unknown != "error"
def build_audit(cat_encoder, cont_encoder, classifier, name, **pmml_options):
	"""Fit a PMMLPipeline on the Audit data with the given encoders and
	classifier, pickle it under *name*, and store its predictions as CSV."""
	audit_X, audit_y = load_filter_audit(name, can_handle_unknown(cat_encoder))
	mapper = ColumnTransformer([
		("cat", cat_encoder, ["Employment", "Education", "Marital", "Occupation", "Gender"]),
		("cont", cont_encoder, ["Age", "Income", "Hours"])
	])
	steps = [("mapper", mapper), ("classifier", classifier)]
	if isinstance(classifier, HistGradientBoostingClassifier):
		# HistGradientBoostingClassifier cannot consume sparse mapper output
		steps.insert(1, ("formatter", DenseTransformer()))
	pipeline = PMMLPipeline(steps)
	pipeline.fit(audit_X, audit_y)
	pipeline.configure(**pmml_options)
	store_pkl(pipeline, name)
	# Predict on the unfiltered data so held-out category levels are exercised
	audit_X, audit_y = load_filter_audit(name, False)
	adjusted = DataFrame(pipeline.predict(audit_X), columns = ["Adjusted"])
	adjusted_proba = DataFrame(pipeline.predict_proba(audit_X), columns = ["probability(0)", "probability(1)"])
	adjusted = pandas.concat((adjusted, adjusted_proba), axis = 1)
	store_csv(adjusted, name)
if __name__ == "__main__":
	# Linear model with strict (error-on-unknown/missing) encoders
	classifier = LogisticRegression()
	build_audit(OneHotEncoder(handle_missing = "error", handle_unknown = "error"), "passthrough", clone(classifier), "OneHotEncoderAudit")
	build_audit(Pipeline([("ordinal", OrdinalEncoder(handle_missing = "error", handle_unknown = "value")), ("ohe", SkLearnOneHotEncoder())]), "passthrough", clone(classifier), "OrdinalEncoderAudit")
	classifier = HistGradientBoostingClassifier(random_state = 13)
	build_audit(OneHotEncoder(handle_missing = "value", handle_unknown = "error"), "passthrough", clone(classifier), "OneHotEncoderAuditNA")
	build_audit(Pipeline([("ordinal", OrdinalEncoder(handle_missing = "value", handle_unknown = "error")), ("ohe", SkLearnOneHotEncoder())]), "passthrough", clone(classifier), "OrdinalEncoderAuditNA")
	# Base-N encoders at increasing bases
	classifier = LogisticRegression()
	build_audit(BaseNEncoder(base = 2, drop_invariant = True, handle_missing = "error", handle_unknown = "error"), "passthrough", clone(classifier), "Base2EncoderAudit")
	build_audit(BaseNEncoder(base = 2, handle_missing = "value", handle_unknown = "value"), SimpleImputer(), clone(classifier), "Base2EncoderAuditNA")
	build_audit(Pipeline([("basen", BaseNEncoder(base = 3, drop_invariant = True, handle_missing = "error", handle_unknown = "error")), ("ohe", SkLearnOneHotEncoder())]), "passthrough", clone(classifier), "Base3EncoderAudit")
	build_audit(Pipeline([("basen", BaseNEncoder(base = 3, handle_missing = "value", handle_unknown = "value")), ("ohe", SkLearnOneHotEncoder(handle_unknown = "ignore"))]), SimpleImputer(), clone(classifier), "Base3EncoderAuditNA")
	classifier = HistGradientBoostingClassifier(random_state = 13)
	build_audit(BaseNEncoder(base = 4, drop_invariant = True, handle_missing = "error", handle_unknown = "error"), "passthrough", clone(classifier), "Base4EncoderAudit")
	build_audit(BaseNEncoder(base = 4, handle_missing = "value", handle_unknown = "value"), "passthrough", clone(classifier), "Base4EncoderAuditNA")
	# Target-statistics encoders with a random forest
	rf_pmml_options = {"compact" : False, "numeric": True}
	classifier = RandomForestClassifier(n_estimators = 71, random_state = 13)
	build_audit(BinaryEncoder(handle_missing = "error", handle_unknown = "value"), "passthrough", clone(classifier), "BinaryEncoderAudit", **rf_pmml_options)
	build_audit(CatBoostEncoder(a = 0.5, handle_missing = "error", handle_unknown = "value"), "passthrough", clone(classifier), "CatBoostEncoderAudit", **rf_pmml_options)
	build_audit(CountEncoder(normalize = True, min_group_size = 0.05, handle_missing = "error", handle_unknown = "value"), "passthrough", clone(classifier), "CountEncoderAudit", **rf_pmml_options)
	build_audit(LeaveOneOutEncoder(handle_missing = "error", handle_unknown = "value"), "passthrough", clone(classifier), "LeaveOneOutEncoderAudit", **rf_pmml_options)
	build_audit(TargetEncoder(handle_missing = "error", handle_unknown = "value"), "passthrough", clone(classifier), "TargetEncoderAudit", **rf_pmml_options)
	build_audit(WOEEncoder(handle_missing = "error", handle_unknown = "value"), "passthrough", clone(classifier), "WOEEncoderAudit", **rf_pmml_options)
	# Same encoders against missing-value data with gradient boosting
	classifier = HistGradientBoostingClassifier(random_state = 13)
	build_audit(BinaryEncoder(handle_missing = "value", handle_unknown = "value"), "passthrough", clone(classifier), "BinaryEncoderAuditNA")
	build_audit(CatBoostEncoder(a = 0.5, handle_missing = "value", handle_unknown = "value"), "passthrough", clone(classifier), "CatBoostEncoderAuditNA")
	build_audit(CountEncoder(min_group_size = 10, handle_missing = "value", handle_unknown = 10), "passthrough", clone(classifier), "CountEncoderAuditNA")
	build_audit(LeaveOneOutEncoder(handle_missing = "value", handle_unknown = "value"), "passthrough", clone(classifier), "LeaveOneOutEncoderAuditNA")
	build_audit(TargetEncoder(handle_missing = "value", handle_unknown = "value"), "passthrough", clone(classifier), "TargetEncoderAuditNA")
	build_audit(WOEEncoder(handle_missing = "value", handle_unknown = "value"), "passthrough", clone(classifier), "WOEEncoderAuditNA")
plotly/plotly.py | packages/python/plotly/plotly/validators/scatterpolargl/marker/colorbar/title/_side.py | Python | mit | 527 | 0 | import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``side`` property of
    ``scatterpolargl.marker.colorbar.title`` (one of 'right'/'top'/'bottom')."""

    def __init__(
        self,
        plotly_name="side",
        parent_name="scatterpolargl.marker.colorbar.title",
        **kwargs
    ):
        super(SideValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", ["right", "top", "bottom"]),
            **kwargs
        )
stewartadam/audio-convert-mod | src/audio_convert_mod/acmlogger.py | Python | gpl-2.0 | 3,746 | 0.010144 | # -*- coding: utf-8 -*-
# Copyright (C) 2005, 2006, 2007, 2008, 2009 Stewart Adam
# This file is part of audio-convert-mod.
# audio-convert-mod is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# audio-convert-mod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more | details.
# You should have received a copy of the GNU General Public License
# along with audio-convert-mod; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
The audio-convert-mod logger
Based on fwbackups's fwlogger.py file (2009-07-29)
"""
import datetime
import logging
import types

from audio_convert_mod.const import *
from audio_convert_mod.i18n import _

# Shorthand aliases for the stdlib logging severity levels
L_DEBUG = logging.DEBUG
L_INFO = logging.INFO
L_WARNING = logging.WARNING
L_ERROR = logging.ERROR
L_CRITICAL = logging.CRITICAL

# Registered logger names, keyed by role
LOGGERS = {'main': 'acm-main'}
# Severity name -> numeric logging level
LEVELS = {'debug': 10,
          'info': 20,
          'warning': 30,
          'error': 40,
          'critical': 50}
def getLogger():
  """Return the shared audio-convert-mod logger instance."""
  # Temporarily install our Logger subclass so that logging.getLogger()
  # constructs an acmLogger the first time this name is requested.
  logging.setLoggerClass(acmLogger)
  acm_logger = logging.getLogger(LOGGERS['main'])
  # Restore the default class to prevent excessive logging from other
  # applications/libraries that create loggers afterwards.
  logging.setLoggerClass(logging.Logger)
  return acm_logger
def shutdown():
  """Flush and close all logging handlers, shutting the logging system down."""
  logging.shutdown()
class acmLogger(logging.Logger):
  """A subclass of logging.Logger that also fans messages out to
  registered callback functions and, optionally, to stdout."""
  def __init__(self, name, level=logging.DEBUG):
    """Setup the audio-convert-mod logger, text mode"""
    logging.Logger.__init__(self, name, level)
    self.__printToo = False    # echo log entries to stdout when True
    self.__functions = []      # callbacks invoked for every logged message
    self.__newmessages = False
    try:
      # need a handler: append to the application log file
      loghandler = logging.FileHandler(LOGLOC, 'a')
      # Create formatter & add formatter to handler
      logformatter = logging.Formatter("%(message)s")
      loghandler.setFormatter(logformatter)
      # add handler to logger
      self.addHandler(loghandler)
    # 'except E, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.x
    except Exception as error:
      # print as a function call: valid in both Python 2 and 3
      print(_('Could not set up the logger!'))
      raise

  def setPrintToo(self, printToo):
    """If printToo is True, print messages to stdout as we log them"""
    self.__printToo = printToo

  def getPrintToo(self):
    """Retrieves the printToo property"""
    return self.__printToo

  def unconnect(self, function):
    """Disconnects a function from logmsg. Returns true if disconnected, false
    if that function was not connected."""
    try:
      self.__functions.remove(function)
      return True
    except ValueError:
      return False

  def connect(self, function):
    """Connects a function to logmsg. `function' must be passed as an instance,
    not the function() call itself.

    Function will be given the severity and message as arguments:
      def callback(severity, message)"""
    self.__functions.append(function)

  def logmsg(self, severity, message):
    """Logs a message. Severity is one of 'debug', 'info', 'warning', 'error'
    or 'critical'."""
    date = datetime.datetime.now().strftime('%b %d %H:%M:%S')
    level = self.getEffectiveLevel()
    # only emit when the logger's level lets this severity through
    if level <= LEVELS[severity.lower()]:
      entry = '%s :: %s : %s' % (date, _(severity.upper()), message)
      # pull in & execute the appropriate function
      getattr(self, severity.lower())(entry)
      if self.__printToo:
        print(entry)
      # notify every connected callback
      for i in self.__functions:
        i(severity.lower(), entry)
|
eunchong/build | third_party/buildbot_8_4p1/buildbot/locks.py | Python | bsd-3-clause | 10,720 | 0.001772 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.python import log
from twisted.internet import reactor, defer
from buildbot import util
# Flip the constant below to True to route verbose lock-tracing output through
# twisted's log; by default tracing is compiled down to a no-op lambda.
if False: # for debugging
    debuglog = log.msg
else:
    debuglog = lambda m: None
class BaseLock:
    """
    Class handling claiming and releasing of L{self}, and keeping track of
    current and waiting owners.

    A lock is held either by exactly one 'exclusive' owner, or by up to
    ``maxCount`` 'counting' owners.

    @note: Ideally, we'd like to maintain FIFO order. The place to do that
           would be the L{isAvailable()} function. However, this function is
           called by builds/steps both for the first time, and after waking
           them up by L{self} from the L{self.waiting} queue. There is
           currently no way of distinguishing between them.
    """
    description = "<BaseLock>"

    def __init__(self, name, maxCount=1):
        self.name = name          # Name of the lock
        self.waiting = []         # Current queue, tuples (LockAccess, deferred)
        self.owners = []          # Current owners, tuples (owner, LockAccess)
        self.maxCount = maxCount  # maximal number of counting owners

    def __repr__(self):
        return self.description

    def _getOwnersCount(self):
        """ Return the number of current exclusive and counting owners.

            @return: Tuple (number exclusive owners, number counting owners)
        """
        num_excl, num_counting = 0, 0
        for owner in self.owners:
            if owner[1].mode == 'exclusive':
                num_excl = num_excl + 1
            else: # mode == 'counting'
                num_counting = num_counting + 1
        # invariant: either one exclusive owner, or only counting owners
        assert (num_excl == 1 and num_counting == 0) \
                or (num_excl == 0 and num_counting <= self.maxCount)
        return num_excl, num_counting

    def isAvailable(self, access):
        """ Return a boolean whether the lock is available for claiming """
        debuglog("%s isAvailable(%s): self.owners=%r"
                                            % (self, access, self.owners))
        num_excl, num_counting = self._getOwnersCount()
        if access.mode == 'counting':
            # Wants counting access
            return num_excl == 0 and num_counting < self.maxCount
        else:
            # Wants exclusive access
            return num_excl == 0 and num_counting == 0

    def claim(self, owner, access):
        """ Claim the lock (lock must be available) """
        debuglog("%s claim(%s, %s)" % (self, owner, access.mode))
        assert owner is not None
        assert self.isAvailable(access), "ask for isAvailable() first"

        assert isinstance(access, LockAccess)
        assert access.mode in ['counting', 'exclusive']
        self.owners.append((owner, access))
        debuglog(" %s is claimed '%s'" % (self, access.mode))

    def release(self, owner, access):
        """ Release the lock and wake up as many waiters as can now run. """
        assert isinstance(access, LockAccess)

        debuglog("%s release(%s, %s)" % (self, owner, access.mode))
        entry = (owner, access)
        assert entry in self.owners
        self.owners.remove(entry)
        # who can we wake up?
        # After an exclusive access, we may need to wake up several waiting.
        # Break out of the loop when the first waiting client should not be awakened.
        num_excl, num_counting = self._getOwnersCount()
        while len(self.waiting) > 0:
            access, d = self.waiting[0]
            if access.mode == 'counting':
                if num_excl > 0 or num_counting == self.maxCount:
                    break
                else:
                    num_counting = num_counting + 1
            else:
                # access.mode == 'exclusive'
                if num_excl > 0 or num_counting > 0:
                    break
                else:
                    num_excl = num_excl + 1

            del self.waiting[0]
            # wake the waiter on the next reactor turn
            reactor.callLater(0, d.callback, self)

    def waitUntilMaybeAvailable(self, owner, access):
        """Fire when the lock *might* be available. The caller will need to
        check with isAvailable() when the deferred fires. This loose form is
        used to avoid deadlocks. If we were interested in a stronger form,
        this would be named 'waitUntilAvailable', and the deferred would fire
        after the lock had been claimed.
        """
        debuglog("%s waitUntilAvailable(%s)" % (self, owner))
        assert isinstance(access, LockAccess)
        if self.isAvailable(access):
            return defer.succeed(self)
        d = defer.Deferred()
        self.waiting.append((access, d))
        return d

    def stopWaitingUntilAvailable(self, owner, access, d):
        debuglog("%s stopWaitingUntilAvailable(%s)" % (self, owner))
        assert isinstance(access, LockAccess)
        assert (access, d) in self.waiting
        self.waiting.remove( (access, d) )

    def isOwner(self, owner, access):
        return (owner, access) in self.owners
class RealMasterLock(BaseLock):
    """A master-wide lock: every slave shares this single BaseLock."""

    def __init__(self, lockid):
        BaseLock.__init__(self, lockid.name, lockid.maxCount)
        self.description = "<MasterLock(%s, %s)>" % (self.name, self.maxCount)

    def getLock(self, slave):
        # The same lock instance is handed out regardless of which slave asks.
        return self
class RealSlaveLock:
    """Factory for per-slave locks sharing one logical name.

    Each slave gets its own BaseLock instance, created lazily on first
    request; ``maxCountForSlave`` may override the default ``maxCount``
    for specific slaves.
    """
    def __init__(self, lockid):
        self.name = lockid.name
        self.maxCount = lockid.maxCount
        self.maxCountForSlave = lockid.maxCountForSlave
        self.description = "<SlaveLock(%s, %s, %s)>" % (self.name,
                                                        self.maxCount,
                                                        self.maxCountForSlave)
        # lazily-populated map of slavename -> BaseLock
        self.locks = {}

    def __repr__(self):
        return self.description

    def getLock(self, slavebuilder):
        """Return (creating if needed) the lock for this slavebuilder's slave."""
        slavename = slavebuilder.slave.slavename
        # 'in' instead of the Python-2-only dict.has_key()
        if slavename not in self.locks:
            maxCount = self.maxCountForSlave.get(slavename,
                                                 self.maxCount)
            # single assignment; the original redundantly stored the lock twice
            lock = self.locks[slavename] = BaseLock(self.name, maxCount)
            desc = "<SlaveLock(%s, %s)[%s] %d>" % (self.name, maxCount,
                                                   slavename, id(lock))
            lock.description = desc
        return self.locks[slavename]
class LockAccess(util.ComparableMixin):
    """ I am an object representing a way to access a lock.

    @param lockid: LockId instance that should be accessed.
    @type  lockid: A MasterLock or SlaveLock instance.

    @param mode: Mode of accessing the lock.
    @type  mode: A string, either 'counting' or 'exclusive'.
    """
    compare_attrs = ['lockid', 'mode']

    def __init__(self, lockid, mode):
        assert isinstance(lockid, (MasterLock, SlaveLock))
        assert mode in ['counting', 'exclusive']
        self.lockid = lockid
        self.mode = mode
class BaseLockId(util.ComparableMixin):
""" Abstract base class for LockId classes.
Sets up the 'access()' function for the LockId's available to the user
(MasterLock and SlaveLock classes).
Derived classes should add
- Comparison with the L{util.ComparableMixin} via the L{compare_attrs}
class variable.
- Link to the actual lock class should be added with the L{lockClass}
class variable.
"""
def access(self, mode):
""" Express how the lock should be accessed """
assert mode in ['counting', 'exclusive']
return LockAccess(self, mode)
def defaultAccess(self):
""" For buildbot 0.7.7 compability: When user doesn't specify |
whitepages/nova | nova/tests/functional/api_sample_tests/test_flavor_access.py | Python | apache-2.0 | 4,391 | 0 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class FlavorAccessSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
    """API sample tests for the os-flavor-access extension (admin API)."""

    ADMIN_API = True
    extension_name = 'flavor-access'

    def _get_flags(self):
        # Load flavor_access plus the extensions it depends on for the
        # sample responses (flavormanage, flavor_disabled, flavorextradata,
        # flavor_swap).
        f = super(FlavorAccessSampleJsonTests, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.'
            'flavor_access.Flavor_access')
        # FlavorAccess extension also needs Flavormanage to be loaded.
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.'
            'flavormanage.Flavormanage')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.'
            'flavor_disabled.Flavor_disabled')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.'
            'flavorextradata.Flavorextradata')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.'
            'flavor_swap.Flavor_swap')
        return f

    def _add_tenant(self):
        # Grant 'fake_tenant' access to flavor 10 and verify the response.
        subs = {
            'tenant_id': 'fake_tenant',
            'flavor_id': 10,
        }
        response = self._do_post('flavors/10/action',
                                 'flavor-access-add-tenant-req',
                                 subs)
        self._verify_response('flavor-access-add-tenant-resp',
                              subs, response, 200)

    def _create_flavor(self):
        # Create flavor 10 used by the access tests below.
        subs = {
            'flavor_id': 10,
            'flavor_name': 'test_flavor'
        }
        response = self._do_post("flavors",
                                 "flavor-access-create-req",
                                 subs)
        subs.update(self._get_regexes())
        self._verify_response("flavor-access-create-resp", subs, response, 200)

    def test_flavor_access_create(self):
        self._create_flavor()

    def test_flavor_access_detail(self):
        response = self._do_get('flavors/detail')
        subs = self._get_regexes()
        self._verify_response('flavor-access-detail-resp', subs, response, 200)

    def test_flavor_access_list(self):
        self._create_flavor()
        self._add_tenant()
        flavor_id = 10
        response = self._do_get('flavors/%s/os-flavor-access' % flavor_id)
        subs = {
            'flavor_id': flavor_id,
            'tenant_id': 'fake_tenant',
        }
        self._verify_response('flavor-access-list-resp', subs, response, 200)

    def test_flavor_access_show(self):
        flavor_id = 1
        response = self._do_get('flavors/%s' % flavor_id)
        subs = {
            'flavor_id': flavor_id
        }
        subs.update(self._get_regexes())
        self._verify_response('flavor-access-show-resp', subs, response, 200)

    def test_flavor_access_add_tenant(self):
        self._create_flavor()
        self._add_tenant()

    def test_flavor_access_remove_tenant(self):
        self._create_flavor()
        self._add_tenant()
        subs = {
            'tenant_id': 'fake_tenant',
        }
        response = self._do_post('flavors/10/action',
                                 "flavor-access-remove-tenant-req",
                                 subs)
        exp_subs = {
            "tenant_id": self.api.project_id,
            "flavor_id": "10"
        }
        self._verify_response('flavor-access-remove-tenant-resp',
                              exp_subs, response, 200)
MeteorKepler/RICGA | ricga/reference/ssd.py | Python | apache-2.0 | 11,963 | 0.002842 | #!/usr/bin/env python3
# encoding: utf-8
# Author: MeteorsHub
# License: BSD | Licence
# Contact: JimRanor@outlook.com
# Site: http://www.meteorshub.com
# File: ssd.py
# Time: 2017/4/24 10:17
"""Keras implementation of SSD."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import keras.backend as K
from keras.layers import Activation
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import GlobalAveragePooling2D
from keras.layers import Input
from keras.layers import MaxPooling2D
from keras.layers import Reshape
from keras.layers import ZeroPadding2D
from keras.layers.merge import Concatenate
from keras.models import Model

from ricga.reference.ssd_layers import Normalize
from ricga.reference.ssd_layers import PriorBox
def SSD300(input_shape, num_classes=21):
"""SSD300 architecture.
# Arguments
input_shape: Shape of the input image,
expected to be either (300, 300, 3) or (3, 300, 300)(not tested).
num_classes: Number of classes including background.
pretrain: If true, model will use weights pretrained on voc.
# References
https://arxiv.org/abs/1512.02325
"""
net = {}
# Block 1
input_tensor = Input(shape=input_shape)
img_size = (input_shape[1], input_shape[0])
net['input'] = input_tensor
net['conv1_1'] = Conv2D(64, (3, 3), activation="relu", name="conv1_1", padding="same")(net['input'])
net['conv1_2'] = Conv2D(64, (3, 3), activation="relu", name="conv1_2", padding="same")(net['conv1_1'])
net['pool1'] = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='pool1')(net['conv1_2'])
# Block 2
net['conv2_1'] = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_1')(net['pool1'])
net['conv2_2'] = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_2')(net['conv2_1'])
net['pool2'] = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='pool2')(net['conv2_2'])
# Block 3
net['conv3_1'] = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1')(net['pool2'])
net['conv3_2'] = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2')(net['conv3_1'])
net['conv3_3'] = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_3')(net['conv3_2'])
net['pool3'] = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='pool3')(net['conv3_3'])
# Block 4
net['conv4_1'] = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1')(net['pool3'])
net['conv4_2'] = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2')(net['conv4_1'])
net['conv4_3'] = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_3')(net['conv4_2'])
net['pool4'] = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='pool4')(net['conv4_3'])
# Block 5
net['conv5_1'] = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_1')(net['pool4'])
net['conv5_2'] = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_2')(net['conv5_1'])
net['conv5_3'] = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_3')(net['conv5_2'])
net['pool5'] = MaxPooling2D((3, 3), strides=(1, 1), padding='same', name='pool5')(net['conv5_3'])
# FC6
net['fc6'] = Conv2D(1024, (3, 3), dilation_rate=(6, 6), activation='relu', padding='same', name='fc6')(net['pool5'])
# x = Dropout(0.5, name='drop6')(x)
# FC7
net['fc7'] = Conv2D(1024, (1, 1), activation='relu', padding='same', name='fc7')(net['fc6'])
# x = Dropout(0.5, name='drop7')(x)
# Block 6
net['conv6_1'] = Conv2D(256, (1, 1), activation='relu', padding='same', name='conv6_1')(net['fc7'])
net['conv6_2'] = Conv2D(512, (3, 3), strides=(2, 2), activation='relu', padding='same', name='conv6_2')(
net['conv6_1'])
# Block 7
net['conv7_1'] = Conv2D(128, (1, 1), activation='relu', padding='same', name='conv7_1')(net['conv6_2'])
net['conv7_2'] = ZeroPadding2D()(net['conv7_1'])
net['conv7_2'] = Conv2D(256, (3, 3), strides=(2, 2), activation='relu', padding='valid', name='conv7_2')(
net['conv7_2'])
# Block 8
net['conv8_1'] = Conv2D(128, (1, 1), activation='relu', padding='same', name='conv8_1')(net['conv7_2'])
net['conv8_2'] = Conv2D(256, (3, 3), strides=(2, 2), activation='relu', padding='same', name='conv8_2')(
net['conv8_1'])
# Last Pool
net['pool6'] = GlobalAveragePooling2D(name='pool6')(net['conv8_2'])
# Prediction from conv4_3
net['conv4_3_norm'] = Normalize(20, name='conv4_3_norm')(net['conv4_3'])
num_priors = 3
x = Conv2D(num_priors * 4, (3, 3), padding='same', name='conv4_3_norm_mbox_loc')(net['conv4_3_norm'])
net['conv4_3_norm_mbox_loc'] = x
flatten = Flatten(name='conv4_3_norm_mbox_loc_flat')
net['conv4_3_norm_mbox_loc_flat'] = flatten(net['conv4_3_norm_mbox_loc'])
name = 'conv4_3_norm_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
x = Conv2D(num_priors * num_classes, (3, 3), padding='same', name=name)(net['conv4_3_norm'])
net['conv4_3_norm_mbox_conf'] = x
flatten = Flatten(name='conv4_3_norm_mbox_conf_flat')
net['conv4_3_norm_mbox_conf_flat'] = flatten(net['conv4_3_norm_mbox_conf'])
priorbox = PriorBox(img_size, 30.0, aspect_ratios=[2],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv4_3_norm_mbox_priorbox')
net['conv4_3_norm_mbox_priorbox'] = priorbox(net['conv4_3_norm'])
# Prediction from fc7
num_priors = 6
net['fc7_mbox_loc'] = Conv2D(num_priors * 4, (3, 3), padding='same', name='fc7_mbox_loc')(net['fc7'])
flatten = Flatten(name='fc7_mbox_loc_flat')
net['fc7_mbox_loc_flat'] = flatten(net['fc7_mbox_loc'])
name = 'fc7_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
net['fc7_mbox_conf'] = Conv2D(num_priors * num_classes, (3, 3),
padding='same',
name=name)(net['fc7'])
flatten = Flatten(name='fc7_mbox_conf_flat')
net['fc7_mbox_conf_flat'] = flatten(net['fc7_mbox_conf'])
priorbox = PriorBox(img_size, 60.0, max_size=114.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='fc7_mbox_priorbox')
net['fc7_mbox_priorbox'] = priorbox(net['fc7'])
# Prediction from conv6_2
num_priors = 6
x = Conv2D(num_priors * 4, (3, 3), padding='same',
name='conv6_2_mbox_loc')(net['conv6_2'])
net['conv6_2_mbox_loc'] = x
flatten = Flatten(name='conv6_2_mbox_loc_flat')
net['conv6_2_mbox_loc_flat'] = flatten(net['conv6_2_mbox_loc'])
name = 'conv6_2_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
x = Conv2D(num_priors * num_classes, (3, 3), padding='same',
name=name)(net['conv6_2'])
net['conv6_2_mbox_conf'] = x
flatten = Flatten(name='conv6_2_mbox_conf_flat')
net['conv6_2_mbox_conf_flat'] = flatten(net['conv6_2_mbox_conf'])
priorbox = PriorBox(img_size, 114.0, max_size=168.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv6_2_mbox_priorbox')
net['conv6_2_mbox_priorbox'] = priorbox(net['conv6_2'])
# Prediction from conv7_2
num_priors = 6
x = Conv2D(num_priors * 4, (3, 3), padding='same',
name='conv7_2_mbox_loc')(net['conv7_2'])
net['conv7_2_mbox_loc'] = x
flatten = Flatten(name='conv7_2_mbox_loc_flat')
net['conv7_2_mbox_loc_flat'] = flatten(net['conv7_2_mbox_loc'])
name = 'conv7_2_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
x = Conv2D(num_priors * num_classes, (3, 3), padding='same',
name=name)(net['conv7_2'])
net['conv7_2_mbox_conf'] = x
flatten = Flatten(name='conv7_2_mbox_conf_flat')
net['conv7_2_mbox_conf_flat'] = flatten(net['conv7_2_mbox_conf'])
priorbox = PriorBox(img_size, 168.0, max_size=222.0, aspect_ratios=[ |
GiulioGx/RNNs | sources/datasets/LupusDataset.py | Python | lgpl-3.0 | 25,918 | 0.00355 | import abc
import math
import numpy
from scipy.io import loadmat
from Configs import Configs
from Paths import Paths
from datasets.LupusFilter import VisitsFilter, \
NullFIlter, TemporalSpanFilter
from infos.Info import Info, NullInfo
from infos.InfoElement import SimpleDescription, PrintableInfoElement
from infos.InfoGroup import InfoGroup
from infos.InfoList import InfoList
from datasets.Batch import Batch
from datasets.Dataset import Dataset
import os
# TO CHECK normalization, selected features
class BuildBatchStrategy(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def build_batch(self, patience):
"""build up a batch according to the strategy"""
@abc.abstractmethod
def keys(self) -> list:
"""return the keys of the sets to be used with this strategy"""
@abc.abstractmethod
def n_in(self, num_pat_feats):
"""returns the number of features each batch is composed of given the number of features
of each visits of a patience"""
class PerVisitTargets(BuildBatchStrategy):
def n_in(self, num_pat_feats):
return num_pat_feats
def __init__(self):
pass
def keys(self) -> list:
return ['early_pos', 'late_pos', 'neg']
def build_batch(self, patience):
feats = patience['features']
targets = patience['targets']
n_visits = len(targets)
mask = numpy.ones_like(feats)
return feats[0:n_visits - 1, :], targets[1:n_visits, :], mask
class PerPatienceTargets(BuildBatchStrategy):
def n_in(self, num_pat_feats):
return num_pat_feats
def keys(self) -> list:
return ['neg', 'late_pos']
def build_batch(self, patience):
feats = patience['features']
targets = patience['targets']
non_zero_indexes = numpy.where(targets > 0)[0]
if len(non_zero_indexes) > 0:
first_positive_idx = numpy.min(non_zero_indexes)
assert (first_positive_idx > 0)
outputs = numpy.zeros(shape=(first_positive_idx, 1), dtype=Configs.floatType)
outputs[-1] = 1
inputs = feats[0:first_positive_idx, :]
else:
inputs = feats[0:-1, :]
outputs = targets[0:-1, :]
mask = numpy.zeros_like(outputs)
mask[-1, :] = 1
return inputs, outputs, mask
class TemporalDifferenceTargets(BuildBatchStrategy):
def n_in(self, num_pat_feats):
return num_pat_feats
def keys(self) -> list:
return ['neg', 'late_pos']
def build_batch(self, patience):
feats = patience['features']
targets = patience['targets']
outputs = numpy.zeros(shape=(1, 1), dtype=Configs.floatType)
inputs = numpy.zeros(shape=(1, feats.shape[1]), dtype=Configs.floatType)
non_zero_indexes = numpy.where(targets > 0)[0]
if len(non_zero_indexes) > 0:
first_positive_idx = numpy.min(non_zero_indexes)
assert (first_positive_idx > 0)
outputs[0, :] = 1
inputs[0, :] = feats[first_positive_idx - 1, :] - feats[0, :]
else:
inputs[0, | :] = feats[-2, :] - feats[0, :]
outputs[0, :] = targets[-2, :]
mask = numpy.zeros_like(outputs)
| mask[0, :] = 1
return inputs, outputs, mask
class LastAndFirstVisitsTargets(BuildBatchStrategy):
def n_in(self, num_pat_feats):
return num_pat_feats * 2
def keys(self) -> list:
return ['neg', 'late_pos']
def build_batch(self, patience):
feats = patience['features']
targets = patience['targets']
outputs = numpy.zeros(shape=(1, 1), dtype=Configs.floatType)
inputs = numpy.zeros(shape=(1, feats.shape[1] * 2), dtype=Configs.floatType)
non_zero_indexes = numpy.where(targets > 0)[0]
if len(non_zero_indexes) > 0:
first_positive_idx = numpy.min(non_zero_indexes)
assert (first_positive_idx > 0)
outputs[0, :] = 1
inputs[0, :] = numpy.concatenate((feats[first_positive_idx - 1, :], feats[0, :]), axis=0)
else:
inputs[0, :] = numpy.concatenate((feats[-2, :], feats[0, :]), axis=0)
outputs[0, :] = targets[-2, :]
mask = numpy.zeros_like(outputs)
mask[0, :] = 1
return inputs, outputs, mask
class LupusDataset(Dataset):
@staticmethod
def parse_mat(mat_file: str, feature_names:list=None):
mat_obj = loadmat(mat_file)
positive_patients = mat_obj['pazientiPositivi']
negative_patients = mat_obj['pazientiNegativi']
features_struct = mat_obj['selectedFeatures']
# features_struct = mat_obj['featuresVip7']
features_names = LupusDataset.__find_features_names(features_struct) if feature_names is None else feature_names
# features_names = ['DNA', 'arthritis', 'c3level', 'c4level', 'hematological', 'skinrash', 'sledai2kInferred']
#
# features_names = ['APS', 'DNA', 'FM', 'Hashimoto', 'MyasteniaGravis', 'SdS',
# 'arterialthrombosis', 'arthritis', 'c3level', 'c4level', 'dislipidemia', 'hcv',
# 'hematological', 'hypertension', 'hypothyroidism', 'kidney', 'mthfr', 'npsle',
# 'pregnancypathology', 'serositis', 'sex', 'skinrash', 'sledai2kInferred',
# 'venousthrombosis']
# first 10 in ranking
# features_names = ["c4level", "c3level", "arthritis", "arterialthrombosis", "SdS", "MyasteniaGravis", "Hashimoto", "FM", "DNA", "APS"]
# last 10 in ranking
# features_names = ["venousthrombosis", "sledai2kInferred", "skinrash", "sex", "serositis", "pregnancypathology",
# "npsle", "mthfr", "kidney", "hypothyroidism"]
# last 10 without sledai
# features_names = ["venousthrombosis", "serositis", "pregnancypathology", "mthfr", "kidney", "hypothyroidism"]
# in-between
# features_names = ["hypertension", "hematological", "hcv", "dislipidemia"]
# features_names = ['APS' 'DNA' 'FM' 'Hashimoto' 'MyasteniaGravis' 'SdS' 'age'
# 'arterialthrombosis' 'arthritis' 'c3level' 'c4level' 'dislipidemia' 'hcv'
# 'hematological' 'hypertension' 'hypothyroidism' 'kidney' 'mthfr' 'npsle'
# 'pregnancypathology' 'serositis' 'sex' 'skinrash' 'sledai2kInferred'
# 'venousthrombosis' 'yearOfDisease']
# features_names = ['age', 'MyasteniaGravis', 'arthritis', 'c3level', 'c4level', 'hematological', 'skinrash', 'sledai2kInferred']
return positive_patients, negative_patients, features_names
@staticmethod
def load_mat(mat_file: str, visit_selector: VisitsFilter = NullFIlter(), seed=Configs.seed, features_names:list=None):
positive_patients, negative_patients, features_names = LupusDataset.parse_mat(mat_file=mat_file, feature_names=features_names)
data = numpy.concatenate((positive_patients, negative_patients), axis=0)
features_normalizations = LupusDataset.__find_normalization_factors(features_names, data)
result = LupusDataset.__process_patients(data, features_names, features_normalizations,
visit_selector=visit_selector)
####################
# folder = '/home/giulio'
# os.makedirs(folder, exist_ok=True)
# prefix = folder + '/'
# file = open(prefix + "visits_pos.txt", "w")
# exs = result["late_pos"]
# for i in range(len(exs)):
# pat = LupusDataset.__get_patience_descr(exs[i]) + "\n"
# file.write(pat)
# file.close()
####################
early_positives = result["early_pos"]
late_positives = result["late_pos"]
negatives = result["neg"]
rng = numpy.random.RandomState(seed)
rng.shuffle(early_positives)
rng.shuffle(late_positives)
rng.shuffle(negatives)
infos = InfoGroup('Lupus Dataset', InfoList(PrintableInfoElement('features', '', features_names),
|
courtarro/gnuradio | gr-utils/python/modtool/gr-newmod/docs/doxygen/swig_doc.py | Python | gpl-3.0 | 11,634 | 0.003008 | #
# Copyright 2010-2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public Licen | se
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Creates the swig_doc.i SWIG interface file.
Execute using: python swig_doc.py xml_path outputfilename
The file instructs SWIG to transfer the doxygen comments into the
python docstrings.
"" | "
import sys, time
from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile
from doxyxml import DoxyOther, base
def py_name(name):
bits = name.split('_')
return '_'.join(bits[1:])
def make_name(name):
bits = name.split('_')
return bits[0] + '_make_' + '_'.join(bits[1:])
class Block(object):
"""
Checks if doxyxml produced objects correspond to a gnuradio block.
"""
@classmethod
def includes(cls, item):
if not isinstance(item, DoxyClass):
return False
# Check for a parsing error.
if item.error():
return False
friendname = make_name(item.name())
is_a_block = item.has_member(friendname, DoxyFriend)
# But now sometimes the make function isn't a friend so check again.
if not is_a_block:
is_a_block = di.has_member(friendname, DoxyFunction)
return is_a_block
class Block2(object):
"""
Checks if doxyxml produced objects correspond to a new style
gnuradio block.
"""
@classmethod
def includes(cls, item):
if not isinstance(item, DoxyClass):
return False
# Check for a parsing error.
if item.error():
return False
is_a_block2 = item.has_member('make', DoxyFunction) and item.has_member('sptr', DoxyOther)
return is_a_block2
def utoascii(text):
"""
Convert unicode text into ascii and escape quotes.
"""
if text is None:
return ''
out = text.encode('ascii', 'replace')
out = out.replace('"', '\\"')
return out
def combine_descriptions(obj):
"""
Combines the brief and detailed descriptions of an object together.
"""
description = []
bd = obj.brief_description.strip()
dd = obj.detailed_description.strip()
if bd:
description.append(bd)
if dd:
description.append(dd)
return utoascii('\n\n'.join(description)).strip()
def format_params(parameteritems):
output = ['Args:']
template = ' {0} : {1}'
for pi in parameteritems:
output.append(template.format(pi.name, pi.description))
return '\n'.join(output)
entry_templ = '%feature("docstring") {name} "{docstring}"'
def make_entry(obj, name=None, templ="{description}", description=None, params=[]):
"""
Create a docstring entry for a swig interface file.
obj - a doxyxml object from which documentation will be extracted.
name - the name of the C object (defaults to obj.name())
templ - an optional template for the docstring containing only one
variable named 'description'.
description - if this optional variable is set then it's value is
used as the description instead of extracting it from obj.
"""
if name is None:
name=obj.name()
if "operator " in name:
return ''
if description is None:
description = combine_descriptions(obj)
if params:
description += '\n\n'
description += utoascii(format_params(params))
docstring = templ.format(description=description)
if not docstring:
return ''
return entry_templ.format(
name=name,
docstring=docstring,
)
def make_func_entry(func, name=None, description=None, params=None):
"""
Create a function docstring entry for a swig interface file.
func - a doxyxml object from which documentation will be extracted.
name - the name of the C object (defaults to func.name())
description - if this optional variable is set then it's value is
used as the description instead of extracting it from func.
params - a parameter list that overrides using func.params.
"""
#if params is None:
# params = func.params
#params = [prm.declname for prm in params]
#if params:
# sig = "Params: (%s)" % ", ".join(params)
#else:
# sig = "Params: (NONE)"
#templ = "{description}\n\n" + sig
#return make_entry(func, name=name, templ=utoascii(templ),
# description=description)
return make_entry(func, name=name, description=description, params=params)
def make_class_entry(klass, description=None, ignored_methods=[], params=None):
"""
Create a class docstring for a swig interface file.
"""
if params is None:
params = klass.params
output = []
output.append(make_entry(klass, description=description, params=params))
for func in klass.in_category(DoxyFunction):
if func.name() not in ignored_methods:
name = klass.name() + '::' + func.name()
output.append(make_func_entry(func, name=name))
return "\n\n".join(output)
def make_block_entry(di, block):
"""
Create class and function docstrings of a gnuradio block for a
swig interface file.
"""
descriptions = []
# Get the documentation associated with the class.
class_desc = combine_descriptions(block)
if class_desc:
descriptions.append(class_desc)
# Get the documentation associated with the make function
make_func = di.get_member(make_name(block.name()), DoxyFunction)
make_func_desc = combine_descriptions(make_func)
if make_func_desc:
descriptions.append(make_func_desc)
# Get the documentation associated with the file
try:
block_file = di.get_member(block.name() + ".h", DoxyFile)
file_desc = combine_descriptions(block_file)
if file_desc:
descriptions.append(file_desc)
except base.Base.NoSuchMember:
# Don't worry if we can't find a matching file.
pass
# And join them all together to make a super duper description.
super_description = "\n\n".join(descriptions)
# Associate the combined description with the class and
# the make function.
output = []
output.append(make_class_entry(block, description=super_description))
output.append(make_func_entry(make_func, description=super_description,
params=block.params))
return "\n\n".join(output)
def make_block2_entry(di, block):
"""
Create class and function docstrings of a new style gnuradio block for a
swig interface file.
"""
descriptions = []
# For new style blocks all the relevant documentation should be
# associated with the 'make' method.
class_description = combine_descriptions(block)
make_func = block.get_member('make', DoxyFunction)
make_description = combine_descriptions(make_func)
description = class_description + "\n\nConstructor Specific Documentation:\n\n" + make_description
# Associate the combined description with the class and
# the make function.
output = []
output.append(make_class_entry(
block, description=description,
ignored_methods=['make'], params=make_func.params))
makename = block.name() + '::make'
output.append(make_func_entry(
make_func, name=makename, description=description,
params=make_func.params))
return "\n\n".join(output)
def make_swig_interface_file(di, swigdocfilename, custom_output=None):
output = ["""
/*
* This file was automatically generated usi |
overdev/PythonOS-1.01-cp2-cp3 | apps/calculator/__init__.py | Python | mit | 9,834 | 0.01444 | import pyos
import math
ans = 0
def sqrt(n):
return math.sqrt(n)
def nrt(r, n):
return n**(1.0/r)
def onStart(s, a):
global state, app
state = s
app = a
calc = Calculator()
class Calculator(object):
def __init__(self):
app.ui.clearChildren()
self.input = ""
self.showingAns = False
self.compField = pyos.GUI.Text((0, 20), "0", state.getColorPalette().getColor("item"), 40)
self.ansField = pyos.GUI.Text((2, 2), "0", state.getColorPalette().getColor("item"), 16)
l_paren = pyos.GUI.Button((40, 240), "(", state.getColorPalette().getColor("item"), state.getColorPalette().getColor("background"), 24,
width=40, height=40, onClick=self.addInput, onClickData=("(",),
border=1, borderColor=(20,20,20))
r_paren = pyos.GUI.Button((120, 240), ")", state.getColorPalette().getColor("item"), state.getColorPalette().getColor("background"), 24,
width=40, height=40, onClick=self.addInput, onClickData=(")",),
border=1, borderColor=(20,20,20))
ansbtn = pyos.GUI.Button((80, 240), "ans", state.getColorPalette().getColor("item"), state.getColorPalette().getColor("background"), 24,
width=40, height=40, onClick=self.addInput, onClickData=("ans",),
border=1, borderColor=(20,20,20))
app.ui.addChild(l_paren)
app.ui.addChild(r_paren)
app.ui.addChild(ansbtn)
app.ui.addChild(self.compField)
app.ui.addChild(self.ansField)
self.addNumBtns()
self.addFunctionButtons()
self.addSpecialButtons()
def addInput(self, data):
if self.input == "0" or self.showingAns:
if data == "+" or data == "-" or data == "*" or data == "/" or data == "**":
data = "ans" + data
self.input = str(data)
if self.showingAns:
self.showingAns = False
self.ansField.text = str(self.compField.text)
self.ansField.refresh()
else:
self.input += str(data)
self.compField.text = str(self.input)
self.compField.refresh()
def bkspcInput(self):
if len(self.input) > 0:
self.input = self.input[:len(self.input)-1]
self.compField.text = str(self.input)
self.compField.refresh()
def clearInput(self):
self.input = "0"
self.compField.text = str(self.input)
self.compField.refresh()
def evaluate(self):
try:
curr_ans = self.ansField.text
bestans = curr_ans
if self.ansField.text.find(".") != -1:
bestans = float(curr_ans)
else:
bestans = int(curr_ans)
self.compField.text = eval(self.input, {"sqrt": sqrt, "nrt": nrt, "ans": bestans, "pi": math.pi})
except:
self.compField.text = "err"
self.showingAns = True
self.compField.refresh()
def addNumBtns(self):
self.numBtns = []
self.numBtns.append(pyos.GUI.Button((40, 80), "7", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"), 24,
width=40, height=40, onClick=self.addInput, onClickData=(7,),
border=1, borderColor=(200,200,200)))
self.numBtns.append(pyos.GUI.Button((80, 80), "8", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"), 24,
width=40, height=40, onClick=self.addInput, onClickData=(8,),
border=1, borderColor=(200,200,200)))
self.numBtns.append(pyos.GUI.Button((120, 80), "9", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"), 24,
width=40, height=40, onClick=self.addInput, onClickData=(9,),
border=1, borderColor=(200,200,200)))
self.numBtns.append(pyos.GUI.Button((40, 120), "4", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"), 24,
width=40, height=40, onClick=self.addInput, onClickData=(4,),
border=1, borderColor=(200,200,200)))
self.numBtns.append(pyos.GUI.Button((80, 120), "5", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"), 24,
width=40, height=40, onClick=self.addInput, onClickData=(5,),
border=1, borderColor=(200,200,200)))
self.numBtns.append(pyos.GUI.Button((120, 120), "6", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"), 24,
width=40, height=40, onClick=self.addInput, onClickData=(6,),
border=1, borderColor=(200,200,200)))
self.numBtns.append(pyos.GUI.Button((40, 160), "1", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"), 24,
width=40, height=40, onClick=self.addInput, onClickData=(1,),
border=1, borderColor=(200,200,200)))
self.numBtns.append(pyos.GUI.Button((80, 160), "2", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"), 24,
width=40, height=40, onClick=self.addInput, onClickData=(2,),
border=1, borderColor=(200,200,200)))
self.numBtns.append(pyos.GUI.Button((120, 160), "3", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"), 24,
width=40, height=40, onClick=self.addInput, onClickData=(3,),
border=1, borderColor=(200,200,200)))
self.numBtns.append(pyos.GUI.Button((80, 200), "0", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"), 24 | ,
| width=40, height=40, onClick=self.addInput, onClickData=(0,),
border=1, borderColor=(200,200,200)))
self.numBtns.append(pyos.GUI.Button((40, 200), ",", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"), 24,
width=40, height=40, onClick=self.addInput, onClickData=(",",),
border=1, borderColor=(200,200,200)))
self.numBtns.append(pyos.GUI.Button((120, 200), ".", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"), 24,
width=40, height=40, onClick=self.addInput, onClickData=(".",),
border=1, borderColor=(200,200,200)))
for nb in self.numBtns:
app.ui.addChild(nb)
def addFunctionButtons(self):
self.fncBtns = []
self.fncBtns.append(pyos.GUI.Button((160, 80), "+", (123, 209, 237), (20, 20, 20), 24,
width=80, height=40, onClick=self.addInput, onClickData=("+",)))
self.fncBtns.append(pyos.GUI.Button((160, 120), "-", (113, 199, 227), (20, 20, 20), 24,
width=80, height=40, onClick=self.addInput, onClickData=("-",)))
self.fncBtns.append(pyos.GUI.Button((160, 160), "x", (103, 189, 217), (20, 20, 20), 24,
width=80, height=40, onClick=self.addInput, onClickData=("*",)))
self.fncBtns.append(pyos.GUI.Button((160, 200), "/", (93, 179, 207), (20, 20, 20), 24,
|
west2554/fofix | src/SongChoosingScene.py | Python | gpl-2.0 | 43,169 | 0.020501 | #####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire #
# Copyright (C) 2006 Sami Kyöstilä #
# 2008 myfingershurt | #
# 2008 evilynux <evilynux@gmail.com> #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Fre | e Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
from __future__ import with_statement
from Scene import Scene, SuppressScene
import os
import time
import Player
import Dialogs
import Song
import Config
import pygame
import Version
from Menu import Menu
from Settings import ConfigChoice, ActiveConfigChoice
from Language import _
from Camera import Camera
from Mesh import Mesh
from Texture import Texture
import Log #MFH
PRACTICE = 1
CAREER = 2
instrumentDiff = {
0 : (lambda a: a.diffGuitar),
1 : (lambda a: a.diffGuitar),
2 : (lambda a: a.diffBass),
3 : (lambda a: a.diffGuitar),
4 : (lambda a: a.diffDrums),
5 : (lambda a: a.diffVocals)
}
class SongChoosingScene(Scene):
def __init__(self, engine, libraryName = None, songName = None):
Scene.__init__(self, engine)
if self.engine.world.sceneName == "SongChoosingScene": #MFH - dual / triple loading cycle fix
Log.warn("Extra SongChoosingScene was instantiated, but detected and shut down. Cause unknown.")
raise SuppressScene #stump
else:
self.engine.world.sceneName = "SongChoosingScene"
if self.engine.config.get("debug", "use_new_song_database"):
Song.updateSongDatabase(self.engine)
self.wizardStarted = False
self.libraryName = libraryName
self.songName = songName
if not self.libraryName:
self.libraryName = self.engine.config.get("setlist", "selected_library")
if not self.libraryName:
self.libraryName = Song.DEFAULT_LIBRARY
if not self.songName:
self.songName = self.engine.config.get("setlist", "selected_song")
self.gameMode = self.engine.world.gameMode
self.careerMode = (self.gameMode == CAREER)
self.practiceMode = (self.gameMode == PRACTICE)
self.gameMode2p = self.engine.world.multiMode
self.autoPreview = not self.engine.config.get("audio", "disable_preview")
self.sortOrder = self.engine.config.get("game", "sort_order")
self.tut = self.engine.world.tutorial
self.playerList = self.players
self.gameStarted = False
self.gamePlayers = len(self.playerList)
self.parts = [None for i in self.playerList]
self.diffs = [None for i in self.playerList]
self.time = 0
self.lastTime = 0
self.mode = 0
self.moreInfo = False
self.moreInfoTime = 0
self.miniLobbyTime = 0
self.selected = 0
self.camera = Camera()
self.cameraOffset = 0.0
self.song = None
self.songLoader = None
self.loaded = False
text = _("Initializing Setlist...")
if self.engine.cmdPlay == 2:
text = _("Checking Command-Line Settings...")
elif len(self.engine.world.songQueue) > 0:
text = _("Checking Setlist Settings...")
elif len(self.engine.world.songQueue) == 0:
self.engine.world.playingQueue = False
self.splash = Dialogs.showLoadingSplashScreen(self.engine, text)
self.items = []
self.cmdPlay = False
self.queued = True
self.loadStartTime = time.time()
if self.tut == True:
self.library = self.engine.tutorialFolder
else:
self.library = os.path.join(self.engine.config.get("setlist", "base_library"), self.libraryName)
if not os.path.isdir(self.engine.resource.fileName(self.library)):
self.library = self.engine.resource.fileName(os.path.join(self.engine.config.get("setlist", "base_library"), Song.DEFAULT_LIBRARY))
self.searchText = ""
#user configurables and input management
self.listingMode = 0 #with libraries or List All
self.preloadSongLabels = False
self.showCareerTiers = 1+(self.careerMode and 1 or 0) #0-Never; 1-Career Only; 2-Always
self.scrolling = 0
self.scrollDelay = self.engine.config.get("game", "scroll_delay")
self.scrollRate = self.engine.config.get("game", "scroll_rate")
self.scrollTime = 0
self.scroller = [lambda: None, self.scrollUp, self.scrollDown]
self.scoreDifficulty = Song.difficulties[self.engine.config.get("game", "songlist_difficulty")]
self.scorePart = Song.parts[self.engine.config.get("game", "songlist_instrument")]
self.sortOrder = self.engine.config.get("game", "sort_order")
self.queueFormat = self.engine.config.get("game", "queue_format")
self.queueOrder = self.engine.config.get("game", "queue_order")
self.queueParts = self.engine.config.get("game", "queue_parts")
self.queueDiffs = self.engine.config.get("game", "queue_diff")
self.nilShowNextScore = self.engine.config.get("songlist", "nil_show_next_score")
#theme information
self.themename = self.engine.data.themeLabel
self.theme = self.engine.data.theme
#theme configurables
self.setlistStyle = self.engine.theme.setlist.setlistStyle #0 = Normal; 1 = List; 2 = Circular
self.headerSkip = self.engine.theme.setlist.headerSkip #items taken up by header (non-static only)
self.footerSkip = self.engine.theme.setlist.footerSkip #items taken up by footer (non-static only)
self.itemSize = self.engine.theme.setlist.itemSize #delta (X, Y) (0..1) for each item (non-static only)
self.labelType = self.engine.theme.setlist.labelType #Album covers (0) or CD labels (1)
self.labelDistance = self.engine.theme.setlist.labelDistance #number of labels away to preload
self.showMoreLabels = self.engine.theme.setlist.showMoreLabels #whether or not additional unselected labels are rendered on-screen
self.texturedLabels = self.engine.theme.setlist.texturedLabels #render the art as a texture?
self.itemsPerPage = self.engine.theme.setlist.itemsPerPage #number of items to show on screen
self.followItemPos = (self.itemsPerPage+1)/2
self.showLockedSongs = self.engine.theme.setlist.showLockedSongs #whether or not to even show locked songs
self.showSortTiers = self.engine.theme.setlist.showSortTiers #whether or not to show sorting tiers - career tiers take precedence.
self.selectTiers = self.engine.theme.setlist.selectTiers #whether or not tiers should be selectable as a quick setlist.
if self.engine.cmdPlay == 2:
self.songName = Config.get("setlist", "selected_song")
self.libraryName = Config.get("setlist", "selected_library")
self.cmdPlay = self.checkCmdPlay()
if self.cmdPlay:
Dialogs.hideLoadingSplashScreen(self.engine, self.splash)
return
elif len(self.engine.world.songQueue) > 0: |
aperigault/ansible | test/units/executor/test_task_executor.py | Python | gpl-3.0 | 18,912 | 0.001533 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import patch, MagicMock
from ansible.errors import AnsibleError
from ansible.executor.task_executor import TaskExecutor, remove_omit
from ansible.plugins.loader import action_loader, lookup_loader
from ansible.parsing.yaml.objects import AnsibleUnicode
from units.mock.loader import DictDataLoader
class TestTaskExecutor(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_task_executor_init(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
new_stdin = None
job_vars = dict()
mock_queue = MagicMock()
te = TaskExecutor(
host=mock_host,
task=mock_task,
job_vars=job_vars,
play_context=mock_play_context,
new_stdin=new_stdin,
loader=fake_loader,
shared_loader_obj=mock_shared_loader,
final_q=mock_queue,
)
def test_task_executor_run(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task._role._role_path = '/path/to/role/foo'
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
mock_queue = MagicMock()
new_stdin = None
job_vars = dict()
te = TaskExecutor(
host=mock_host,
task=mock_task,
job_vars=job_vars,
play_context=mock_play_context,
new_stdin=new_stdin,
loader=fake_loader,
shared_loader_obj=mock_shared_loader,
final_q=mock_queue,
)
te._get_loop_items = MagicMock(return_value=None)
te._execute = MagicMock(return_value=dict())
res = te.run()
te._get_loop_items = MagicMock(return_value=[])
res = te.run()
te._get_loop_items = MagicMock(return_value=['a', 'b', 'c'])
te._run_loop = MagicMock(return_value=[dict(item='a', changed=True), dict(item='b', failed=True), dict(item= | 'c')])
res = te.run()
te._get_loop_items = MagicMock(side_effect=AnsibleError(""))
res = te.run()
self.assertIn("failed", res)
def test_task_executor_get_loop_items(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task.loop_with = 'items'
mock_task.loop = ['a', 'b', 'c']
mock_play_context = MagicMo | ck()
mock_shared_loader = MagicMock()
mock_shared_loader.lookup_loader = lookup_loader
new_stdin = None
job_vars = dict()
mock_queue = MagicMock()
te = TaskExecutor(
host=mock_host,
task=mock_task,
job_vars=job_vars,
play_context=mock_play_context,
new_stdin=new_stdin,
loader=fake_loader,
shared_loader_obj=mock_shared_loader,
final_q=mock_queue,
)
items = te._get_loop_items()
self.assertEqual(items, ['a', 'b', 'c'])
def test_task_executor_run_loop(self):
items = ['a', 'b', 'c']
fake_loader = DictDataLoader({})
mock_host = MagicMock()
def _copy(exclude_parent=False, exclude_tasks=False):
new_item = MagicMock()
return new_item
mock_task = MagicMock()
mock_task.copy.side_effect = _copy
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
mock_queue = MagicMock()
new_stdin = None
job_vars = dict()
te = TaskExecutor(
host=mock_host,
task=mock_task,
job_vars=job_vars,
play_context=mock_play_context,
new_stdin=new_stdin,
loader=fake_loader,
shared_loader_obj=mock_shared_loader,
final_q=mock_queue,
)
def _execute(variables):
return dict(item=variables.get('item'))
te._squash_items = MagicMock(return_value=items)
te._execute = MagicMock(side_effect=_execute)
res = te._run_loop(items)
self.assertEqual(len(res), 3)
def test_task_executor_squash_items(self):
items = ['a', 'b', 'c']
fake_loader = DictDataLoader({})
mock_host = MagicMock()
loop_var = 'item'
def _evaluate_conditional(templar, variables):
item = variables.get(loop_var)
if item == 'b':
return False
return True
mock_task = MagicMock()
mock_task.evaluate_conditional.side_effect = _evaluate_conditional
mock_play_context = MagicMock()
mock_shared_loader = None
mock_queue = MagicMock()
new_stdin = None
job_vars = dict(pkg_mgr='yum')
te = TaskExecutor(
host=mock_host,
task=mock_task,
job_vars=job_vars,
play_context=mock_play_context,
new_stdin=new_stdin,
loader=fake_loader,
shared_loader_obj=mock_shared_loader,
final_q=mock_queue,
)
# No replacement
mock_task.action = 'yum'
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, ['a', 'b', 'c'])
self.assertIsInstance(mock_task.args, MagicMock)
mock_task.action = 'foo'
mock_task.args = {'name': '{{item}}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, ['a', 'b', 'c'])
self.assertEqual(mock_task.args, {'name': '{{item}}'})
mock_task.action = 'yum'
mock_task.args = {'name': 'static'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, ['a', 'b', 'c'])
self.assertEqual(mock_task.args, {'name': 'static'})
mock_task.action = 'yum'
mock_task.args = {'name': '{{pkg_mgr}}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, ['a', 'b', 'c'])
self.assertEqual(mock_task.args, {'name': '{{pkg_mgr}}'})
mock_task.action = '{{unknown}}'
mock_task.args = {'name': '{{item}}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, ['a', 'b', 'c'])
self.assertEqual(mock_task.args, {'name': '{{item}}'})
# Could do something like this to recover from bad deps in a package
job_vars = dict(pkg_mgr='yum', packages=['a', 'b'])
items = ['absent', 'latest']
mock_task.action = 'yum'
mock_task.args = {'name': '{{ packages }}', 'state': '{{ item }}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, items)
self.assertEqual(mock_task.args, {'name': '{{ packages }}', 'state': '{{ item }}'})
# Maybe should raise an error in this case. The user would have to specify:
# - yum: name="{{ packages[item] }}"
# with_items:
# |
qu0zl/pfss | pfss/migrations/0029_auto_20150518_1019.py | Python | mit | 731 | 0.001368 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('pfss', '0028_specialability_isstat'),
]
operations = [
migrations.AddField(
model_name='at | tack',
name='bonusToHit',
field=models.IntegerField(default=0),
preserve_default=True,
),
migrations.AlterField(
model_name='attack',
name='attackClass',
field=models.IntegerField(default=0, choices=[(0, b'Primary'), (1, b'Secondary'), (2 | , b'Light'), (3, b'One Handed'), (4, b'Two Handed')]),
preserve_default=True,
),
]
|
heracek/django-nonrel | tests/regressiontests/datatypes/tests.py | Python | bsd-3-clause | 4,241 | 0.005187 | import datetime
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.test import TestCase, skipIfDBFeature
from django.utils import tzinfo
from models import Donut, RumBaba
class DataTypesTestCase(TestCase):
def test_boolean_type(self):
d = Donut(name='Apple Fritter')
self.assertFalse(d.is_frosted)
self.assertTrue(d.has_sprinkles is None)
d.has_sprinkles = True
self.assertTrue(d.has_sprinkles)
d.save()
d2 = Donut.objects.get(name='Apple Fritter')
self.assertFalse(d2.is_frosted)
self.assertTrue(d2.has_sprinkles)
def test_date_type(self):
d = Donut(name='Apple Fritter')
d.baked_date = datetime.date(year=1938, month=6, day=4)
d.baked_time = datetime.time(hour=5, minute=30)
d.consumed_at = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
d.save()
d2 = Donut.objects.get(name='Apple Fritter')
self.assertEqual(d2.baked_date, datetime.date(1938, 6, 4))
self.assertEqual(d2.baked_time, datetime.time(5, 30))
self.assertEqual(d2.consumed_at, datetime.datetime(2007, 4, 20, 16, 19, 59))
def test_time_field(self):
#Test for ticket #12059: TimeField wrongly handling datetime.datetime object.
d = Donut(name='Apple Fritter')
d.baked_time = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
d.save()
d2 = Donut.objects.get(name='Apple Fritter')
self.assertEqual(d2.baked_time, datetime.time(16, 19, 59))
def test_year_boundaries(self):
"""Year boundary tests (ticket #3689)"""
d = Donut.objects.create(name='Date Test 2007',
baked_date=datetime.datetime(year=2007, month=12, day=31),
consumed_at=datetime.datetime(year=2007, month=12, day=31, hour=23, minute=59, second=59))
d1 = Donut.objects.create(name='Date Test 2006',
baked_date=datetime.datetime(year=2006, month=1, day=1),
consumed_at=datetime.datetime(year=2006, month=1, day=1))
self.assertEqual("Date Test 2007",
Donut.objects.filter(baked_date__year=2007)[0].name)
self.assertEqual("Date Test 2006",
Donut.objects.filter(baked_date__year=2006)[0].name)
d2 = Donut.objects.create(name='Apple Fritter',
consumed_at = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59))
self.assertEqual([u'Apple Fritter', u'Date Test 2007'],
list(Donut.objects.filter(consumed_at__year=2007).order_by('name').values_list('name', flat=True)))
self.assertEqual(0, Donut.objects.filter(consumed_at__year=2005).count())
self.assertEqual(0, Donut.objects.filter(consumed_at__year=2008).count())
def test_textfields_unicode(self):
"""Regression test for #10238: TextField values returned from the
database should be unicode."""
d = Donut.objects.create(name=u'Jelly Donut', review=u'Outstanding')
newd = Donut.objects.get(id=d.id)
self.assert_(isinstance(newd.review, unicode))
@skipIfDBFeature('supports_timezones')
def test_error_on_timezone(self):
"""Regression test for #8354: the MySQL and Oracle backends should raise
an error if given a timezone-aware datetime object."""
dt = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=tzinfo.FixedOffset(0))
d = Donut(name='Bear claw', consumed_at=dt)
self.assertRaises(ValueError, d.save)
# ValueError: MySQL backend does not support timezone-aware datetimes.
def test_datefield_auto_now_add(self):
"""Regression test for #10970, auto_now_add for DateField should store
a Python datetime.date, not a datetime.datet | ime"""
b = RumBaba.objects.create()
# Verify we didn't break DateTimeField behavior
self.assert_(isinstance(b. | baked_timestamp, datetime.datetime))
# We need to test this this way because datetime.datetime inherits
# from datetime.date:
self.assert_(isinstance(b.baked_date, datetime.date) and not isinstance(b.baked_date, datetime.datetime))
|
AxelMatstoms/rpg | world/entities/components/position.py | Python | gpl-3.0 | 308 | 0.006494 | from world.entities.components.compone | nt import Component
class Position(Component):
def __init__(self, entity, x, y):
super().__init__(entity)
self.x = x |
self.y = y
def pos(self):
return (self.x, self.y)
def cpos(self):
return (self.y, self.x)
|
miumok98/weblate | weblate/trans/models/dictionary.py | Python | gpl-3.0 | 6,786 | 0 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
from django.db import models
from django.db.models import Q
from django.utils.encoding import force_unicode
from weblate.lang.models import Language
from weblate.trans.formats import AutoFormat
from weblate.trans.models.project import Project
from weblate.trans.util import report_error
from django.core.urlresolvers import reverse
from whoosh.analysis import (
LanguageAnalyzer, StandardAnalyzer, StemmingAnalyzer, NgramAnalyzer
)
from whoosh.lang import has_stemmer
class DictionaryManager(models.Manager):
# pylint: disable=W0232
def upload(self, request, project, language, fileobj, method):
'''
Handles dictionary upload.
'''
from weblate.trans.models.changes import Change
store = AutoFormat.parse(fileobj)
ret = 0
# process all units
for dummy, unit in store.iterate_merge(False):
source = unit.get_source()
target = unit.get_target()
# Ignore too long words
if len(source) > 200 or len(target) > 200:
continue
# Get object
word, created = self.get_or_create(
project=project,
language=language,
source=source,
defaults={
'target': target,
},
)
# Already existing entry found
if not created:
# Same as current -> ignore
if target == word.target:
continue
if method == 'add':
# Add word
word = self.create(
request,
action=Change.ACTION_DICTIONARY_UPLOAD,
project=project,
language=language,
source=source,
target=target
)
elif method == 'overwrite':
# Update word
word.target = target
word.save()
ret += 1
return ret
def create(self, request, **kwargs):
'''
Creates new dictionary object.
'''
from weblate.trans.models.changes import Change
action = kwargs.pop('action', Change.ACTION_DICTIONARY_NEW)
created = super(DictionaryManager, self).create(**kwargs)
Change.objects.create(
action=action,
dictionary=created,
user=request.user,
target=created.target,
)
return created
def get_words(self, unit):
"""
Returns list of word pairs for an unit.
"""
words = set()
# Prepare analyzers
# - standard analyzer simply splits words
# - stemming extracts stems, to catch things like plurals
analyzers = [
StandardAnalyzer(),
StemmingAnalyzer(),
]
lang_code = unit.translation.language.base_code()
# Add per language analyzer if Whoosh has it
if has_stemmer(lang_code):
analyzers.append(LanguageAnalyzer(lang_code))
# Add ngram analyzer for languages like Chinese or Japanese
if unit.translation.language.uses_ngram():
analyzers.append(NgramAnalyzer(4))
# Extract words from all plurals and from context
for text in unit.get_source_plurals() + [unit.context]:
for analyzer in analyzers:
# Some Whoosh analyzers break on unicode
try:
words.update(
[token.text for token in analyzer(force_unicode(text))]
)
except (UnicodeDecodeError, IndexError) as error:
report_error(error, sys.exc_info())
# Grab all words in the dictionary
dictionary = self.filter(
project=unit.translation.subproject.project,
language=unit.translation.language
)
if len(words) == 0:
# No extracted words, no dictionary
dictionary = dictionary.none()
else:
# Build the query for fetching the words
# Can not use __in as we want case insensitive lookup
query = Q()
for word in words:
query |= Q(source__iexact=word)
# Filter dictionary
dictionary = dictionary.filter(query)
return dictionary
class Dictionary(models.Model):
project = models.ForeignKey(Project)
language = models.ForeignKey(Language)
source = models.CharField(max_length=200, db_index=True)
target = models.CharField(max_length=200)
objects = DictionaryManager()
class Meta(object):
ordering = ['source']
permissions = (
('upload_dictionary', "Can import dictionary"),
)
app_label = 'trans'
def __unicode__(self):
return '%s/%s: %s -> %s' % (
self.project,
self.language,
self.source,
self.target
)
def get_absolute_url(self):
return '%s?id=%d' % (
reverse(
'edit_dictionary',
kwargs={
'project': self.project.slug,
'lang': self.language.code
}
),
self.pk
)
def get_parent_url(self):
return reverse(
'show_dictionary',
kwargs={'project': self.project.slug, 'lang': self.language.code}
)
def edit(self, request, source, target):
'''
Edits word in a dictionary.
'''
from weblate.trans.models.changes import Change
self.source = source
self.ta | rget = target
self.save()
Change.objects.create(
action=Change.ACTION_DICTIONARY_EDIT,
| dictionary=self,
user=request.user,
target=self.target,
)
|
Flyingfox646/flyingfox | src/custom/rewards.py | Python | mit | 1,025 | 0 | """
examples:
# Tour awards
# available parameters stats/models.py/class Player
# streak 100 or more
def fighter_ace(player):
return player.streak_max >= 100
# total air kills 20 or more
def example_2(player):
if player.ak_total >= 20:
return True
# 20 air kills and 200 ground kills
def example_3(player):
| return player.ak_total >= 20 and player.gk_total >= 200
# Sortie awards
# available parameters stats/models.py/class Sortie
# 5 air kills in one sortie
def fighter_hero(sortie):
return sortie.ak_total >= 5
# Mission awards
# available parameters stats/models.py/class PlayerMission
# 10 air kills in one mission
def mission_fighter_hero(player_mission):
| return player_mission.ak_total >= 15
"""
# streak 100 or more
def fighter_ace(player):
return player.streak_max >= 100
# 5 air kills in one sortie
def fighter_hero(sortie):
return sortie.ak_total >= 5
# 10 air kills in one mission
def mission_hero(player_mission):
return player_mission.ak_total >= 15
|
jeffbryner/MozDef | alerts/ldap_add.py | Python | mpl-2.0 | 1,114 | 0.001795 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch
class ldapAdd(AlertTask):
def main(self):
search_query = SearchQuery(minutes=15)
search_query.add_must([
TermMatch('category', 'ldapChange'),
TermMatch('details | .changetype', 'add')
])
self.filtersManual(search_query)
# Search events
self.searchEventsSimple()
self.walkEvents()
# Set alert properties
def onEvent(self, event):
category = 'ldap'
tags = ['ldap']
severity = 'INFO'
summary='{0} added {1}'.format(event['_source']['det | ails']['actor'], event['_source']['details']['dn'])
# Create the alert object based on these properties
return self.createAlertDict(summary, category, tags, [event], severity)
|
fugwenna/bunkbot | src/rpg/duel_result.py | Python | mit | 623 | 0.00321 | from ..core.bunk_user import BunkUser
"""
Metadata class for easy embeds when a duel has completed
"""
class DuelResult:
def __init__(self, chal: BunkUser, opnt: BunkUser, winner: BunkUser, loser: BunkUser):
| self.challenger: BunkUser = chal
self.opponent: BunkUser = opnt
self.winner: BunkUser = winner
self.loser: BunkUser = loser
self.challenger_roll: int = 0
| self.opponent_roll: int = 0
self.challenger.is_dueling = False
self.challenger.challenged_by_id = None
self.opponent.is_dueling = False
self.opponent.challenged_by_id = None
|
cjcjameson/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/access_methods/ao_memory.py | Python | apache-2.0 | 2,282 | 0.009641 | #!/usr/bin/env python
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from gppylib.commands.base import Command
from tinctest import logger
from mpp.lib.PSQL import PSQL
f | rom mpp.models import MPPTestCase
import os
import re
import socket
import time
import shutil
import sys
import signal
class aoreadmemory | (MPPTestCase):
def tearDown(self):
gpfaultinjector = Command('fault injector',
'source $GPHOME/greenplum_path.sh; '
'gpfaultinjector -f malloc_failure '
'-y reset -H ALL -r primary')
gpfaultinjector.run()
def test_ao_malloc_failure(self):
"""
@product_version gpdb: [4.3.5.1 -]
"""
PSQL.run_sql_command('DROP table if exists ao_read_malloc')
PSQL.run_sql_command('create table ao_read_malloc (a int) with (appendonly=true, compresstype=quicklz)')
PSQL.run_sql_command('insert into ao_read_malloc '
'select * from generate_series(1, 1000)')
gpfaultinjector = Command('fault injector',
'source $GPHOME/greenplum_path.sh; '
'gpfaultinjector -f malloc_failure '
'-y error -H ALL -r primary')
gpfaultinjector.run()
res ={'rc':0, 'stdout':'', 'stderr':''}
PSQL.run_sql_command(sql_cmd='select count(*) from ao_read_malloc', results=res)
logger.info(res)
self.assertTrue("ERROR: fault triggered" in res['stderr'])
self.assertFalse("ERROR: could not temporarily connect to one or more segments" in res['stderr'])
logger.info('Pass')
|
muisit/freezer | gui/createvaultdialog.py | Python | gpl-3.0 | 1,774 | 0.006764 | #
# Copyright Muis IT 2011 - 2016
#
# This file is part of AWS Freezer
#
# AWS Freezer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as p | ublished by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AWS Freezer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AWS Freezer (see the | COPYING file).
# If not, see <http://www.gnu.org/licenses/>.
import globals
import guiobj
class CreateVaultDialog(guiobj.GUIObj):
def __init__(self,builder):
globals.GUI.createvaultdialog=self
self.window = builder.get_object('dialog_create_vault')
handlers ={
"createvault_button": self.createvault_button_clicked
}
builder.connect_signals(handlers)
self.set_entry(self.window, 'newvault_name','')
self.window.set_transient_for(globals.GUI.maindialog.window)
self.window.show_all()
def createvault_button_clicked(self, widget):
id = self.get_widget_id(widget)
globals.Reporter.message("createvault button clicked " + id,"gui")
if id == "button_createvault_ok":
globals.Reporter.message("creating vault","gui")
name = self.get_entry(self.window,"newvault_name")
globals.ActionFactory.create_vault(name)
else:
globals.Reporter.message("closing dialog","gui")
self.window.destroy()
globals.GUI.createvaultdialog=None |
imsparsh/python-social-auth | social/backends/email.py | Python | bsd-3-clause | 251 | 0 | """
Legacy Email backend, docs at:
http://psa.matiasaguirre.net/docs/backends/email.html
"""
from social.backends.legacy import LegacyAuth
class EmailAuth(LegacyAuth):
name = 'email'
| ID_KEY = 'email'
REQUIRES_ | EMAIL_VALIDATION = True
|
cathywu/Sentiment-Analysis | PyML-0.7.9/PyML/classifiers/ext/csvmodel.py | Python | gpl-2.0 | 14,618 | 0.011903 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.1
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# This file is compatible with both classic and new-style classes.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_csvmodel', [dirname(__file__)])
except ImportError:
import _csvmodel
return _csvmodel
if fp is not None:
try:
_mod = imp.load_module('_csvmodel', fp, pathname, description)
finally:
fp.close()
return _mod
_csvmodel = swig_import_helper()
del swig_import_helper
else:
import _csvmodel
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _csvmodel.delete_SwigPyIterator
__del__ = lambda self : None;
def value(self): return _csvmodel.SwigPyIterator_value(self)
def incr(self, n = 1): return _csvmodel.SwigPyIterator_incr(self, n)
def decr(self, n = 1): return _csvmodel.SwigPyIterator_decr(self, n)
def distance(self, *args): return _csvmodel.SwigPyIterator_distance(self, *args)
def equal(self, *args): return _csvmodel.SwigPyIterator_equal(self, *args)
def copy(self): return _csvmodel.SwigPyIterator_copy(self)
def next(self): return _csvmodel.SwigPyIterator_next(self)
def __next__(self): return _csvmodel.SwigPyIterator___next__(self)
def previous(self): return _csvmodel.SwigPyIterator_previous(self)
def advance(self, *args): return _csvmodel.SwigPyIterator_advance(self, *args)
def __eq__(self, *args): return _csvmodel.SwigPyIterator___eq__(self, *args)
def __ne__(self, *args): return _csvmodel.SwigPyIterator___ne__(self, *args)
def __iadd__(self, *args): return _csvmodel.SwigPyIterator___iadd__(self, *args)
def __isub__(self, *args): return _csvmodel.SwigPyIterator___isub__(self, *args)
def __add__(self, *args): return _csvmodel.SwigPyIterator___add__(self, *args)
def __sub__(self, *args): return _csvmodel.SwigPyIterator___sub__(self, *args)
def __iter__(self): return self
SwigPyIterator_swigregister = _csvmodel.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class IntVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, IntVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, IntVector, name)
__repr__ = _swig_repr
def iterator(self): return _csvmodel.IntVector_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _csvmodel.IntVector___nonzero__(self)
def __bool__(self): return _csvmodel.IntVector___bool_ | _(self)
def __len__(self): return _csvmodel.IntVector___len__(self)
def pop(self): retur | n _csvmodel.IntVector_pop(self)
def __getslice__(self, *args): return _csvmodel.IntVector___getslice__(self, *args)
def __setslice__(self, *args): return _csvmodel.IntVector___setslice__(self, *args)
def __delslice__(self, *args): return _csvmodel.IntVector___delslice__(self, *args)
def __delitem__(self, *args): return _csvmodel.IntVector___delitem__(self, *args)
def __getitem__(self, *args): return _csvmodel.IntVector___getitem__(self, *args)
def __setitem__(self, *args): return _csvmodel.IntVector___setitem__(self, *args)
def append(self, *args): return _csvmodel.IntVector_append(self, *args)
def empty(self): return _csvmodel.IntVector_empty(self)
def size(self): return _csvmodel.IntVector_size(self)
def clear(self): return _csvmodel.IntVector_clear(self)
def swap(self, *args): return _csvmodel.IntVector_swap(self, *args)
def get_allocator(self): return _csvmodel.IntVector_get_allocator(self)
def begin(self): return _csvmodel.IntVector_begin(self)
def end(self): return _csvmodel.IntVector_end(self)
def rbegin(self): return _csvmodel.IntVector_rbegin(self)
def rend(self): return _csvmodel.IntVector_rend(self)
def pop_back(self): return _csvmodel.IntVector_pop_back(self)
def erase(self, *args): return _csvmodel.IntVector_erase(self, *args)
def __init__(self, *args):
this = _csvmodel.new_IntVector(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _csvmodel.IntVector_push_back(self, *args)
def front(self): return _csvmodel.IntVector_front(self)
def back(self): return _csvmodel.IntVector_back(self)
def assign(self, *args): return _csvmodel.IntVector_assign(self, *args)
def resize(self, *args): return _csvmodel.IntVector_resize(self, *args)
def insert(self, *args): return _csvmodel.IntVector_insert(self, *args)
def reserve(self, *args): return _csvmodel.IntVector_reserve(self, *args)
def capacity(self): return _csvmodel.IntVector_capacity(self)
__swig_destroy__ = _csvmodel.delete_IntVector
__del__ = lambda self : None;
IntVector_swigregister = _csvmodel.IntVector_swigregister
IntVector_swigregister(IntVector)
class DoubleVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, DoubleVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, DoubleVector, name)
__repr__ = _swig_repr
def iterator(self): return _csvmodel.DoubleVector_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _csvmodel.DoubleVector___nonzero__(self)
def __bool__(self): return _csvmodel.DoubleVector___bool__(self)
def __len__(self): return _csvmodel.DoubleVector___len__(self)
def pop(self): return _csvmodel.DoubleVector_pop(self)
def __getslice__(self, *args): return _csvmodel.DoubleVector___getslice__(self, *args)
def __setslice__(self, *args): return _csvmodel.DoubleVector___setslice__(self, *args)
def __delslice__(self, *args): return _csvmodel.DoubleVector___delslice__(self, *args)
def __delitem__(self, *args): return _csvmodel.DoubleVector___delitem__(self, *args)
def __getitem__(self, *args): return _csvmodel.DoubleVector___getitem__(self, *args)
def __setitem__(self, *args): return _csvmodel.DoubleVector___setitem__(self, *args)
def append(self, *args): return _csvmodel.DoubleVector_append(self, *args)
def empty(self |
DeppSRL/open-partecipate | project/open_partecipate/management/commands/fix_provincie_autonome.py | Python | bsd-3-clause | 995 | 0.00402 | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from ...territori.models import Territorio
class Command(BaseCommand):
help = 'Fix for provincie autonome'
def handle(self, *args, **options):
Territorio.objects.regioni().get(denominazione='TRENTINO-ALTO ADIGE/S | UDTIROL').delete()
for name in ['BOLZANO', 'TRENTO']:
territorio = Territorio.objects.provincie().get(denominazione__istartswith=name)
territorio.pk = None
territorio | .tipo = Territorio.TIPO.R
territorio.cod_reg = territorio.cod_prov
territorio.cod_prov = None
territorio.denominazione = 'P.A. DI {}'.format(name)
territorio.slug = None
territorio.save()
Territorio.objects.provincie().filter(cod_prov=territorio.cod_reg).update(cod_reg=territorio.cod_reg)
Territorio.objects.comuni().filter(cod_prov=territorio.cod_reg).update(cod_reg=territorio.cod_reg)
|
indico/indico | indico/modules/users/models/suggestions.py | Python | mit | 2,202 | 0.002725 | # This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico | is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core.db import db
from indico.util.string import format_repr
class SuggestedCategory(db.Model):
__tablename__ = 'suggested_categories'
__table_args__ = {'schema': 'users'}
user_id = db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
primary_key=True,
index=True, |
autoincrement=False
)
category_id = db.Column(
db.Integer,
db.ForeignKey('categories.categories.id'),
primary_key=True,
index=True,
autoincrement=False
)
is_ignored = db.Column(
db.Boolean,
nullable=False,
default=False
)
score = db.Column(
db.Float,
nullable=False,
default=0
)
category = db.relationship(
'Category',
lazy=False,
backref=db.backref(
'suggestions',
lazy=True,
cascade='all, delete-orphan'
)
)
# relationship backrefs:
# - user (User.suggested_categories)
def __repr__(self):
return format_repr(self, 'user_id', 'category_id', 'score', is_ignored=False)
@classmethod
def merge_users(cls, target, source):
"""Merge the suggestions for two users.
:param target: The target user of the merge.
:param source: The user that is being merged into `target`.
"""
target_suggestions = {x.category: x for x in target.suggested_categories}
for suggestion in source.suggested_categories:
new_suggestion = target_suggestions.get(suggestion.category) or cls(user=target,
category=suggestion.category,
score=0)
new_suggestion.score = max(new_suggestion.score, suggestion.score)
new_suggestion.is_ignored = new_suggestion.is_ignored or suggestion.is_ignored
db.session.flush()
|
alonho/pql | setup.py | Python | bsd-3-clause | 929 | 0.004306 | from setuptools import setup
__version__ = '0.5.0'
setup(name='pql',
version=__version__,
description='A python expression to MongoDB query translator',
author='Alon Horev',
author_email='alon@horev.net',
url='https://github.com/alonho/pql',
classifiers = ["Development Sta | tus :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.5",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: Mac | OS X"],
license='BSD',
# I know it's bad practice to not specify a pymongo version, but we only
# require the bson.ObjectId type, It's safe to assume it won't change (famous last words)
install_requires=['pymongo',
'python-dateutil'],
packages=['pql']) |
kkopachev/thumbor | thumbor/server.py | Python | mit | 4,551 | 0.00022 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
import logging
import logging.config
import os
import socket
import sys
import warnings
from os.path import dirname, expanduser
from shutil import which
import tornado.ioloop
from PIL import Image
from tornado.httpserver import HTTPServer
from tornado.netutil import bind_unix_socket
from thumbor.config import Config
from thumbor.console import get_server_parameters
from thumbor.context import Context
from thumbor.importer import Importer
from thumbor.signal_handler import setup_signal_handler
def get_as_integer(value):
try:
return int(value)
except (ValueError, TypeError):
return None
def get_config(config_path, use_environment=False):
if use_environment:
Config.allow_environment_variables()
lookup_paths = [os.curdir, expanduser("~"), "/etc/", dirname(__file__)]
return Config.load(config_path, conf_name="thumbor.conf", lookup_paths=lookup_paths)
def configure_log(config, log_level):
if config.THUMBOR_LOG_CONFIG and config.THUMBOR_LOG_CONFIG != "":
logging.config.dictConfig(config.THUMBOR_LOG_CONFIG)
else:
logging.basicConfig(
level=getattr(logging, log_level),
format=config.THUMBOR_LOG_FORMAT,
datefmt=config.THUMBOR_LOG_DATE_FORMAT,
)
def get_importer(config):
importer = Importer(config)
importer.import_modules()
if importer.error_handler_class is not None:
importer.error_handler = importer.error_handler_class(config)
return importer
def validate_config(config, server_parameters):
    """Apply config-derived defaults to *server_parameters* and fail fast on
    invalid setups.

    Raises RuntimeError when no usable security key is available, or when the
    gifsicle engine is enabled but the ``gifsicle`` binary is not on the PATH.
    """
    key = server_parameters.security_key
    if key is None:
        key = config.SECURITY_KEY
        server_parameters.security_key = key
    if not isinstance(key, (bytes, str)):
        raise RuntimeError(
            "No security key was found for this instance of thumbor. "
            + "Please provide one using the conf file or a security key file."
        )
    if config.ENGINE or config.USE_GIFSICLE_ENGINE:
        # Error on Image.open when image pixel count is above MAX_IMAGE_PIXELS
        warnings.simplefilter("error", Image.DecompressionBombWarning)
    if config.USE_GIFSICLE_ENGINE:
        gifsicle = which("gifsicle")
        server_parameters.gifsicle_path = gifsicle
        if gifsicle is None:
            raise RuntimeError(
                "If using USE_GIFSICLE_ENGINE configuration to True,"
                " the `gifsicle` binary must be in the PATH "
                "and must be an executable."
            )
def get_context(server_parameters, config, importer):
    """Create the application ``Context`` bundling server params, config and importer."""
    return Context(
        server=server_parameters,
        config=config,
        importer=importer,
    )
def get_application(context):
    """Import the configured application class and instantiate it with *context*."""
    app_class = context.modules.importer.import_class(context.app_class)
    return app_class(context)
def run_server(application, context):
    """Bind a tornado ``HTTPServer`` for *application* and start its processes.

    Binding honours ``context.server.fd`` when set: an integer is treated as an
    inherited socket file descriptor, anything else as a unix socket path.
    Otherwise the configured ip/port pair is used. Returns the server.
    """
    server_cfg = context.server
    http_server = HTTPServer(application, xheaders=True)
    fd = server_cfg.fd
    if fd is None:
        http_server.bind(server_cfg.port, server_cfg.ip)
        logging.debug(
            "thumbor starting at %s:%d", server_cfg.ip, server_cfg.port
        )
    else:
        fd_number = get_as_integer(fd)
        if fd_number is None:
            # Not an integer: interpret the value as a unix socket path.
            sock = bind_unix_socket(fd)
        else:
            sock = socket.fromfd(
                fd_number, socket.AF_INET | socket.AF_INET6, socket.SOCK_STREAM
            )
        http_server.add_socket(sock)
        logging.debug(
            "thumbor starting at fd %s", fd
        )
    http_server.start(server_cfg.processes)
    return http_server
def main(arguments=None):
    """Runs thumbor server with the specified arguments.

    Parses CLI arguments, loads and validates the configuration, sets up
    logging, imports the configured modules, then starts the tornado IOLoop
    (this call blocks until the server is stopped).
    """
    if arguments is None:
        arguments = sys.argv[1:]
    server_parameters = get_server_parameters(arguments)
    config = get_config(
        server_parameters.config_path, server_parameters.use_environment
    )
    configure_log(config, server_parameters.log_level.upper())
    # Fixed: identifiers below were corrupted by extraction artifacts
    # ("val | idate_config", "run_server( | application").
    validate_config(config, server_parameters)
    importer = get_importer(config)
    with get_context(server_parameters, config, importer) as context:
        application = get_application(context)
        server = run_server(application, context)
        setup_signal_handler(server, config)
        tornado.ioloop.IOLoop.instance().start()


if __name__ == "__main__":
    main(sys.argv[1:])
|
mozilla-b2g/fxos-certsuite | mcts/certsuite/test_cert.py | Python | mpl-2.0 | 2,006 | 0.006979 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from cert import test_user_agent
class Logger(object):
    """Dummy logger exposing the interface ``test_user_agent`` expects."""

    # Fixed: first parameter renamed from the non-idiomatic `this` to `self`.
    def test_status(self, *args):
        # Discard all status output; the unit tests do not inspect it.
        pass
class TestCert(unittest.TestCase):
    """Unit tests for ``cert.test_user_agent`` (user-agent string validation)."""
    def setUp(self):
        # Each test gets a fresh dummy logger to satisfy test_user_agent's API.
        self.logger = Logger()
    def test_test_user_agent(self):
        # Android UAs must be rejected; B2G Mobile/Tablet UAs accepted.
        self.assertFalse(test_user_agent("Mozilla/5.0 (Android; Mobile; rv:14.0) Gecko/14.0 Firefox/14.0", self.logger), "android")
        self.assertTrue(test_user_agent("Mozilla/5.0 (Mobile; rv:26.0) Gecko/26.0 Firefox/26.0", self.logger), "mobile")
        self.assertTrue(test_user_agent("Mozilla/5.0 (Tablet; rv:26.0) Gecko/26.0 Firefox/26.0", self.logger), "tablet")
        # An optional device token may follow "Mobile;" but must not contain
        # whitespace or the characters ; / ( ).
        self.assertTrue(test_user_agent("Mozilla/5.0 (Mobile; nnnn; rv:26.0) Gecko/26.0 Firefox/26.0", self.logger), "example device")
        self.assertFalse(test_user_agent("Mozilla/5.0 (Mobile; nn nn; rv:26.0) Gecko/26.0 Firefox/26.0", self.logger), "invalid device")
        self.assertFalse(test_user_agent("Mozilla/5.0 (Mobile; nn;nn; rv:26.0) Gecko/26.0 Firefox/26.0", self.logger), "invalid device")
        self.assertFalse(test_user_agent("Mozilla/5.0 (Mobile; nn/nn; rv:26.0) Gecko/26.0 Firefox/26.0", self.logger), "invalid device")
        self.assertFalse(test_user_agent("Mozilla/5.0 (Mobile; nn(nn; rv:26.0) Gecko/26.0 Firefox/26.0", self.logger), "invalid device")
        self.assertFalse(test_user_agent("Mozilla/5.0 (Mobile; nn)nn; rv:26.0) Gecko/26.0 Firefox/26.0", self.logger), "invalid device")
        # Surrounding-whitespace variants are tolerated.
        self.assertTrue(test_user_agent("Mozilla/5.0 (Mobile; nnnn ; rv:26.0) Gecko/26.0 Firefox/26.0", self.logger), "extra whitespace in device")
        self.assertTrue(test_user_agent("Mozilla/5.0 (Mobile;nnnn; rv:26.0) Gecko/26.0 Firefox/26.0", self.logger), "no whitespace in device")
if __name__ == '__main__':
    unittest.main()
|
chubbymaggie/angr | angr/procedures/win32/dynamic_loading.py | Python | bsd-2-clause | 3,321 | 0.003613 | import angr
import claripy
import logging
l = logging.getLogger('angr.procedures.win32.dynamic_loading')
class LoadLibraryA(angr.SimProcedure):
    """SimProcedure for kernel32 ``LoadLibraryA``: reads an ASCII library name
    from guest memory and dynamically loads that library into the project."""
    def run(self, lib_ptr):
        # Concretize the NUL-terminated ASCII name the guest passed in.
        lib = self.state.mem[lib_ptr].string.concrete
        return self.load(lib)
    def load(self, lib):
        """Load *lib* via the CLE loader; return its mapped base, or 0 on failure."""
        # Windows appends ".dll" when the requested name has no extension.
        if '.' not in lib:
            lib += '.dll'
        loaded = self.project.loader.dynamic_load(lib)
        if loaded is None:
            # Load failed -- LoadLibrary returns NULL on failure.
            return 0
        # Add simprocedures
        for obj in loaded:
            self.register(obj)
        l.debug("Loaded %s", lib)
        # The returned base address plays the role of the HMODULE handle.
        return self.project.loader.find_object(lib).mapped_base
    def register(self, obj): # can be overridden for instrumentation
        self.project._register_object(obj)
class LoadLibraryExW(LoadLibraryA):
    """``LoadLibraryExW`` variant: the library name is a wide (UTF-16) string;
    the two extra flag arguments are accepted but ignored."""
    # Fixed: the base-class name was corrupted by an extraction artifact
    # ("Lo | adLibraryA").
    def run(self, lib_ptr, flag1, flag2):
        lib = self.state.mem[lib_ptr].wstring.concrete
        return self.load(lib)

# if you subclass LoadLibraryA to provide register, you can implement LoadLibraryExW by making an empty class that just
# subclasses your special procedure and LoadLibraryExW
class GetProcAddress(angr.SimProcedure):
    """SimProcedure for kernel32 ``GetProcAddress``: resolves an export (by name
    or by ordinal) from a loaded PE object and returns its rebased address.

    Every resolved "<lib>.<name>" string is also recorded per-state in
    ``state.globals`` under ``KEY`` (see the ``procs`` property below).
    """
    def run(self, lib_handle, name_addr):
        if lib_handle.symbolic:
            raise angr.errors.SimValueError("GetProcAddress called with symbolic library handle %s" % lib_handle)
        lib_handle = self.state.se.eval(lib_handle)
        # A NULL handle refers to the main binary; otherwise match the handle
        # against the mapped base of each loaded PE object.
        if lib_handle == 0:
            obj = self.project.loader.main_object
        else:
            for obj in self.project.loader.all_pe_objects:
                if obj.mapped_base == lib_handle:
                    break
            else:
                l.warning("GetProcAddress: invalid library handle %s", lib_handle)
                return 0
        # Per the GetProcAddress convention, values below 0x10000 are ordinals
        # rather than string pointers.
        if claripy.is_true(name_addr < 0x10000):
            # this matches the bogus name specified in the loader...
            ordinal = self.state.se.eval(name_addr)
            name = 'ordinal.%d.%s' % (ordinal, obj.provides)
        else:
            name = self.state.mem[name_addr].string.concrete
        full_name = '%s.%s' % (obj.provides, name)
        self.procs.add(full_name)
        sym = obj.get_symbol(name)
        if sym is None and name.endswith('@'):
            # There seems to be some mangling parsing being done in the linker?
            # I don't know what I'm doing
            for suffix in ['Z', 'XZ']:
                sym = obj.get_symbol(name + suffix)
                if sym is not None:
                    name = name + suffix
                    break
        if sym is None:
            l.warning("GetProcAddress: object %s does not contain %s", obj.provides, name)
            return 0
        # Follow export forwarders to the symbol's real definition, if any.
        sym = sym.resolve_forwarder()
        if sym is None:
            l.warning("GetProcAddress: forwarding failed for %s from %s", name, obj.provides)
            return 0
        name = sym.name # fix ordinal names
        # Re-record under the final (forwarded / ordinal-resolved) name.
        full_name = '%s.%s' % (obj.provides, name)
        self.procs.add(full_name)
        l.debug("Imported %s (%#x) from %s", name, sym.rebased_addr, obj.provides)
        return sym.rebased_addr
    # state.globals key under which resolved procedure names are collected
    KEY = 'dynamically_loaded_procedures'
    @property
    def procs(self):
        # Lazily create the per-state set of resolved "<lib>.<name>" strings.
        try:
            return self.state.globals[self.KEY]
        except KeyError:
            x = set()
            self.state.globals[self.KEY] = x
            return x
|
wimberosa/samba | source4/scripting/devel/repl_cleartext_pwd.py | Python | gpl-3.0 | 15,156 | 0.006796 | #!/usr/bin/env python
#
# Copyright Stefan Metzmacher 2011-2012
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This is useful to sync passwords from an AD domain.
#
# $
# $ source4/scripting/devel/repl_cleartext_pwd.py \
# -Uadministrator%A1b2C3d4 \
# 172.31.9.219 DC=bla,DC=base /tmp/cookie cleartext_utf8 131085 displayName
# # starting at usn[0]
# dn: CN=Test User1,CN=Users,DC=bla,DC=base
# cleartext_utf8: A1b2C3d4
# displayName:: VABlAHMAdAAgAFUAcwBlAHIAMQA=
#
# # up to usn[16449]
# $
# $ source4/scripting/devel/repl_cleartext_pwd.py \
# -Uadministrator%A1b2C3d4
# 172.31.9.219 DC=bla,DC=base cookie_file cleartext_utf8 131085 displayName
# # starting at usn[16449]
# # up to usn[16449]
# $
#
import sys
# Find right direction when running from source tree
sys.path.insert(0, "bin/python")
import samba.getopt as options
from optparse import OptionParser
from samba.dcerpc import drsuapi, drsblobs, misc
from samba.ndr import ndr_pack, ndr_unpack, ndr_print
import binascii
import hashlib
import Crypto.Cipher.ARC4
import struct
import os
from ldif import LDIFWriter
class globals:
    """Accumulates per-DN attribute dictionaries and dumps them as LDIF.

    (Note: the class name shadows the ``globals`` builtin; kept for
    backward compatibility with existing callers.)
    """
    def __init__(self):
        # dn -> {attribute name: values}
        self.global_objs = {}
        self.ldif = LDIFWriter(sys.stdout)
    def add_attr(self, dn, attname, vals):
        # Record vals under dn/attname, creating the per-DN dict on demand.
        entry = self.global_objs.setdefault(dn, {})
        entry[attname] = vals
    def print_all(self):
        # Emit every accumulated entry as LDIF on stdout, then reset the store.
        for dn, obj in self.global_objs.items():
            self.ldif.unparse(dn, obj)
        self.global_objs = {}
########### main code ###########
if __name__ == "__main__":
parser = OptionParser("repl_cleartext_pwd.py [options] server dn cookie_file cleartext_name [attid attname]")
sambaopts = options.SambaOptions(parser)
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
(opts, args) = parser.parse_args()
if len(args) < 4 or len(args) == 5:
parser.error("more arguments required")
server = args[0]
dn = args[1]
cookie_file = args[2]
if len(cookie_file) == 0:
cookie_file = None
cleartext_name = args[3]
if len(args) >= 5:
attid = int(args[4])
attname = args[5]
else:
attid = -1
attname = None
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
if not creds.authentication_requested():
parser.error("You must supply credentials")
gls = globals()
try:
f = open(cookie_file, 'r')
store_blob = f.read()
f.close()
store_hdr = store_blob[0:28]
(store_version, \
store_dn_len, store_dn_ofs, \
store_hwm_len, store_hwm_ofs, \
store_utdv_len, store_utdv_ofs) = \
struct.unpack("<LLLLLLL", store_hdr)
store_dn = store_blob[store_dn_ofs:store_dn_ofs+store_dn_len]
store_hwm_blob = store_blob[store_hwm_ofs:store_hwm_ofs+store_hwm_len]
store_utdv_blob = store_blob[store_utdv_ofs:store_utdv_ofs+store_utdv_len]
store_hwm = ndr_unpack(drsuapi.DsReplicaHighWaterMark, store_hwm_blob)
store_utdv = ndr_unpack(drsblobs.replUpToDateVectorBlob, store_utdv_blob)
assert store_dn == dn
#print "%s" % ndr_print(store_hwm)
#print "%s" % ndr_print(store_utdv)
except:
store_dn = dn
store_hwm = drsuapi.DsReplicaHighWaterMark()
store_hwm.tmp_highest_usn = 0
store_hwm.reserved_usn = 0
store_hwm.highest_usn = 0
store_utdv = None
binding_str = "ncacn_ip_tcp:%s[spnego,seal]" % server
drs_conn = drsuapi.drsuapi(binding_str, lp, creds)
bind_info = drsuapi.DsBindInfoCtr()
bind_info.length = 28
bind_info.info = drsuapi.DsBindInfo28()
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_BASE
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ASYNC_REPLICATION
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_REMOVEAPI
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_MOVEREQ_V2
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHG_COMPRESS
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V1
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_KCC_EXECUTE
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY_V2
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_LINKED_VALUE_REPLICATION
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V2
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_CRYPTO_BIND
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GET_REPL_INFO
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V01
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_TRANSITIVE_MEMBERSHIP
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADD_SID_HISTORY
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPO | RTED_EXTENSION_GET_MEMBERSHIPS2
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V6
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_NONDOMAIN_NCS
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPO | RTED_EXTENSION_GETCHGREQ_V8
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V5
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V7
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_VERIFY_OBJECT
(info, drs_handle) = drs_conn.DsBind(misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID), bind_info)
null_guid = misc.GUID()
naming_context = drsuapi.DsReplicaObjectIdentifier()
naming_context.dn = dn
highwatermark = store_hwm
uptodateness_vector = None
if store_utdv is not None:
uptodateness_vector = drsuapi.DsReplicaCursorCtrEx()
if store_utdv.version == 1:
uptodateness_vector.cursors = store_utdv.cursors
elif store_utdv.version == 2:
cursors = []
for i in range(0, store_utdv.ctr.count):
cursor = drsuapi.DsReplicaCursor()
cursor.source_dsa_invocation_id = store_utdv.ctr.cursors[i].source_dsa_invocation_id
cursor.highest_usn = store_utdv.ctr.cursors[i].highest_usn
cursors.append(cursor)
uptodateness_vector.cursors = cursors
req8 = drsuapi.DsGetNCChangesRequest8()
req8.destination_dsa_guid = null_guid
req8.source_dsa_invocation_id = null_guid
req8.naming_context = naming_context
req8.highwatermark = highwatermark
req8.uptodateness_vector |
mlavin/tincan | shoestring/app.py | Python | bsd-2-clause | 1,584 | 0.004419 | import os
import time
from importlib import import_module
from tornado.ioloop import IOLoop
from tornado.web import Application
from .handlers import CreateRoomHandler, GetRoomHandler, SocketHandler, IndexHandler
class ShoestringApplication(Application):
    """Tornado application wiring the room/socket handlers to a pluggable backend.

    The backend is chosen with the ``backend`` keyword (a dotted module path,
    defaulting to ``shoestring.backends.memory``); that module must expose a
    ``Backend`` class. Remaining keyword arguments override the default
    application settings.
    """
    # Fixed: extraction artifacts removed (a stray "| " before the
    # 'template_path' entry and a "|" inside the shutdown docstring).

    def __init__(self, **kwargs):
        backend_name = kwargs.pop('backend', 'shoestring.backends.memory')
        backend_module = import_module(backend_name)
        try:
            backend_class = getattr(backend_module, 'Backend')
        except AttributeError as e:
            msg = 'Module "{}" does not define a Backend class.'.format(backend_name)
            raise ImportError(msg) from e
        self.backend = backend_class()
        routes = [
            (r'/rooms$', CreateRoomHandler, {'backend': self.backend}),
            (r'/rooms/(?P<room>[0-9]+)$', GetRoomHandler, {'backend': self.backend}),
            (r'/socket$', SocketHandler, {'backend': self.backend}),
            (r'/$', IndexHandler),
        ]
        settings = {
            'template_path': os.path.join(os.path.dirname(__file__), os.pardir, 'templates'),
            'static_path': os.path.join(os.path.dirname(__file__), os.pardir, 'static'),
            'static_url_prefix': '/static/',
            'secret': os.environ.get('SHOESTRING_SECRET_KEY', str(os.urandom(75))),
        }
        # Caller-supplied settings override the defaults above.
        settings.update(kwargs)
        super().__init__(routes, **settings)

    def shutdown(self, graceful=True):
        """Shutdown of the application server. Might be immediate or graceful."""
        self.backend.shutdown(graceful=graceful)
|
martinahogg/machinelearning | tools/numpy-examples.py | Python | apache-2.0 | 1,533 | 0.02544 | import numpy as np
# Worked numpy linear-algebra examples.
# Fixed: extraction artifacts removed ("np | .inner", "np.li | nalg.inv").

# Inner (or dot) product
a = np.array([1,2])
b = np.array([3,4])
np.inner(a, b)
a.dot(b)

# Outer product
a = np.array([1,2])
b = np.array([3,4])
np.outer(a, b)

# Inverse
m = np.array([[1,2], [3,4]])
np.linalg.inv(m)

# Inner (or dot) product of a matrix with its inverse (yields the identity)
m = np.array([[1,2], [3,4]])
minv = np.linalg.inv(m)
m.dot(minv)

# Diagonal
m = np.array([[1,2], [3,4]])
np.diag(m)
m = np.array([1,2])
np.diag(m)

# Determinant
m = np.array([[1,2], [3,4]])
np.linalg.det(m)

# Trace - sum of elements of the diagonal
m = np.array([[1,2], [3,4]])
np.diag(m)
np.diag(m).sum()
np.trace(m)

# Transpose
m = np.array([ [1,2], [3,4] ])
m.T

# Gaussian distribution
m = np.random.randn(2,3)
m

# Covariance
X = np.random.randn(100,3)
np.cov(X.T)

# Eigen vectors and values
# For symmetric matrix (m == m.T) and hermitian matrix (m = m.H) we use eigh.
m = np.array([
    [ 0.89761228,  0.00538701, -0.03229084],
    [ 0.00538701,  1.04860676, -0.25001666],
    [-0.03229084, -0.25001666,  0.81116126]])
# The first tuple contains three Eigen values.
# The second tuple contains Eigen vectors stored in columns.
np.linalg.eigh(m)

# Solving linear systems
# The admissions fee at a small far is $1.50 for children an $4.00 for adults.
# On a certain day 2,200 people enter the fair and $5050 is collected.
# How many children and how many adults attended.
#
# Let X1 = number of children
# Let X2 = number of adults
# X1 + X2 = 2200
# 1.5X1 + 4X2 = 5050
a = np.array([ [1,1], [1.5,4] ])
b = np.array( [ 2200, 5050] )
np.linalg.solve(a, b)
zestyr/lbry | lbrynet/core/PaymentRateManager.py | Python | mit | 4,661 | 0.001287 | from lbrynet.core.Strategy import get_default_strategy, OnlyFreeStrategy
from lbrynet import conf
from decimal import Decimal
class BasePaymentRateManager(object):
    """Holds the daemon-wide minimum payment rates for blob data and blob info.

    Either rate, when not supplied, is read from ``conf.settings``.
    """
    def __init__(self, rate=None, info_rate=None):
        if rate is None:
            rate = conf.settings['data_rate']
        if info_rate is None:
            info_rate = conf.settings['min_info_rate']
        self.min_blob_data_payment_rate = rate
        self.min_blob_info_payment_rate = info_rate
class PaymentRateManager(object):
    """Per-session payment rate manager backed by a ``BasePaymentRateManager``."""
    # Fixed: method name below was corrupted by an extraction artifact
    # ("get_effective_min_blo | b_data_payment_rate").

    def __init__(self, base, rate=None):
        """
        @param base: a BasePaymentRateManager
        @param rate: the min blob data payment rate
        """
        self.base = base
        self.min_blob_data_payment_rate = rate
        self.points_paid = 0.0

    def get_rate_blob_data(self, peer):
        # The peer is currently ignored; every peer gets the same rate.
        return self.get_effective_min_blob_data_payment_rate()

    def accept_rate_blob_data(self, peer, payment_rate):
        return payment_rate >= self.get_effective_min_blob_data_payment_rate()

    def get_effective_min_blob_data_payment_rate(self):
        # Fall back to the base manager's rate when no override was given.
        if self.min_blob_data_payment_rate is None:
            return self.base.min_blob_data_payment_rate
        return self.min_blob_data_payment_rate

    def record_points_paid(self, amount):
        self.points_paid += amount
class NegotiatedPaymentRateManager(object):
    """Payment rate manager that negotiates per-peer rates via a pricing strategy."""
    # Fixed: the class name was corrupted by an extraction artifact
    # ("NegotiatedPaymentRateManag | er").

    def __init__(self, base, availability_tracker, generous=None):
        """
        @param base: a BasePaymentRateManager
        @param availability_tracker: a BlobAvailabilityTracker
        @param generous: whether to act as a generous host (defaults to conf)
        """
        self.base = base
        self.min_blob_data_payment_rate = base.min_blob_data_payment_rate
        self.points_paid = 0.0
        self.blob_tracker = availability_tracker
        self.generous = generous if generous is not None else conf.settings['is_generous_host']
        # NOTE(review): the raw `generous` argument (possibly None) is passed to
        # the strategy instead of self.generous -- confirm that is intentional.
        self.strategy = get_default_strategy(self.blob_tracker,
                                             base_price=self.base.min_blob_data_payment_rate,
                                             is_generous=generous)

    def get_rate_blob_data(self, peer, blobs):
        response = self.strategy.make_offer(peer, blobs)
        return response.rate

    def accept_rate_blob_data(self, peer, blobs, offer):
        offer = self.strategy.respond_to_offer(offer, peer, blobs)
        self.strategy.update_accepted_offers(peer, offer)
        return offer.is_accepted

    def reply_to_offer(self, peer, blobs, offer):
        reply = self.strategy.respond_to_offer(offer, peer, blobs)
        self.strategy.update_accepted_offers(peer, reply)
        return reply

    def get_rate_for_peer(self, peer):
        return self.strategy.accepted_offers.get(peer, False)

    def record_points_paid(self, amount):
        self.points_paid += amount

    def record_offer_reply(self, peer, offer):
        self.strategy.update_accepted_offers(peer, offer)

    def price_limit_reached(self, peer):
        # The limit is reached when the peer's pending offer was rejected as too
        # low while already at (or above) the strategy's maximum rate.
        if peer in self.strategy.pending_sent_offers:
            offer = self.strategy.pending_sent_offers[peer]
            return (offer.is_too_low and
                    round(Decimal.from_float(offer.rate), 5) >= round(self.strategy.max_rate, 5))
        return False
class OnlyFreePaymentsManager(object):
    def __init__(self, **kwargs):
        """
        A payment rate manager that will only ever accept and offer a rate of 0.0,
        Used for testing
        """
        self.base = BasePaymentRateManager(0.0, 0.0)
        self.points_paid = 0.0
        self.generous = True
        self.strategy = OnlyFreeStrategy()

    def get_rate_blob_data(self, peer, blobs):
        # The free strategy always offers rate 0.0.
        return self.strategy.make_offer(peer, blobs).rate

    def accept_rate_blob_data(self, peer, blobs, offer):
        decision = self.strategy.respond_to_offer(offer, peer, blobs)
        self.strategy.update_accepted_offers(peer, decision)
        return decision.is_accepted

    def reply_to_offer(self, peer, blobs, offer):
        decision = self.strategy.respond_to_offer(offer, peer, blobs)
        self.strategy.update_accepted_offers(peer, decision)
        return decision

    def get_rate_for_peer(self, peer):
        return self.strategy.accepted_offers.get(peer, False)

    def record_points_paid(self, amount):
        self.points_paid += amount

    def record_offer_reply(self, peer, offer):
        self.strategy.update_accepted_offers(peer, offer)

    def price_limit_reached(self, peer):
        # Any non-zero pending rate exceeds the (free-only) limit.
        if peer not in self.strategy.pending_sent_offers:
            return False
        return self.strategy.pending_sent_offers[peer].rate > 0.0
|
ahmedaljazzar/edx-platform | cms/djangoapps/contentstore/features/advanced_settings.py | Python | agpl-3.0 | 769 | 0.0013 | # pylint: disable=missing-docstring
# pylint: disable=red | efined-outer-name
from lettuce import step, world
from cms.djangoapps.contentstore.features.common import press_the_notification_button, type_in_codemirror
KEY_CSS = '.key h3.title'
ADVANCED_MODULES_KEY = "Advanced Module List"
def get_index_of(expected_key):
    """Return the position of *expected_key* among the advanced-settings keys,
    or -1 when it is not present."""
    for index, _element in enumerate(world.css_find(KEY_CSS)):
        # Re-query the key text by index each time: holding on to the array of
        # elements can yield stale references.
        key_text = world.css_value(KEY_CSS, index=index)
        if key_text == expected_key:
            return index
    return -1
def change_value(step, key, new_value):
    """Type *new_value* into the CodeMirror editor for *key*, save, and wait
    for the resulting AJAX activity to finish."""
    type_in_codemirror(get_index_of(key), new_value)
    press_the_notification_button(step, "Save")
    world.wait_for_ajax_complete()
rbirger/OxfordHCVNonSpatial | Non-Spatial Model Outline and Code_old.py | Python | bsd-2-clause | 44,084 | 0.022684 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# ###Description and preliminary code for Continuous-Time Markov Chain Model
#
# This model will test the importance of including a spatial component in the system. We will use ODEs to describe the dynamics of each lineage and competition between lineages.
# The different states that each cell can move through are as follows
#
# * Healthy Hepatocytes
#
# * Latently Infected Hepatocytes
#
# * Infected Hepatocytes
#
# * Dead Infected Hepatocytes
#
# * Dead Healthy Hepatocytes
#
# Healthy cells are regenerated from Dead cells. Interacting with Infected cells, they become Latently Infected, and after the eclipse phase, Latent Infections become Infectious. Both Healthy and Infected Hepatocytes die, with Infected being eliminated by the immune response faster than natural death rates. Dead cells regenerate, but those dead after being infected with HCV have a lower probability of regenerating.
#
# Adapting the Perelson/Neumann model, we have
#
# $\begin{eqnarray*}
# \frac{dT}{dt}& =& \phi_{DT} D_T + \phi_{DI} D_I - (\lambda_{virions} + \lambda_{local} +\nu_T) T\\
# \frac{dE}{dt}& =& (\lambda_{virions} + \lambda_{local} )T - (\alpha +\nu_T)E\\
# \frac{dI}{dt}& =& \alpha E- \nu_I I\\
# \frac{dD_T}{dt}& =& \nu_T(T+E) - \phi_{DT} D_T\\
# \frac{dD_I}{dt}& =& \nu_I I - \phi_{DI} D_I\\\
# \end{eqnarray*}$
#
#
#
#
# To translate these equations into a continuous-time Markov Chain model, we can calculate the transition probabilities from the parameters above. Let $\vec{X(t)} = [T(t), E(t), I(t), D_T(t), D_I(t)]$, so the probability of state change is defined as Prob$\{\Delta \vec{X(t)} = (a, b, c, d, e)|\vec{X(t)}\}$, where $a$ represents the change in state $T$, $b$ in state $E$, etc. We assume that the time step is small enough that each change is only in one cell, so $a - e$ can only take the values 0 or $\pm 1$. The transition probabilities are as follows
#
#
# $$\begin{cases}
# (\lambda_{virions} + \lambda_{local}) T\ \Delta t + o(\Delta t), & a = -1, b = 1\\
# \nu_T T \Delta t + o(\Delta t), & a = -1, d = 1\\
# \alpha E \Delta t + o(\Delta t), & b = -1, c = 1\\
# \nu_T E \Delta t + o(\Delta t), & b = -1, d = 1\\
# \nu_I I \Delta t + o(\Delta t), & c = -1, e = 1 \\
# \phi_{DT} D_T \Delta t + o(\Delta t), & d = -1, a = 1\\
# \phi_{DI} D_I \Delta t + o(\Delta t), & e = -1, a = 1\\
# \end{cases}$$
#
# The generator matrix $\mathbf{Q}$ derived from these transition probabilities is thus as follows
#
# <!--($$ \mathbf{Q} =
# \left[ \begin{array}{ccccc}
# - (\beta I + \lambda +d) T & (\beta I + \lambda) T & 0 & 0 & dT \\
# 0 & -(\eta + d) L & \eta L &0 & dL \\
# 0 & 0 & -\delta I & \delta I & 0 \\
# \alpha_I D_I &0 &0 & -\alpha_I D_I&0\\
# \alpha_T D_T & 0 & 0& 0& -\alpha_T D_T\\
# \end{array} \right] $$ -->
#
# $$ \mathbf{Q} =
# \left[ \begin{array}{ccccc}
# 0& (\lambda_{virions} + \lambda_{local}) T& 0 & 0 & \nu_T T \\
# 0 & 0 & \alpha E & \nu_T E &0 \\
# 0 & 0 & 0 & 0 & \nu_I I\\
# \phi_{DT} D_T &0 &0 & 0&0\\
# \phi_{DI} D_I & 0 & 0& 0& 0\\
# \end{array} \right] $$
# <codecell>
%matplotlib inline
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
# <codecell>
# Parameters for the preliminary (Perelson/Neumann-style) model sketched in the
# markdown above; the main simulation cell below defines its own parameter set.
beta=.2  # infection rate (the beta*I*T term in the commented generator matrix)
nu = .01  # presumably a death/clearance rate -- TODO confirm; not used below
d = 2e-2  # baseline (healthy-cell) death rate
eta = 1  # rate of leaving the latent class (L -> I transition above)
delta = 3*d  # infected-cell death rate, 3x the baseline
alpha_I = .8e-1  # regeneration rate of dead infected cells (alpha_I * D_I above)
alpha_T = 2e-1  # regeneration rate of dead healthy cells (alpha_T * D_T above)
# <codecell>
# Stochastic simulation of the CTMC described above using a first-reaction
# scheme: draw an exponential waiting time for each of the 7 transitions and
# apply whichever fires first.
from __future__ import division
import numpy as np
#Number of state transitions to observe
M = int(1e6)
# time vector
time = np.zeros(M)
#Define parameters
rho = 8.18 #viral export rate
c = 22.3 #viral clearance rate
gamma = 1500 #scaling factor
R = 4.1825 #average HCV RNA in infected hepatocyte
N_liver = int(8e10) #Number of cells in liver
alpha = 1 #1/latent period (days)
nu_T = 1.4e-2 #death rate of healthy cells
nu_I = 1/7 #death rate of infected cells
phi_T = 10*nu_T #regeneration rate of dead healthy cells
phi_I = .8*phi_T #regeneration rate of dead infected cells
beta_V = 1e-8 #viral transmision rate
beta_L = R*1e-5/(60*24) #cell-cell transmission rate
N=N_liver/1e6
init=10
v_init = 1e6
sim=3
# Per-transition base rates; Q[0] is recomputed inside the loop as I and VL change.
Q = np.zeros(7)
Q[0] = (beta_L*init + beta_V*v_init); #Infection of Target cell
Q[1] = nu_T; #Death of target cell
Q[2] = alpha; #latent cell becomes infected
Q[3] = nu_T; #latent cell dies
Q[4] = nu_I; #Infected cell dies
Q[5] = phi_T; #Healthy cell regenerates
Q[6] = phi_I; #Infected cell regenerates
#Construct matrix of state transition vectors
#(rows = [T, E, I, Dt, Di], columns = the 7 transitions)
trans_vecs = np.zeros([5,7])
#state 1: infection of healthy cell
trans_vecs[0,0] = -1;
trans_vecs[1,0] = 1;
#state 2: death of healthy cell
trans_vecs[0,1] = -1;
trans_vecs[3,1] = 1;
#state 3: movement of latent cell into infected
trans_vecs[1,2] = -1;
trans_vecs[2,2] = 1;
#state 4: death of latent cell
trans_vecs[1,3] = -1;
trans_vecs[3,3] = 1;
#state 5: death of infected cell
trans_vecs[2,4] = -1;
trans_vecs[4,4] = 1;
#state 6: regeneration of dead healthy cell
trans_vecs[3,5] = -1;
trans_vecs[0,5] = 1;
#state 7: regeneration of dead infected cell
trans_vecs[4,6] = -1;
trans_vecs[0,6] = 1;
#Initialize state variable vectors
T = np.zeros(M)
E = np.zeros(M)
I = np.zeros(M)
Dt = np.zeros(M)
Di = np.zeros(M)
VL = np.zeros(M)
#Input initial conditions
I[0] = init;
T[0] = N-init;
VL[0] = v_init
#Initialize state vector and index
#state_vec = np.vstack([S,E,I,Di,Dt])
j =0
# Loop until infection dies out or the transition budget M is exhausted.
while I[j] >0 and j<M-1:
    #print [T[j],E[j],I[j],Dt[j],Di[j]]
    #Update Q to reflect new number of infected cells and viruses
    Q[0] = (beta_L*I[j] +beta_V*VL[j]);
    #Calculate per-transition rates (propensities)
    Qij = Q*[T[j],T[j],E[j],E[j],I[j],Dt[j],Di[j]]
    #Draw from exponential distributions of waiting times
    time_vec = -np.log(np.random.random(7))/Qij
    #np.random.exponential([1/Qij])[0]
    #
    #find minimum waiting time and obtain index to ascertain next state jump
    newTime = min(time_vec)
    time_vecL = time_vec.tolist()
    state_idx = time_vecL.index(min(time_vecL))
    [T[j+1],E[j+1],I[j+1],Dt[j+1],Di[j+1]]=[T[j],E[j],I[j],Dt[j],Di[j]]+ trans_vecs[:,state_idx]
    # NOTE(review): viral load is recomputed from the initial value VL[0], not
    # from VL[j] -- confirm this quasi-steady-state update is intended.
    VL[j+1] = VL[0]+rho*I[j]*R/(gamma*c)
    time[j+1] = time[j] + newTime
    j+=1
# <codecell>
[T[j],E[j],I[j],Dt[j],Di[j]]
rho*I[j]*R/(gamma*c)
# <codecell>
%%timeit
np.random.exponential(y)
# <codecell>
y= np.ones(11)
# <codecell>
plt.plot(time[0:M-1],VL[0:M-1])
# <codecell>
plt.plot(time,T, label = 'Susc')
plt.plot(time,I, label = 'Infected')
plt.plot(time,Dt, label = 'Dead (healthy)')
plt.plot(time,Di, label = 'Dead (infected)')
plt.legend(loc = 'upper right')
# <markdowncell>
# An updated version of the model includes a second latent class that keeps cells latently infected for longer before becoming infectious, and also allows for proliferation of infected cells by allowing cells to be reborn into the latent class
#
# * Healthy Hepatocytes
#
# * Latently Infected Hepatocytes
#
# * Long-lived Latently Infected Hepatocytes
#
# * Infected Hepatocytes
#
# * Dead Infected Hepatocytes
#
# * Dead Healthy Hepatocytes
#
# Healthy cells are regenerated from Dead cells. Interacting with Infected cells, they become Latently Infected, and after the eclipse phase, Latent Infections become Infectious. Both Healthy and Infected Hepatocytes die, with Infected being eliminated by the immune response faster than natural death rates. Dead cells regenerate, but those dead after being infected with HCV have a lower probability of regenerating. Some cells regenerate into infectious cells.
#
# Adapting the Perelson/Neumann model, we have
#
# $\begin{eqnarray*}
# \frac{dT}{dt}& =& \phi_{DT} D_T + (1-\kappa)\phi_{DI} D_I - (\lambda_{virions} + \lambda_{local} +\nu_T) T\\
# \frac{dE}{dt}& =& (1-\eta)(\lambda_{virions} + \lambda_{local} )T - (\alpha +\nu_T)E\\
# \frac{dEX}{dt}& =& \eta(\lambda_{virions} + \lambda_{local} )T - (\alpha_X +\nu_T)E\\
# \frac{dI}{dt}& =& \kappa\phi_{DI} D_I+ \alpha E- \nu_I I\\
# \frac{dD_T}{dt}& =& \nu_T(T+E+EX) - \phi_{DT} D_T\\
# \frac{dD_I}{dt}& =& \nu_I I - \phi_{DI} D_I\\\
# \end{eqnarray*}$
#
# To translate these equations into a continuous-time Markov Chain model, we can calculate the transition probabilities from the parameters above. Let $\vec{X(t)} = [T(t), E(t), EX(t) I(t), D_T(t), D_I(t)]$, so the probability of state chan |
ahmadiga/min_edx | lms/envs/devstack.py | Python | agpl-3.0 | 7,967 | 0.004017 | """
Specific overrides to the base prod settings to make development easier.
"""
from os.path import abspath, dirname, join
from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import
# Don't use S3 in devstack, fall back to filesystem
del DEFAULT_FILE_STORAGE
MEDIA_ROOT = "/edx/var/edxapp/uploads"
# Debugging aids are enabled everywhere in devstack.
DEBUG = True
USE_I18N = True
TEMPLATE_DEBUG = True
SITE_NAME = 'localhost:8000'
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'Devstack')
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
HTTPS = 'off'
################################ LOGGERS ######################################
# Silence noisy logs
import logging
# (logger name, level) pairs; each named logger is raised to CRITICAL below so
# only the most severe messages from these chatty components get through.
LOG_OVERRIDES = [
    ('track.contexts', logging.CRITICAL),
    ('track.middleware', logging.CRITICAL),
    ('dd.dogapi', logging.CRITICAL),
    ('django_comment_client.utils', logging.CRITICAL),
]
for log_name, log_level in LOG_OVERRIDES:
    logging.getLogger(log_name).setLevel(log_level)
################################ EMAIL ########################################
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True # Enable email for all Studio courses
FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False # Give all courses email (don't require django-admin perms)
########################## ANALYTICS TESTING ########################
ANALYTICS_SERVER_URL = "http://127.0.0.1:9000/"
ANALYTICS_API_KEY = ""
# Set this to the dashboard URL in order to display the link from the
# dashboard to the Analytics Dashboard.
ANALYTICS_DASHBOARD_URL = None
################################ DEBUG TOOLBAR ################################
# Enable the Django Debug Toolbar (plus its Mongo panel) on devstack.
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES += (
    'django_comment_client.utils.QueryCountDebugMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar_mongo.panel.MongoDebugPanel',
# ProfilingPanel has been intentionally removed for default devstack.py
# runtimes for performance reasons. If you wish to re-enable it in your
# local development environment, please create a new settings file
# that imports and extends devstack.py.
)
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': 'lms.envs.devstack.should_show_debug_toolbar'
}
def should_show_debug_toolbar(_):
    """Debug-toolbar visibility callback: always on for devstack.

    The argument (the request) is deliberately ignored — devstack skips
    the usual IP/auth gating.
    """
    return True
########################### PIPELINE #################################
# Skip packaging and optimization in development
PIPELINE_ENABLED = False
STATICFILES_STORAGE = 'pipeline.storage.NonPackagingPipelineStorage'
# Revert to the default set of finders as we don't want the production pipeline
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# Disable JavaScript compression in development
PIPELINE_JS_COMPRESSOR = None
# Whether to run django-require in debug mode.
REQUIRE_DEBUG = DEBUG
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
########################### VERIFIED CERTIFICATES #################################
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
FEATURES['ENABLE_PAYMENT_FAKE'] = True
CC_PROCESSOR_NAME = 'CyberSource2'
CC_PROCESSOR = {
'CyberSource2': {
"PURCHASE_ENDPOINT": '/shoppingcart/payment_fake/',
"SECRET_KEY": 'abcd123',
"ACCESS_KEY": 'abcd123',
"PROFILE_ID": 'edx',
}
}
########################### External REST APIs #################################
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
OAUTH_OIDC_ISSUER = 'http://127.0.0.1:8000/oauth2'
FEATURES['ENABLE_MOBILE_REST_API'] = True
FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True
########################## SECURITY #######################
FEATURES['ENFORCE_PASSWORD_POLICY'] = False
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
FEATURES['ADVANCED_SECURITY'] = False
PASSWORD_MIN_LENGTH = None
PASSWORD_COMPLEXITY = {}
########################### Milestones #################################
FEATURES['MILESTONES_APP'] = True
########################### Milestones #################################
FEATURES['ORGANIZATIONS_APP'] = True
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True
################################ COURSE LICENSES ################################
FEATURES['LICENSING'] = True
########################## Courseware Search #######################
FEATURES['ENABLE_COURSEWARE_SEARCH'] = True
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
########################## Dashboard Search #######################
FEATURES['ENABLE_DASHBOARD_SEARCH'] = True
########################## Certificates Web/HTML View #######################
FEATURES['CERTIFICATES_HTML_VIEW'] = True
########################## Course Discovery #######################
from django.utils.translation import ugettext as _
LANGUAGE_MAP = {'terms': {lang: display for lang, display in ALL_LANGUAGES}, 'name': _('Language')}
COURSE_DISCOVERY_MEANINGS = {
'org': {
'name': _('Organization'),
},
'modes': {
'name': _('Course Type'),
'terms': {
'honor': _('Honor'),
'verified': _('Verified'),
},
},
'language': LANGUAGE_MAP,
}
FEATURES['ENABLE_COURSE_DISCOVERY'] = True
# Setting for overriding default filtering facets for Course discovery
# COURSE_DISCOVERY_FILTERS = ["org", "language", "modes"]
FEATURES['COURSES_ARE_BROWSEABLE'] = True
HOMEPAGE_COURSE_MAX = 9
# Software secure fake page feature flag
FEATURES['ENABLE_SOFTWARE_SECURE_FAKE'] = True
# Setting for the testing of Software Secure Result Callback
VERIFY_STUDENT["SOFTWARE_SECURE"] = {
"API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
}
# Skip enrollment start date filtering
SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING = True
########################## Shopping cart ##########################
FEATURES['ENABLE_SHOPPING_CART'] = True
FEATURES['STORE_BILLING_INFO'] = True
FEATURES['ENABLE_PAID_COURSE_REGISTRATION'] = True
FEATURES['ENABLE_COSMETIC_DISPLAY_PRICE'] = True
########################## Third Party Auth #######################
if FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and 'third_party_auth.dummy.DummyBackend' not in AUTHENTICATION_BACKENDS:
AUTHENTICATION_BACKENDS = ['third_party_auth.dummy.DummyBackend'] + list(AUTHENTICATION_BACKENDS)
############## ECOMMERCE API CONFIGURATION SETTINGS ###############
ECOMMERCE_PUBLIC_URL_ROOT = "http://localhost:8002"
###################### Cross-domain requests ######################
FEATURES['ENABLE_CORS_HEADERS'] = True
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = ()
CORS_ORIGIN_ALLOW_ALL = True
#####################################################################
# See if the developer has any local overrides.
if os.path.isfile(join(dirname(abspath(__file__)), 'private.py')):
from .private import * # pylint: disable=import-error,wildcard-import
#####################################################################
# Lastly, run any migrations, if needed.
MODULESTORE = convert_module_store_setting_if_needed(MODULESTORE)
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
|
jhseu/tensorflow | tensorflow/python/compiler/tensorrt/trt_convert_windows.py | Python | apache-2.0 | 5,368 | 0.002608 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Exposes the TRT conversion for Windows platform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform
from tensorflow.python.util.tf_export import tf_export
if platform.system() != "Windows":
raise RuntimeError(
"This module is expected to be loaded only on Windows platform.")
class TrtPrecisionMode(object):
  """Precision modes accepted by TF-TRT conversion (string constants)."""
  FP32 = "FP32"
  FP16 = "FP16"
  INT8 = "INT8"
# Use a large enough number as the default max_workspace_size for TRT engines,
# so it can produce reasonable performance results with the default.
DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES = 1 << 30
@tf_export("experimental.tensorrt.ConversionParams", v1=[])
class TrtConversionParams(object):
  """A class to encapsulate parameters that are used for TF-TRT conversion.

  On Windows this is a stub: construction always raises, because TF-TRT is
  not supported on this platform.
  """
  def __init__(self,
               rewriter_config_template=None,
               max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
               precision_mode=TrtPrecisionMode.FP32,
               minimum_segment_size=3,
               is_dynamic_op=True,
               maximum_cached_engines=1,
               use_calibration=True,
               max_batch_size=1):
    """Initialize TrtConversionParams.

    Args:
      rewriter_config_template: a template RewriterConfig proto used to create a
        TRT-enabled RewriterConfig. If None, it will use a default one.
      max_workspace_size_bytes: the maximum GPU temporary memory which the TRT
        engine can use at execution time. This corresponds to the
        'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().
      precision_mode: one of TrtPrecisionMode.supported_precision_modes().
      minimum_segment_size: the minimum number of nodes required for a subgraph
        to be replaced by TRTEngineOp.
      is_dynamic_op: whether to generate dynamic TRT ops which will build the
        TRT network and engine at run time. i.e. Since TensorRT version < 6.0
        does not support dynamic dimensions other than the batch dimension, when
        the TensorFlow graph has a non-batch dimension of dynamic size, we would
        need to enable this option. This option should be set to True in TF 2.0.
      maximum_cached_engines: max number of cached TRT engines for dynamic TRT
        ops. Created TRT engines for a dynamic dimension are cached. This is the
        maximum number of engines that can be cached. If the number of cached
        engines is already at max but none of them supports the input shapes,
        the TRTEngineOp will fall back to run the original TF subgraph that
        corresponds to the TRTEngineOp.
      use_calibration: this argument is ignored if precision_mode is not INT8.
        If set to True, a calibration graph will be created to calibrate the
        missing ranges. The calibration graph must be converted to an inference
        graph by running calibration with calibrate(). If set to False,
        quantization nodes will be expected for every tensor in the graph
        (exlcuding those which will be fused). If a range is missing, an error
        will occur. Please note that accuracy may be negatively affected if
        there is a mismatch between which tensors TRT quantizes and which
        tensors were trained with fake quantization.
      max_batch_size: max size for the input batch. This parameter is only
        effective when is_dynamic_op=False which is not supported in TF 2.0.

    Raises:
      NotImplementedError: TRT is not supported on Windows.
    """
    # Fixed garbled message text ("TensorRT | integration ...").
    raise NotImplementedError(
        "TensorRT integration is not available on Windows.")
@tf_export("experimental.tensorrt.Converter", v1=[])
class TrtConverterWindows(object):
  """An offline converter for TF-TRT transformation for TF 2.0 SavedModels.
  Currently this is not available on Windows platform.
  """
  def __init__(self,
               input_saved_model_dir=None,
               input_saved_model_tags=None,
               input_saved_model_signature_key=None,
               conversion_params=None):
    """Initialize the converter.
    Args:
      input_saved_model_dir: the directory to load the SavedModel which contains
        the input graph to transforms. Used only when input_graph_def is None.
      input_saved_model_tags: list of tags to load the SavedModel.
      input_saved_model_signature_key: the key of the signature to optimize the
        graph for.
      conversion_params: a TrtConversionParams instance.
    Raises:
      NotImplementedError: TRT is not supported on Windows.
    """
    raise NotImplementedError(
        "TensorRT integration is not available on Windows.")
|
StefanRijnhart/odoo | addons/analytic/models/analytic.py | Python | agpl-3.0 | 19,270 | 0.005812 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_analytic_account(osv.osv):
_name = 'account.analytic.account'
_inherit = ['mail.thread']
_description = 'Analytic Account'
_track = {
'state': {
'analytic.mt_account_pending': lambda self, cr, uid, obj, ctx=None: obj.state == 'pending',
'analytic.mt_account_closed': lambda self, cr, uid, obj, ctx=None: obj.state == 'close',
'analytic.mt_account_opened': lambda self, cr, uid, obj, ctx=None: obj.state == 'open',
},
}
    def _compute_level_tree(self, cr, uid, ids, child_ids, res, field_names, context=None):
        """Aggregate the per-account figures in ``res`` bottom-up over the
        account hierarchy.  Child amounts are converted to the parent's
        currency when they differ; quantities are never converted.
        Only accounts whose id appears in ``child_ids`` are returned.
        """
        currency_obj = self.pool.get('res.currency')
        recres = {}
        def recursive_computation(account):
            # Start from the account's own figures, then fold in each child's
            # recursively aggregated figures.
            result2 = res[account.id].copy()
            for son in account.child_ids:
                result = recursive_computation(son)
                for field in field_names:
                    if (account.currency_id.id != son.currency_id.id) and (field!='quantity'):
                        result[field] = currency_obj.compute(cr, uid, son.currency_id.id, account.currency_id.id, result[field], context=context)
                    result2[field] += result[field]
            return result2
        for account in self.browse(cr, uid, ids, context=context):
            if account.id not in child_ids:
                continue
            recres[account.id] = recursive_computation(account)
        return recres
    def _debit_credit_bal_qtty(self, cr, uid, ids, fields, arg, context=None):
        """Function-field getter computing debit, credit, balance and quantity
        per analytic account, summed over the account and all descendants.
        Honours optional ``from_date`` / ``to_date`` bounds in the context.
        """
        res = {}
        if context is None:
            context = {}
        # Include every descendant so totals roll up the hierarchy.
        child_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)]))
        for i in child_ids:
            res[i] = {}
            for n in fields:
                res[i][n] = 0.0
        if not child_ids:
            return res
        where_date = ''
        where_clause_args = [tuple(child_ids)]
        if context.get('from_date', False):
            where_date += " AND l.date >= %s"
            where_clause_args += [context['from_date']]
        if context.get('to_date', False):
            where_date += " AND l.date <= %s"
            where_clause_args += [context['to_date']]
        # One grouped query over the analytic lines: positive amounts count as
        # debit, negative amounts as credit.
        cr.execute("""
              SELECT a.id,
                     sum(
                         CASE WHEN l.amount > 0
                              THEN l.amount
                              ELSE 0.0
                         END
                          ) as debit,
                     sum(
                         CASE WHEN l.amount < 0
                              THEN -l.amount
                              ELSE 0.0
                         END
                          ) as credit,
                     COALESCE(SUM(l.amount),0) AS balance,
                     COALESCE(SUM(l.unit_amount),0) AS quantity
              FROM account_analytic_account a
              LEFT JOIN account_analytic_line l ON (a.id = l.account_id)
              WHERE a.id IN %s
              """ + where_date + """
              GROUP BY a.id""", where_clause_args)
        for row in cr.dictfetchall():
            res[row['id']] = {}
            for field in fields:
                res[row['id']][field] = row[field]
        # Roll the per-account figures up the tree before returning.
        return self._compute_level_tree(cr, uid, ids, child_ids, res, fields, context)
    def name_get(self, cr, uid, ids, context=None):
        """Return ``(id, display_name)`` pairs, where the display name is the
        slash-separated hierarchical path (see ``_get_one_full_name``).
        Accepts a single id or a list of ids.
        """
        res = []
        if not ids:
            return res
        # Allow callers to pass a single id instead of a list.
        if isinstance(ids, (int, long)):
            ids = [ids]
        for id in ids:
            elmt = self.browse(cr, uid, id, context=context)
            res.append((id, self._get_one_full_name(elmt)))
        return res
def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
if context == None:
| context = {}
res = {}
for elmt in self.browse(cr, uid, ids, context=context):
res[elmt.i | d] = self._get_one_full_name(elmt)
return res
def _get_one_full_name(self, elmt, level=6):
if level<=0:
return '...'
if elmt.parent_id and not elmt.type == 'template':
parent_path = self._get_one_full_name(elmt.parent_id, level-1) + " / "
else:
parent_path = ''
return parent_path + elmt.name
def _child_compute(self, cr, uid, ids, name, arg, context=None):
result = {}
if context is None:
context = {}
for account in self.browse(cr, uid, ids, context=context):
result[account.id] = map(lambda x: x.id, [child for child in account.child_ids if child.state != 'template'])
return result
def _get_analytic_account(self, cr, uid, ids, context=None):
company_obj = self.pool.get('res.company')
analytic_obj = self.pool.get('account.analytic.account')
accounts = []
for company in company_obj.browse(cr, uid, ids, context=context):
accounts += analytic_obj.search(cr, uid, [('company_id', '=', company.id)])
return accounts
    def _set_company_currency(self, cr, uid, ids, name, value, arg, context=None):
        """Function-field setter for ``currency_id``.
        Refuses a currency different from the owning company's currency;
        otherwise writes the value with raw SQL and invalidates the ORM
        cache so subsequent reads see the new currency.
        """
        if isinstance(ids, (int, long)):
            ids=[ids]
        for account in self.browse(cr, uid, ids, context=context):
            if account.company_id:
                if account.company_id.currency_id.id != value:
                    raise osv.except_osv(_('Error!'), _("If you set a company, the currency selected has to be the same as it's currency. \nYou can remove the company belonging, and thus change the currency, only on analytic account of type 'view'. This can be really useful for consolidation purposes of several companies charts with different currencies, for example."))
            if value:
                # Direct SQL write bypasses the ORM, hence the explicit
                # cache invalidation just below.
                cr.execute("""update account_analytic_account set currency_id=%s where id=%s""", (value, account.id))
                self.invalidate_cache(cr, uid, ['currency_id'], [account.id], context=context)
def _currency(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
if rec.company_id:
result[rec.id] = rec.company_id.currency_id.id
else:
result[rec.id] = rec.currency_id.id
return result
_columns = {
'name': fields.char('Account/Contract Name', required=True, track_visibility='onchange'),
'complete_name': fields.function(_get_full_name, type='char', string='Full Name'),
'code': fields.char('Reference', select=True, track_visibility='onchange', copy=False),
'type': fields.selection([('view','Analytic View'), ('normal','Analytic Account'),('contract','Contract or Project'),('template','Template of Contract')], 'Type of Account', required=True,
help="If you select the View Type, it means you won\'t allow to create journal entries using that account.\n"\
"The type 'Analytic account' stands for usual accounts that you only want to use in accounting.\n"\
|
cslansing/shaft | shaft/settings/base.py | Python | mit | 4,489 | 0.000891 | import os
import django
from secret_key import *
# calculated paths for django and the site
# used as starting points for various other paths
DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__))
# project root is same as current directory when being called from manage.py
PROJECT_ROOT = os.path.abspath('./')
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static/')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'shaft.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'shaft.wsgi.application'
PREREQ_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
]
PROJECT_APPS = [
'clansy',
]
INSTALLED_APPS = PREREQ_APPS + PROJECT_APPS
# Collect each project app's templates/ and static/ directories.
TEMPLATE_DIRS = [os.path.join(PROJECT_ROOT, app + '/templates/') for app in PROJECT_APPS]
STATICFILES_DIRS = [os.path.join(PROJECT_ROOT, app + '/static/') for app in PROJECT_APPS]
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# Email site admins on HTTP 500s when DEBUG is off; no other handlers.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
|
yafeunteun/wikipedia-spam-classifier | revscoring/revscoring/features/meta/aggregators.py | Python | mit | 2,017 | 0 | """
These Meta-Features apply an aggregate function t | o
:class:`~revscoring.Data | source` that return lists of values.
.. autoclass revscoring.features.meta.aggregators.sum
.. autoclass revscoring.features.meta.aggregators.len
.. autoclass revscoring.features.meta.aggregators.max
.. autoclass revscoring.features.meta.aggregators.min
"""
from ..feature import Feature
len_builtin = len
sum_builtin = sum
max_builtin = max
min_builtin = min
class sum(Feature):
    """Feature returning the sum of the values produced by
    ``items_datasource`` (0 for empty or None input).  Deliberately shadows
    the builtin ``sum``; the builtin is preserved as ``sum_builtin``."""
    def __init__(self, items_datasource, name=None, returns=float):
        name = self._format_name(name, [items_datasource])
        super().__init__(name, self.process, depends_on=[items_datasource],
                         returns=returns)
    def process(self, items):
        # ``items or []`` treats None the same as an empty list.
        return self.returns(sum_builtin(items or []))
class len(Feature):
    """Feature returning the number of values produced by
    ``items_datasource`` (0 for empty or None input).  Deliberately shadows
    the builtin ``len``; the builtin is preserved as ``len_builtin``."""
    def __init__(self, items_datasource, name=None):
        name = self._format_name(name, [items_datasource])
        super().__init__(name, self.process, depends_on=[items_datasource],
                         returns=int)
    def process(self, items):
        # ``items or []`` treats None the same as an empty list.
        return len_builtin(items or [])
class max(Feature):
    """Feature returning the maximum of the values produced by
    ``items_datasource``; empty or None input yields ``returns()``
    (e.g. 0.0).  Deliberately shadows the builtin ``max``; the builtin is
    preserved as ``max_builtin``."""
    def __init__(self, items_datasource, name=None, returns=float):
        name = self._format_name(name, [items_datasource])
        super().__init__(name, self.process, depends_on=[items_datasource],
                         returns=returns)
    def process(self, items):
        if items is None or len_builtin(items) == 0:
            # Default-constructed value stands in for "max of nothing".
            return self.returns()
        else:
            return self.returns(max_builtin(items))
class min(Feature):
    """Feature returning the minimum of the values produced by
    ``items_datasource``; empty or None input yields ``returns()``
    (e.g. 0.0).  Deliberately shadows the builtin ``min``; the builtin is
    preserved as ``min_builtin``."""
    def __init__(self, items_datasource, name=None, returns=float):
        name = self._format_name(name, [items_datasource])
        super().__init__(name, self.process, depends_on=[items_datasource],
                         returns=returns)
    def process(self, items):
        if items is None or len_builtin(items) == 0:
            # Default-constructed value stands in for "min of nothing".
            return self.returns()
        else:
            return self.returns(min_builtin(items))
|
JulyKikuAkita/PythonPrac | cs15211/EvaluateDivision.py | Python | apache-2.0 | 9,882 | 0.004351 | __source__ = 'https://leetcode.com/problems/evaluate-division/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/evaluate-division.py
# Time: O(e + q * |V|!), |V| is the number of variables
# Space: O(e)
#
# Description: Leetcode # 399. Evaluate Division
#
# Equations are given in the format A / B = k,
# where A and B are variables represented as strings,
# and k is a real number (floating point number).
# Given some queries, return the answers.
# If the answer does not exist, return -1.0.
#
# Example:
# Given a / b = 2.0, b / c = 3.0.
# queries are: a / c = ?, b / a = ?, a / e = ?, a / a = ?, x / x = ? .
# return [6.0, 0.5, -1.0, 1.0, -1.0 ].
#
# The input is:
# vector<pair<string, string>> equations, vector<double>& values, vector<pair<string, string>> query .
#
# where equations.size() == values.size(),the values are positive.
# this represents the equations.return vector<double>. .
# The example above: equations = [ ["a", "b"], ["b", "c"] ].
# values = [2.0, 3.0]. queries = [ ["a", "c"], ["b", "a"], ["a", "e"], ["a", "a"], ["x", "x"] ].
# return would be [6.00000,3.00000,-1.00000,1.00000,-1.00000]
# The input is always valid. You may assume that
# evaluating the queries will result in no division by zero and there is no contradiction.
#
# Companies
# Google
# Related Topics
# Graph
#
import unittest
import collections
import itertools
class Solution(object):
    def calcEquation(self, equations, values, query):
        """
        :type equations: List[List[str]]
        :type values: List[float]
        :type query: List[List[str]]
        :rtype: List[float]
        """
        # DFS over the quotient graph: an equation a/b = k is an edge
        # a -> b with weight k plus a reverse edge b -> a with weight 1/k.
        def check(up, down, lookup, visited):
            # Return (found, product of edge weights) along a path up -> down.
            if up in lookup and down in lookup[up]:
                return (True, lookup[up][down])
            for k, v in lookup[up].iteritems():
                if k not in visited:
                    visited.add(k)
                    tmp = check(k, down, lookup, visited)
                    if tmp[0]:
                        return (True, v * tmp[1])
            return (False, 0)

        lookup = collections.defaultdict(dict)
        for i, e in enumerate(equations):
            lookup[e[0]][e[1]] = values[i]
            if values[i]:
                # Guard against division by zero for the reverse edge.
                lookup[e[1]][e[0]] = 1.0 / values[i]

        result = []
        for q in query:
            visited = set()
            tmp = check(q[0], q[1], lookup, visited)
            result.append(tmp[1] if tmp[0] else -1)
        return result

    # A variation of Floyd-Warshall, computing quotients instead of shortest paths.
    # An equation A/B=k is like a graph edge A->B, and (A/B)*(B/C)*(C/D)
    # is like the path A->B->C->D. Submitted once, accepted in 35 ms.
    # 20ms 99.20%
    # NOTE: this second definition shadows the DFS version above, so it is
    # the implementation callers actually get.
    def calcEquation(self, equations, values, queries):
        quot = collections.defaultdict(dict)
        for (num, den), val in zip(equations, values):
            quot[num][num] = quot[den][den] = 1.0
            quot[num][den] = val
            quot[den][num] = 1 / val
        # Close the graph under multiplication of quotients.
        for k, i, j in itertools.permutations(quot, 3):
            if k in quot[i] and j in quot[k]:
                quot[i][j] = quot[i][k] * quot[k][j]
        return [quot[num].get(den, -1.0) for num, den in queries]
class TestMethods(unittest.TestCase):
    """Placeholder smoke test; the real solutions are exercised on LeetCode."""
    def test_Local(self):
        # Trivial sanity assertion only.
        self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
graph
Image a/b = k as a link between node a and b, the weight from a to b is k,
the reverse link is 1/k. Query is to find a path between two nodes.
# 2ms 65.97%
class Solution {
public double[] calcEquation(String[][] equations, double[] values, String[][] queries) {
HashMap<String, HashMap<String, Double>> hm = initHashMap(equations, values);
double[] res = new double[queries.length];
for (int i = 0; i < queries.length; i++) {
String f = queries[i][0];
String s = queries[i][1];
if (!hm.containsKey(f) || !hm.containsKey(s)) res[i] = -1.0d;
else if (f.equals(s)) res[i] = 1.0d;
else res[i] = calcHelper(hm, f, s, new HashSet<String>());
}
return res;
}
public double calcHelper(HashMap<String, HashMap<String, Double>> hm, String f, String s, HashSet<String> visited) {
if (hm.get(f).containsKey(s)) {
return hm.get(f).get(s);
} else {
for (Map.Entry<String, Double> entry : hm.get(f).entrySet()) {
String key = entry.getKey();
if (visited.contains(key)) continue;
visited.add(key);
double res = calcHelper(hm, key, s, visited);
if (res != -1.0d) return res * entry.getValue();
}
return -1.0d;
}
}
public HashMap<String, HashMap<String, Double>> initHashMap(String[][] equations, double[] values) {
HashMap<String, HashMap<String, Double>> hm = new HashMap();
for (int i = 0; i < equations.length; i++) {
String f = equations[i][0];
String s = equations[i][1];
putInMap(hm, f, s, values[i]);
putInMap(hm, s, f, 1.0d / values[i]);
}
return hm;
}
public void putInMap(HashMap<String, HashMap<String, Double>> hm, String f, String s, double values) {
hm.computeIfAbsent(f, k -> new HashMap<String, Double>()).put(s, values);
}
}
# 1ms 100%
class Solution {
public double[] calcEquation(String[][] equations, double[] values, String[][] queries) {
HashMap<String, ArrayList<String>> pairs = new HashMap<String, ArrayList<String>>();
HashMap<String, ArrayList<Double>> valuesPair = new HashMap<String, ArrayList<Double>>();
for (int i = 0; i < equations.length; i++) { //equations = [ ["a", "b"], ["b", "c"] ],
String[] equation = equations[i];
if (!pairs.containsKey(equation[0])) {
pairs.put(equation[0], new ArrayList<String>());
valuesPair.put(equation[0], new ArrayList<Double>());
}
if (!pairs.containsKey(equation[1])) {
pairs.put(equation[1], new ArrayList<String>());
valuesPair.put(equation[1], new ArrayList<Double>());
}
pairs.get(equation[0]).add(equation[1]);
pairs.get(equation[1]).add(equation[0]);
valuesPair.get(equation[0]).add(values[i]);
valuesPair.get(equation[1]).add(1 / values[i]);
}
//queries = [ ["a", "c"], ["b", "a"], ["a", "e"], ["a", "a"], ["x", "x"] ].
double[] res = new double[queries.length];
for (int i = 0; i < queries.length; i++) {
String[] query = queries[i];
res[i] = dfs(query[0], query[1], pairs, valuesPair, new HashSet<String>(), 1.0);
if (res[i] == 0.0) res[i] = -1.0;
}
return res;
}
private double dfs(String start, String end, HashMap<String, ArrayList<String>> pairs, HashMap<String, ArrayList<Double>> values, HashSet<String> set, double value) {
if (set.contains(start)) return 0.0;
if (!pairs.containsKey(start)) return 0.0;
if (start.equals(end)) return value;
set.add(start);
ArrayList<String> strList = pairs.get(start);
ArrayList<Double> valueList = values.get(start);
double tmp = 0.0;
for (int i = 0; i < strList.size(); i++) {
tmp = dfs(strList.get(i), end, pairs, values, set, value * valueList.get(i));
if (tmp != 0.0) break;
}
set.remove(start);
return tmp;
}
}
# https://leetcode.com/problems/evaluate-division/discuss/224011/Java-Union-Find-Solution-(Beats-100)
# Unioin Find
# 1ms 100%
class Solution {
class DSU {
int[] parent;
double[] value;
public DSU(int size) {
parent = new int[size];
value = new double[size];
for (int i = 0; i < size; i++) {
parent[i] = i;
value[i] = 1;
}
}
public int find(int x) {
if (parent[x] != x ) {
int par = find(parent[x]);
value[x] = value[x] * value[pare |
912/M-new | virtualenvironment/experimental/lib/python2.7/site-packages/django/conf/locale/bg/formats.py | Python | gpl-2.0 | 772 | 0 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# (The DATE_FORMAT line was garbled by stray delimiter characters; restored.)
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i:s'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '  # Non-breaking space
# NUMBER_GROUPING =
keiserlab/e3fp | e3fp/fingerprint/array_ops.py | Python | lgpl-3.0 | 9,285 | 0.000539 | """Various array operations.
Author: Seth Axen
E-mail: seth.axen@gmail.com
"""
import numpy as np
from scipy.spatial.distance import pdist, squareform
QUATERNION_DTYPE = np.float64
X_AXIS, Y_AXIS, Z_AXIS = np.identity(3, dtype=np.float64)
EPS = 1e-12 # epsilon, a number close to 0
# Vector Algebra Methods
def as_unit(v, axis=1):
    """Return array of unit vectors parallel to vectors in `v`.

    Parameters
    ----------
    v : ndarray of float
    axis : int, optional
        Axis along which to normalize length.

    Returns
    -------
    ndarray of float : Unit vector of `v`, i.e. `v` divided by its
        magnitude along `axis`.
    """
    out = np.array(v, dtype=np.float64, copy=True)
    if out.ndim == 1:
        # Single vector: normalize unless its magnitude is effectively zero.
        sq_norm = out.dot(out)
        if sq_norm >= EPS:
            out /= sq_norm ** 0.5
        return out
    # Batch case: squared magnitudes along the requested axis.
    subscripts = "...ij,...ij->...i" if axis == 1 else "...ij,...ij->...j"
    sq_norm = np.einsum(subscripts, out, out)
    # Near-zero vectors are left untouched by dividing them by 1.
    sq_norm[sq_norm < EPS] = 1.0
    out /= np.expand_dims(np.sqrt(sq_norm), axis)
    return out
def make_distance_matrix(coords):
    """Build pairwise distance matrix from coordinates.

    Parameters
    ----------
    coords : ndarray of float
        an Mx3 array of cartesian coordinates.

    Returns
    -------
    ndarray of float : square symmetrical distance matrix
    """
    # pdist yields the condensed (upper-triangle) distances; squareform
    # expands them to the full symmetric matrix.
    condensed = pdist(coords)
    return squareform(condensed)
def make_transform_matrix(center, y=None, z=None):
    """Make 4x4 homogenous transformation matrix.

    Given Nx4 array A where A[:, 4] = 1., the transform matrix M should be
    used with dot(M, A.T).T. Order of operations is 1. translation, 2. align
    `y` x `z` plane to yz-plane 3. align `y` to y-axis.

    Parameters
    ----------
    center : 1x3 array of float
        Coordinate that should be centered after transformation.
    y : None or 1x3 array of float
        Vector that should lie on the y-axis after transformation
    z : None or 1x3 array of float
        Vector that after transformation should lie on yz-plane in direction
        of z-axis.

    Returns
    -------
    4x4 array of float
        4x4 homogenous transformation matrix.
    """
    # (Two statements below were garbled by stray delimiter characters in the
    # original; reconstructed as `dtype=np.float64` and `rotate_y = ...`.)
    translate = np.identity(4, dtype=np.float64)
    translate[:3, 3] = -np.asarray(center, dtype=np.float64)
    if y is not None:
        y = np.atleast_2d(y)
        if z is None:
            # Only `y` given: a single rotation aligning y to the y-axis.
            rotate = np.identity(4, dtype=np.float64)
            rotate[:3, :3] = make_rotation_matrix(y, Y_AXIS)
        else:
            z = np.atleast_2d(z)
            # First align the plane normal (y x z) to the x-axis...
            rotate_norm = np.identity(4, dtype=np.float64)
            x_unit = as_unit(np.cross(y, z))
            rotate_norm[:3, :3] = make_rotation_matrix(x_unit, X_AXIS)
            # ...then rotate the transformed `y` onto the y-axis.
            new_y = np.dot(rotate_norm[:3, :3], y.flatten())
            rotate_y = np.identity(4, dtype=np.float64)
            rotate_y[:3, :3] = make_rotation_matrix(new_y.flatten(), Y_AXIS)
            rotate = np.dot(rotate_y, rotate_norm)
        transform = np.dot(rotate, translate)
    else:
        transform = translate
    return transform
def make_rotation_matrix(v0, v1):
    """Create 3x3 matrix of rotation from `v0` onto `v1`.

    Should be used by dot(R, v0.T).T.

    Parameters
    ----------
    v0 : 1x3 array of float
        Initial vector before alignment.
    v1 : 1x3 array of float
        Vector to which to align `v0`.
    """
    v0 = as_unit(v0)
    v1 = as_unit(v1)
    # Rotation axis: cross product of the two unit vectors.
    u = np.cross(v0.ravel(), v1.ravel())
    # NOTE(review): the cross product is zero for parallel AND antiparallel
    # vectors, so v1 == -v0 also returns the identity instead of a 180-degree
    # rotation — confirm this is intended.
    if np.all(u == 0.0):
        return np.identity(3, dtype=np.float64)
    # |u| = sin(angle) for unit inputs; normalize u to the unit axis.
    sin_ang = u.dot(u) ** 0.5
    u /= sin_ang
    cos_ang = np.dot(v0, v1.T)
    # Skew-symmetric cross-product matrix of the unit axis.
    # fmt: off
    ux = np.array([[  0., -u[2],  u[1]],
                   [ u[2],    0., -u[0]],
                   [-u[1],  u[0],    0.]], dtype=np.float64)
    # fmt: on
    # Rodrigues' rotation formula: R = cos*I + sin*[u]x + (1-cos)*u*u^T.
    rot = (
        cos_ang * np.identity(3, dtype=np.float64)
        + sin_ang * ux
        + (1 - cos_ang) * np.outer(u, u)
    )
    return rot
def transform_array(transform_matrix, a):
    """Pad an array with 1s, transform, and return with original dimensions.

    Parameters
    ----------
    transform_matrix : 4x4 array of float
        4x4 homogenous transformation matrix
    a : Nx3 array of float
        Array of 3-D coordinates.

    Returns
    -------
    Nx3 array of float : Transformed array
    """
    # Promote to homogenous coordinates, apply the transform, then drop
    # the homogenous component again.
    homogenous = pad_array(a)
    transformed = np.dot(transform_matrix, homogenous.T).T
    return unpad_array(transformed)
def pad_array(a, n=1.0, axis=1):
    """Return `a` with row of `n` appended to `axis`.

    Parameters
    ----------
    a : ndarray
        Array to pad
    n : float or int, optional
        Value to pad `a` with
    axis : int, optional
        Axis of `a` to pad with `n`.

    Returns
    -------
    ndarray
        Padded array.
    """
    if a.ndim == 1:
        # 1-D: append a single trailing entry of `n`.
        padded = np.ones(a.shape[0] + 1, dtype=a.dtype) * n
        padded[: a.shape[0]] = a
        return padded
    # 2-D: grow the requested axis by one and fill the new slots with `n`.
    padded_shape = list(a.shape)
    padded_shape[axis] += 1
    padded = np.ones(padded_shape, dtype=a.dtype) * n
    padded[: a.shape[0], : a.shape[1]] = a
    return padded
def unpad_array(a, axis=1):
    """Return `a` with row removed along `axis`.

    Parameters
    ----------
    a : ndarray
        Array from which to remove row
    axis : int, optional
        Axis from which to remove row

    Returns
    -------
    ndarray
        Unpadded array.
    """
    # 1-D arrays simply lose their trailing element.
    if a.ndim == 1:
        return a[:-1]
    # 2-D: shrink the requested axis by one via slicing (a view, no copy).
    trimmed = list(a.shape)
    trimmed[axis] -= 1
    return a[: trimmed[0], : trimmed[1]]
def project_to_plane(vec_arr, norm):
    """Project array of vectors to plane with normal `norm`.

    Parameters
    ----------
    vec_arr : Nx3 array
        Array of N 3D vectors.
    norm : 1x3 array
        Normal vector to plane.

    Returns
    -------
    Nx3 array
        Array of vectors projected onto plane.
    """
    unit_norm = as_unit(norm).flatten()
    # Scalar projection of each vector onto the unit normal.
    scalar_proj = np.dot(vec_arr, unit_norm)
    if vec_arr.ndim == 1:
        # Single vector: component parallel to the normal.
        parallel = unit_norm * scalar_proj
    else:
        # Batch: outer product rebuilds the parallel component per row.
        parallel = np.outer(scalar_proj, unit_norm)
    # Subtracting the parallel component leaves the in-plane component.
    return vec_arr - parallel
def calculate_angles(vec_arr, ref, ref_norm=None):
    """Calculate angles between vectors in `vec_arr` and `ref` vector.

    If `ref_norm` is not provided, angle ranges between 0 and pi. If it is
    provided, angle ranges between 0 and 2pi. Note that if `ref_norm` is
    orthogonal to `vec_arr` and `ref`, then the angle is rotation around the
    axis, but if a non-orthogonal axis is provided, this may not be the case.

    Parameters
    ----------
    vec_arr : Nx3 array of float
        Array of N 3D vectors.
    ref : 1x3 array of float
        Reference vector
    ref_norm : 1x3 array of float
        Normal vector.

    Returns
    -------
    1-D array
        Array of N angles
    """
    unit_vec_arr = as_unit(vec_arr)
    unit_ref = as_unit(ref).flatten()
    # clip guards against dot products slightly outside [-1, 1] from
    # floating-point error, which would make arccos return NaN.
    ang = np.arccos(np.clip(np.dot(unit_vec_arr, unit_ref), -1.0, 1.0))
    # handle cases where a vector is the origin
    # (as_unit leaves zero vectors as zeros; their dot product is 0, which
    # would otherwise report pi/2 instead of 0.)
    ang[np.all(unit_vec_arr == np.zeros(3), axis=1)] = 0.0
    if ref_norm is not None:
        # Orientation of the cross product relative to ref_norm decides
        # whether the angle is measured clockwise or counter-clockwise.
        sign = np.sign(
            np.dot(ref_norm, np.cross(unit_vec_arr, unit_ref).T)
        ).flatten()
        # Vectors parallel to ref get sign 0; treat them as positive.
        sign[sign == 0] = 1
        # rotate_angles maps the signed angles into [0, 2*pi).
        ang = rotate_angles(sign * ang, 2 * np.pi)
    return ang
def rotate_angles(angles, amount):
    """Rotate angles by `amount`, keeping in 0 to 2pi range.

    Parameters
    ----------
    angles : 1-D array of float
        Angles in radians
    amount : float
        Amount to rotate angles by

    Returns
    -------
    1-D array of float : Rotated angles
    """
    # Shift, then wrap back into [0, 2*pi) with a modulo.
    two_pi = 2 * np.pi
    shifted = angles + amount
    return shifted % two_pi
def quaternion_to_transform_matrix(quaternion, translation=np.zeros(3)):
"""Convert quaternion to homogenous 4x4 transform matrix.
Parameters
----------
quaternion : 4x1 array of float
Quaternion describing rotation after translation.
translation : 3x1 array of float, optional
Translation to be performed before rotation.
"""
q = np.array(quaternion, dtype=np.float64, copy=True)
n = np.linalg.norm(q)
if n < 1e-12:
return np.identity(4, dtype=np.float64)
|
browniebroke/deezer-python | src/deezer/pagination.py | Python | mit | 3,359 | 0.000298 | from __future__ import annotations
from typing import Generator, Generic, TypeVar, overload
from urllib.parse import parse_qs, urlparse
import deezer
ResourceType = TypeVar("ResourceType")
class PaginatedList(Generic[ResourceType]):
    """Abstract paginated response from the API and make them more Pythonic.

    Pages are fetched lazily as the list is indexed, sliced or iterated,
    and cached so each page is requested at most once.
    """

    # Lifted and adapted from PyGithub:
    # https://github.com/PyGithub/PyGithub/blob/master/github/PaginatedList.py
    # (The `_fetch_to_index` method below was garbled by stray delimiter
    # characters in the original; reconstructed.)
    def __init__(
        self,
        client: deezer.Client,
        base_path: str,
        parent: deezer.Resource | None = None,
        **params,
    ):
        self.__elements: list[ResourceType] = []
        self.__client = client
        self.__base_path = base_path
        self.__base_params = params
        # Path/params of the next page to fetch; path becomes None when
        # the listing is exhausted.
        self.__next_path = base_path
        self.__next_params = params
        self.__parent = parent
        self.__total = None
        self.__iter = iter(self)

    @overload
    def __getitem__(self, index: int) -> ResourceType:
        ...

    @overload
    def __getitem__(self, index: slice) -> list[ResourceType]:
        ...

    def __getitem__(
        self,
        index: int | slice,
    ) -> ResourceType | list[ResourceType]:
        """Fetch just enough pages to satisfy `index`, then read the cache."""
        if isinstance(index, int):
            self._fetch_to_index(index)
            return self.__elements[index]
        if index.stop is not None:
            self._fetch_to_index(index.stop)
        else:
            # Open-ended slice: the whole listing is needed.
            while self._could_grow():
                self._grow()
        return self.__elements[index]

    def __iter__(self) -> Generator[ResourceType, None, None]:
        # Serve cached elements first, then keep fetching pages lazily.
        yield from self.__elements
        while self._could_grow():
            yield from self._grow()

    def __next__(self) -> ResourceType:
        return next(self.__iter)

    def __len__(self) -> int:
        return self.total

    def _could_grow(self) -> bool:
        return self.__next_path is not None

    def _grow(self) -> list[ResourceType]:
        """Fetch the next page, append it to the cache and return it."""
        new_elements = self._fetch_next_page()
        self.__elements.extend(new_elements)
        return new_elements

    def _fetch_next_page(self) -> list[ResourceType]:
        response_payload = self.__client.request(
            "GET",
            self.__next_path,
            parent=self.__parent,
            paginate_list=True,
            **self.__next_params,
        )
        # Assume this was the last page unless a "next" URL says otherwise.
        self.__next_path = None
        self.__total = response_payload.get("total")
        next_url = response_payload.get("next", None)
        if next_url:
            url_bits = urlparse(next_url)
            self.__next_path = url_bits.path.lstrip("/")
            self.__next_params = parse_qs(url_bits.query)
        return response_payload["data"]

    def _fetch_to_index(self, index: int):
        """Grow the cache until position `index` exists or pages run out."""
        while len(self.__elements) <= index and self._could_grow():
            self._grow()

    @property
    def total(self) -> int:
        """The total number of items in the list, mirroring what Deezer returns."""
        if self.__total is None:
            # Issue a minimal request (limit=1) just to read the total count.
            params = self.__base_params.copy()
            params["limit"] = 1
            response_payload = self.__client.request(
                "GET",
                self.__base_path,
                parent=self.__parent,
                paginate_list=True,
                **params,
            )
            self.__total = response_payload["total"]
        return self.__total
DTOcean/dtocean-core | tests/test_data_definitions_pointdict.py | Python | gpl-3.0 | 6,319 | 0.011869 | import pytest
import matplotlib.pyplot as plt
from geoalchemy2.elements import WKTElement
from aneris.control.factory import InterfaceFactory
from dtocean_core.core import (AutoFileInput,
AutoFileOutput,
AutoPlot,
AutoQuery,
Core)
from dtocean_core.data import CoreMetaData
from dtocean_core.data.definitions import PointDict, PointDictColumn
def test_PointDict_available():
    # Sanity check: the PointDict structure is registered in the core's
    # internal structure catalogue under its class name.
    new_core = Core()
    all_objs = new_core.control._store._structures
    assert "PointDict" in all_objs.keys()
def test_PointDict():
    # Minimal metadata accepted by the structure.
    meta = CoreMetaData({"identifier": "test",
                         "structure": "test",
                         "title": "test"})
    test = PointDict()
    # 2D coordinates round-trip through get_data/get_value...
    raw = {"one" : (0., 0.),
           "two" : (1., 1.),
           "three" : (2., 2.)
           }
    a = test.get_data(raw, meta)
    b = test.get_value(a)
    assert b["one"].x == 0.
    assert b["two"].y == 1.
    # ...as do 3D coordinates...
    raw = {"one" : (0., 0., 0.),
           "two" : (1., 1., 1.),
           "three" : (2., 2., 2.)
           }
    a = test.get_data(raw, meta)
    b = test.get_value(a)
    assert b["one"].x == 0.
    assert b["two"].y == 1.
    assert b["three"].z == 2.
    # ...but four components per point are rejected.
    raw = {"one" : (0., 0., 0., 0.),
           "two" : (1., 1., 1., 1.),
           "three" : (2., 2., 2., 2.)
           }
    with pytest.raises(ValueError):
        test.get_data(raw, meta)
def test_get_None():
    # get_value must pass None through untouched when no data is present.
    # (The assert line was garbled by a stray delimiter; restored.)
    test = PointDict()
    result = test.get_value(None)
    assert result is None
@pytest.mark.parametrize("fext", [".csv", ".xls", ".xlsx"])
def test_PointDict_auto_file(tmpdir, fext):
    # Round-trip 2D and 3D point dictionaries through the auto-generated
    # file output/input interfaces for each supported file extension.
    # (A stray delimiter line in the original body has been removed.)
    test_path = tmpdir.mkdir("sub").join("test{}".format(fext))
    test_path_str = str(test_path)
    raws = [{"one" : (0., 0.),
             "two" : (1., 1.),
             "three" : (2., 2.)},
            {"one" : (0., 0., 0.),
             "two" : (1., 1., 1.),
             "three" : (2., 2., 2.)}
            ]
    # Whether the round-tripped points should carry a z component.
    ztests = [False, True]
    for raw, ztest in zip(raws, ztests):
        meta = CoreMetaData({"identifier": "test",
                             "structure": "test",
                             "title": "test"})
        test = PointDict()
        # Write the data out via the AutoFileOutput interface...
        fout_factory = InterfaceFactory(AutoFileOutput)
        FOutCls = fout_factory(meta, test)
        fout = FOutCls()
        fout._path = test_path_str
        fout.data.result = test.get_data(raw, meta)
        fout.connect()
        assert len(tmpdir.listdir()) == 1
        # ...then read it back via the AutoFileInput interface.
        fin_factory = InterfaceFactory(AutoFileInput)
        FInCls = fin_factory(meta, test)
        fin = FInCls()
        fin._path = test_path_str
        fin.connect()
        result = test.get_data(fin.data.result, meta)
        assert result["one"].x == 0.
        assert result["two"].y == 1.
        assert result["one"].has_z == ztest
def test_PointDict_auto_plot(tmpdir):
    # The auto-generated plot interface should produce exactly one figure.
    meta = CoreMetaData({"identifier": "test",
                         "structure": "test",
                         "title": "test"})
    raw = {"one" : (0., 0.),
           "two" : (1., 1.),
           "three" : (2., 2.)
           }
    test = PointDict()
    fout_factory = InterfaceFactory(AutoPlot)
    PlotCls = fout_factory(meta, test)
    plot = PlotCls()
    plot.data.result = test.get_data(raw, meta)
    plot.meta.result = meta
    plot.connect()
    assert len(plt.get_fignums()) == 1
    # Close figures so later tests start from a clean matplotlib state.
    plt.close("all")
def test_PointDictColumn_available():
    # Sanity check: the PointDictColumn structure is registered in the
    # core's internal structure catalogue under its class name.
    new_core = Core()
    all_objs = new_core.control._store._structures
    assert "PointDictColumn" in all_objs.keys()
def test_PointDictColumn_auto_db(mocker):
    # Simulated database rows: names plus 2D and then 3D WKT points.
    names = ["one", "two"]
    raw_data = [[WKTElement("POINT (0 0)"), WKTElement("POINT (1 1)")],
                [WKTElement("POINT (0 0 0)"), WKTElement("POINT (1 1 1)")]]
    for raw in raw_data:
        mock_lists = [names, raw]
        # Patch the DB access helper so no real database is needed.
        mocker.patch('dtocean_core.data.definitions.get_all_from_columns',
                     return_value=mock_lists,
                     autospec=True)
        meta = CoreMetaData({"identifier": "test",
                             "structure": "test",
                             "title": "test",
                             "tables": ["mock.mock", "position"]})
        test = PointDictColumn()
        # Build and run the auto-generated database query interface.
        query_factory = InterfaceFactory(AutoQuery)
        QueryCls = query_factory(meta, test)
        query = QueryCls()
        query.meta.result = meta
        query.connect()
        result = test.get_data(query.data.result, meta)
        assert result["one"].x == 0.
        assert result["two"].y == 1.
def test_PointDictColumn_auto_db_empty(mocker):
    # Empty result columns from the database should yield a None result.
    mock_lists = [[], []]
    mocker.patch('dtocean_core.data.definitions.get_all_from_columns',
                 return_value=mock_lists,
                 autospec=True)
    meta = CoreMetaData({"identifier": "test",
                         "structure": "test",
                         "title": "test",
                         "tables": ["mock.mock", "position"]})
    test = PointDictColumn()
    query_factory = InterfaceFactory(AutoQuery)
    QueryCls = query_factory(meta, test)
    query = QueryCls()
    query.meta.result = meta
    query.connect()
    assert query.data.result is None
def test_PointDictColumn_auto_db_none(mocker):
    # Columns of all-None values should also yield a None result.
    mock_lists = [[None, None], [None, None]]
    mocker.patch('dtocean_core.data.definitions.get_all_from_columns',
                 return_value=mock_lists,
                 autospec=True)
    meta = CoreMetaData({"identifier": "test",
                         "structure": "test",
                         "title": "test",
                         "tables": ["mock.mock", "position"]})
    test = PointDictColumn()
    query_factory = InterfaceFactory(AutoQuery)
    QueryCls = query_factory(meta, test)
    query = QueryCls()
    query.meta.result = meta
    query.connect()
    assert query.data.result is None
|
bitlair/synlogistics | ajax/views.py | Python | agpl-3.0 | 2,831 | 0.003179 | """
SynLogistics AJAX JSON server interaction for common search boxes and
generic interactive componen | ts.
"""
#
# Copyright (C) by Wilco Baan Hofman <wilco@baanhofman.nl> 2011
#
# This program is free software; you can red | istribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.http import HttpResponse
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from accounting.models import Account
from main.models import Relation, Product
@login_required
def get_relations(request):
    """Return active relations matching the query as an Ext-style JSON object."""
    # Obscure errors ftw \o/
    if 'query' not in request.GET:
        return HttpResponse("")
    matches = Relation.objects.filter(name__icontains=request.GET['query'])
    # TODO Need filter here what type is needed: supplier, customer or both
    matches = matches.filter(Q(active_customer=1) | Q(active_supplier=1))
    matches = matches.order_by('name')
    chunks = ["{relations:["]
    for relation in matches:
        chunks.append("{ id:'" + str(relation.id) + "',name:'" + relation.name + "'},")
    chunks.append("]}")
    return HttpResponse("".join(chunks), mimetype='application/json')
@login_required
def get_accounts(request):
    """Return accounts matching the query (by number or name) as Ext-style JSON."""
    # Obscure errors ftw \o/
    if not 'query' in request.GET:
        return HttpResponse("")
    # Match the query against either the account number or the name.
    accounts = Account.objects.filter(Q(number__icontains=request.GET['query']) \
            | Q(name__icontains=request.GET['query']))
    # Fix: order_by returns a new queryset; the original discarded it, so
    # results were never actually ordered by account number.
    accounts = accounts.order_by('number')
    response = "{accounts:["
    for account in accounts:
        response += "{ id:'" + str(account.id) + "',name:'" + account.number + " " + account.name + "'},"
    response += "]}"
    return HttpResponse(response, mimetype='application/json')
@login_required
def get_products(request):
    # Return active products of one product type matching the query,
    # serialised as an Ext-style JSON object.
    # Obscure errors ftw \o/
    if not 'query' in request.GET:
        return HttpResponse("")
    products = Product.objects.filter(active=1)
    products = products.filter(name__icontains=request.GET['query'])
    # NOTE(review): `02` is a Python 2 octal literal (value 2); this module
    # is Python 2 only (see also the legacy `mimetype=` keyword below).
    products = products.filter(product_type=02)
    products = products.order_by('name')
    response = "{products:["
    for product in products:
        response += "{ id:'" + str(product.id) + "',name:'" + product.name + "'},"
    response += "]}"
    return HttpResponse(response, mimetype='application/json')
pandas-dev/pandas | pandas/core/window/expanding.py | Python | bsd-3-clause | 23,648 | 0.000634 | from __future__ import annotations
from textwrap import dedent
from typing impor | t (
TYPE_CHECKING,
Any,
Callable,
)
from pandas._typing import (
Axis,
WindowingRankType,
)
if TYPE_CHECKING:
from pandas import DataFrame, Series
from pandas.core.generic import NDFrame
from pandas.compat.numpy import function as nv
from pandas.util._decorators import doc
from pandas.core.indexers.objects import (
BaseIndexer,
ExpandingIndexer,
GroupbyIndexer,
)
from pandas.core.window.doc import (
_shared_docs,
args_compat,
create_section_ | header,
kwargs_compat,
numba_notes,
template_header,
template_returns,
template_see_also,
window_agg_numba_parameters,
window_apply_parameters,
)
from pandas.core.window.rolling import (
BaseWindowGroupby,
RollingAndExpandingMixin,
)
class Expanding(RollingAndExpandingMixin):
"""
Provide expanding window calculations.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value;
otherwise, result is ``np.nan``.
center : bool, default False
If False, set the window labels as the right edge of the window index.
If True, set the window labels as the center of the window index.
.. deprecated:: 1.1.0
axis : int or str, default 0
If ``0`` or ``'index'``, roll across the rows.
If ``1`` or ``'columns'``, roll across the columns.
method : str {'single', 'table'}, default 'single'
Execute the rolling operation per single column or row (``'single'``)
or over the entire object (``'table'``).
This argument is only implemented when specifying ``engine='numba'``
in the method call.
.. versionadded:: 1.3.0
Returns
-------
``Expanding`` subclass
See Also
--------
rolling : Provides rolling window calculations.
ewm : Provides exponential weighted functions.
Notes
-----
See :ref:`Windowing Operations <window.expanding>` for further usage details
and examples.
Examples
--------
>>> df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
**min_periods**
Expanding sum with 1 vs 3 observations needed to calculate a value.
>>> df.expanding(1).sum()
B
0 0.0
1 1.0
2 3.0
3 3.0
4 7.0
>>> df.expanding(3).sum()
B
0 NaN
1 NaN
2 3.0
3 3.0
4 7.0
"""
_attributes: list[str] = ["min_periods", "center", "axis", "method"]
    def __init__(
        self,
        obj: NDFrame,
        min_periods: int = 1,
        center=None,
        axis: Axis = 0,
        method: str = "single",
        selection=None,
    ):
        # All validation and storage happens in the shared window base
        # classes; Expanding only fixes the window semantics via
        # _get_window_indexer below.
        super().__init__(
            obj=obj,
            min_periods=min_periods,
            center=center,
            axis=axis,
            method=method,
            selection=selection,
        )
    def _get_window_indexer(self) -> BaseIndexer:
        """
        Return an indexer class that will compute the window start and end bounds
        """
        # ExpandingIndexer always yields windows from row 0 to the current row.
        return ExpandingIndexer()
@doc(
_shared_docs["aggregate"],
see_also=dedent(
"""
See Also
--------
pandas.DataFrame.aggregate : Similar DataFrame method.
pandas.Series.aggregate : Similar Series method.
"""
),
examples=dedent(
"""
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
>>> df
A B C
0 1 4 7
1 2 5 8
2 3 6 9
>>> df.ewm(alpha=0.5).mean()
A B C
0 1.000000 4.000000 7.000000
1 1.666667 4.666667 7.666667
2 2.428571 5.428571 8.428571
"""
),
klass="Series/Dataframe",
axis="",
)
def aggregate(self, func, *args, **kwargs):
return super().aggregate(func, *args, **kwargs)
agg = aggregate
    @doc(
        template_header,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="expanding",
        aggregation_description="count of non NaN observations",
        agg_method="count",
    )
    def count(self):
        # Delegates to the shared rolling/expanding implementation; the
        # public docstring is assembled by the @doc decorator above.
        return super().count()
@doc(
template_header,
create_section_header("Parameters"),
window_apply_parameters,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also[:-1],
window_method="expanding",
aggregation_description="custom aggregation function",
agg_method="apply",
)
def apply(
self,
func: Callable[..., Any],
raw: bool = False,
engine: str | None = None,
engine_kwargs: dict[str, bool] | None = None,
args: tuple[Any, ...] | None = None,
kwargs: dict[str, Any] | None = None,
):
return super().apply(
func,
raw=raw,
engine=engine,
engine_kwargs=engine_kwargs,
args=args,
kwargs=kwargs,
)
@doc(
template_header,
create_section_header("Parameters"),
args_compat,
window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also,
create_section_header("Notes"),
numba_notes[:-1],
window_method="expanding",
aggregation_description="sum",
agg_method="sum",
)
def sum(
self,
*args,
engine: str | None = None,
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
nv.validate_expanding_func("sum", args, kwargs)
return super().sum(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
@doc(
template_header,
create_section_header("Parameters"),
args_compat,
window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also,
create_section_header("Notes"),
numba_notes[:-1],
window_method="expanding",
aggregation_description="maximum",
agg_method="max",
)
def max(
self,
*args,
engine: str | None = None,
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
nv.validate_expanding_func("max", args, kwargs)
return super().max(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
@doc(
template_header,
create_section_header("Parameters"),
args_compat,
window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also,
create_section_header("Notes"),
numba_notes[:-1],
window_method="expanding",
aggregation_description="minimum",
agg_method="min",
)
def min(
self,
*args,
engine: str | None = None,
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
nv.validate_expanding_func("min", args, kwargs)
return super().min(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
@doc(
template_header,
create_section_header("Parameters"),
args_compat,
window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also,
create_section_header("Notes"),
numba_notes[:-1],
window_method="expanding",
aggregation_description="mean",
agg_method="mean",
|
mganeva/mantid | scripts/test/MultiPlotting/MultiPlottingContext_test.py | Python | gpl-3.0 | 2,543 | 0.009438 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.py3compat import | mock
from MultiPlotting.multi_plotting_context import PlottingContext
from MultiPlotting.subplot.subplot_context i | mport subplotContext
class gen_ws(object):
    """Minimal stand-in for a workspace-group object used in these tests.

    Wraps a single payload, exposed via the ``OutputWorkspace`` property,
    and reports a fixed length of 2.
    """
    def __init__(self, mock):
        self._input = "in"
        self._OutputWorkspace = mock
    def __len__(self):
        # Two entries is enough for the group-handling code under test.
        return 2
    @property
    def OutputWorkspace(self):
        return self._OutputWorkspace
class MultiPlottingContextTest(unittest.TestCase):
    """Tests for PlottingContext: adding lines to named subplots and
    propagating layout updates to every stored subplot context."""
    def setUp(self):
        self.context = PlottingContext()
    def test_add_line_1(self):
        # Plain workspace: addLine should forward it to the subplot unchanged.
        specNum = 4
        ws = mock.MagicMock()
        # add mock subplot
        subplot = mock.MagicMock()
        self.subplot = mock.create_autospec(subplotContext)
        with mock.patch("MultiPlotting.subplot.subplot_context.subplotContext.addLine") as patch:
            self.context.addSubplot("one",subplot)
            self.context.addLine("one",ws,specNum)
            # NOTE(review): assertEquals is a deprecated alias of assertEqual.
            self.assertEquals(patch.call_count,1)
            patch.assert_called_with(ws,specNum)
    def test_add_line_2(self):
        # Workspace-group wrapper: addLine should unwrap OutputWorkspace
        # before forwarding to the subplot.
        specNum = 4
        mockWS = mock.MagicMock()
        ws = gen_ws(mockWS)
        # add mock subplot
        subplot = mock.MagicMock()
        self.subplot = mock.create_autospec(subplotContext)
        with mock.patch("MultiPlotting.subplot.subplot_context.subplotContext.addLine") as patch:
            self.context.addSubplot("one",subplot)
            self.context.addLine("one",ws,specNum)
            self.assertEquals(patch.call_count,1)
            patch.assert_called_with(mockWS,specNum)
    def test_updateLayout(self):
        # update_layout must push the stored gridspec to every subplot.
        # add mocks
        figure = mock.Mock()
        self.subplot = mock.create_autospec(subplotContext)
        names = ["one","two","three"]
        for name in names:
            self.context.addSubplot(name, mock.Mock())
        gridspec = mock.Mock()
        self.context._gridspec = gridspec
        with mock.patch("MultiPlotting.subplot.subplot_context.subplotContext.update_gridspec") as patch:
            self.context.update_layout(figure)
            self.assertEquals(patch.call_count,3)
            # only last iteration survives
            patch.assert_called_with(gridspec,figure,2)
# only last iteration survives
patch.assert_called_with(gridspec,figure,2)
if __name__ == "__main__":
unittest.main()
|
Spiderlover/Toontown | toontown/cogdominium/CogdoFlyingGameGuis.py | Python | mit | 8,010 | 0.001998 | from direct.interval.IntervalGlobal import LerpFunctionInterval
from direct.gui.DirectGui import DirectLabel, DirectFrame, DGG
from dire | ct.showbase.PythonUtil import bound as clamp
from pandac.PandaModules import TextNode, NodePath
from toontown.toonbase import ToontownGlobals
import CogdoUtil
import CogdoFlyingGameGlobals as Globals
class CogdoFlyingProgressGui(DirectFrame):
    """On-screen progress meter for the cogdominium flying minigame.

    Shows one marker per toon (a laff-meter head tinted with the toon's
    head colour), positioned along a vertical line according to how far
    the toon has travelled between the start and end platforms.
    (The class name was garbled by a stray delimiter in the original;
    restored to CogdoFlyingProgressGui.)
    """
    def __init__(self, parent, level, pos2d = Globals.Gui.ProgressPos2D):
        DirectFrame.__init__(self, relief=None, state=DGG.NORMAL, sortOrder=DGG.BACKGROUND_SORT_INDEX)
        self._parent = parent
        self._level = level
        self.reparentTo(self._parent)
        self.setPos(pos2d[0], 0.0, pos2d[1])
        # Level extent along Y maps to 0..1 progress for the markers.
        self._levelStartY = self._level.startPlatform.getModel().getY()
        self._levelEndY = self._level.endPlatform.getModel().getY()
        self._levelDistance = abs(self._levelEndY - self._levelStartY)
        self._toonMarkers = {}
        self._initModel()
        return
    def destroy(self):
        self._laffMeterModel.removeNode()
        del self._laffMeterModel
        DirectFrame.destroy(self)
    def _initModel(self):
        # Laff-meter heads supply the marker art; the progress line comes
        # from the cogdominium flying GUI model.
        self._laffMeterModel = loader.loadModel('phase_3/models/gui/laff_o_meter')
        self._model = CogdoUtil.loadFlyingModel('progressMeter', group='gui')
        self._model.reparentTo(self)
        self._model.setBin('fixed', 0)
        self._lineStart = self._model.find('**/start_loc').getZ()
        self._lineEnd = self._model.find('**/end_loc').getZ()
        self._lineDistance = abs(self._lineEnd - self._lineStart)
    def addToon(self, toon):
        marker = NodePath('toon_marker-%i' % toon.doId)
        marker.reparentTo(self)
        self._getToonMarker(toon).copyTo(marker)
        marker.setColor(toon.style.getHeadColor())
        # The local toon's marker is drawn larger and above the others.
        if toon.isLocal():
            marker.setScale(Globals.Gui.LocalMarkerScale)
            marker.setBin('fixed', 10)
        else:
            marker.setScale(Globals.Gui.MarkerScale)
            marker.setBin('fixed', 5)
        marker.flattenStrong()
        self._toonMarkers[toon] = marker
    def removeToon(self, toon):
        marker = self._toonMarkers.get(toon, None)
        if marker is not None:
            marker.removeNode()
            del self._toonMarkers[toon]
        return
    def _getToonMarker(self, toon):
        return self._laffMeterModel.find('**/' + toon.style.getType() + 'head')
    def update(self):
        # Convert each toon's Y position to a fraction of the level length,
        # then place its marker along the on-screen line.
        # NOTE(review): the clamp bounds on `progress` are the level's Y
        # coordinates rather than the 0..1 fraction — confirm intended.
        for toon, marker in self._toonMarkers.items():
            progress = clamp((toon.getY() - self._levelStartY) / self._levelDistance, self._levelStartY, self._levelEndY)
            marker.setZ(clamp(self._lineStart + self._lineDistance * progress, self._lineStart, self._lineEnd))
class CogdoFlyingFuelGui(DirectFrame):
    """Propeller fuel meter for the cogdominium flying minigame.

    Shows a fuel bar with low/very-low threshold markers and a spinning
    propeller whose visible blade count mirrors the player's remaining
    blades.
    """
    def __init__(self, parent):
        DirectFrame.__init__(self, relief=None, state=DGG.NORMAL, sortOrder=DGG.BACKGROUND_SORT_INDEX)
        self.reparentTo(parent)
        self.active = 0
        self._initModel()
        self._initIntervals()
        return
    def _initModel(self):
        # Build the meter from the flying GUI model and position the low-fuel
        # indicators proportionally between the bar's bottom and top locators.
        self.setPos(Globals.Gui.FuelPos2D[0], 0.0, Globals.Gui.FuelPos2D[1])
        self.gui = CogdoUtil.loadFlyingModel('propellerMeter', group='gui')
        self.gui.reparentTo(self)
        self.gui.setBin('fixed', 0)
        self.healthBar = self.gui.find('**/healthBar')
        self.healthBar.setBin('fixed', 1)
        self.healthBar.setColor(*Globals.Gui.FuelNormalColor)
        bottomBarLocator = self.gui.find('**/bottomOfBar_loc')
        bottomBarPos = bottomBarLocator.getPos(render)
        topBarLocator = self.gui.find('**/topOfBar_loc')
        topBarPos = topBarLocator.getPos(render)
        zDist = topBarPos.getZ() - bottomBarPos.getZ()
        self.fuelLowIndicator = self.gui.find('**/fuelLowIndicator')
        self.fuelLowIndicator.setBin('fixed', 2)
        pos = self.fuelLowIndicator.getPos(render)
        newPos = pos
        newPos.setZ(bottomBarPos.getZ() + zDist * Globals.Gameplay.FuelLowAmt)
        self.fuelLowIndicator.setPos(render, newPos)
        self.fuelVeryLowIndicator = self.gui.find('**/fuelVeryLowIndicator')
        self.fuelVeryLowIndicator.setBin('fixed', 2)
        pos = self.fuelVeryLowIndicator.getPos(render)
        newPos = pos
        newPos.setZ(bottomBarPos.getZ() + zDist * Globals.Gameplay.FuelVeryLowAmt)
        self.fuelVeryLowIndicator.setPos(render, newPos)
        self.propellerMain = self.gui.find('**/propellers')
        self.propellerMain.setBin('fixed', 3)
        self.propellerHead = self.gui.find('**/propellerHead')
        self.propellerHead.setBin('fixed', 4)
        # Collect numbered blade nodes until the model runs out of them.
        self.blades = []
        self.activeBlades = []
        index = 1
        blade = self.propellerMain.find('**/propeller%d' % index)
        while not blade.isEmpty():
            self.blades.append(blade)
            index += 1
            blade = self.propellerMain.find('**/propeller%d' % index)
        for blade in self.blades:
            self.activeBlades.append(blade)
        self.bladeNumberLabel = DirectLabel(parent=self.propellerHead, relief=None, pos=(Globals.Gui.FuelNumBladesPos2D[0], 0, Globals.Gui.FuelNumBladesPos2D[1]), scale=Globals.Gui.FuelNumBladesScale, text=str(len(self.activeBlades)), text_align=TextNode.ACenter, text_fg=(0.0,
         0.0,
         -0.002,
         1), text_shadow=(0.75, 0.75, 0.75, 1), text_font=ToontownGlobals.getInterfaceFont())
        self.bladeNumberLabel.setBin('fixed', 5)
        return
    def _initIntervals(self):
        # Refuel animation for the bar and a looping spin for the propeller.
        self._healthIval = LerpFunctionInterval(self.healthBar.setSz, fromData=0.0, toData=1.0, duration=2.0)
        self.baseSpinDuration = 2.0
        self._spinIval = LerpFunctionInterval(self.propellerMain.setR, fromData=0.0, toData=-360.0, duration=self.baseSpinDuration)
    def show(self):
        DirectFrame.show(self)
        self._spinIval.loop()
    def hide(self):
        DirectFrame.hide(self)
        self._spinIval.pause()
    def resetBlades(self):
        self.setBlades(len(self.blades))
    def setBlades(self, fuelState):
        # Show fuelState - 1 blades; ignore values outside the known states.
        if fuelState not in Globals.Gameplay.FuelStates:
            return
        numBlades = fuelState - 1
        if len(self.activeBlades) != numBlades:
            for i in xrange(len(self.activeBlades)):
                blade = self.activeBlades.pop()
                blade.stash()
            if numBlades > len(self.blades):
                numBlades = len(self.blades)
            for i in xrange(numBlades):
                blade = self.blades[i]
                self.activeBlades.append(blade)
                blade.unstash()
            self.bladeNumberLabel['text'] = str(len(self.activeBlades))
            self.bladeNumberLabel.setText()
        self.updateHealthBarColor()
    def bladeLost(self):
        # Hide one blade and refresh the counter and bar colour.
        if len(self.activeBlades) > 0:
            blade = self.activeBlades.pop()
            blade.stash()
            self.bladeNumberLabel['text'] = str(len(self.activeBlades))
            self.bladeNumberLabel.setText()
            self.updateHealthBarColor()
    def updateHealthBarColor(self):
        color = Globals.Gui.NumBlades2FuelColor[len(self.activeBlades)]
        self.healthBar.setColor(*color)
    def setPropellerSpinRate(self, newRate):
        self._spinIval.setPlayRate(newRate)
    def setRefuelLerpFromData(self):
        # Start the refuel animation from the bar's current fill level.
        startScale = self.healthBar.getSz()
        self._healthIval.fromData = startScale
    def setFuel(self, fuel):
        self.fuel = fuel
    def update(self):
        # Bar fill tracks the current fuel fraction directly.
        self.healthBar.setSz(self.fuel)
    def destroy(self):
        # Tear down intervals and scene-graph nodes before DirectFrame cleanup.
        self.bladeNumberLabel.removeNode()
        self.bladeNumberLabel = None
        self._healthIval.clearToInitial()
        del self._healthIval
        self.healthBar = None
        self.fuelLowIndicator = None
        self.fuelVeryLowIndicator = None
        self.propellerMain = None
        self.propellerHead = None
        del self.blades[:]
        del self.activeBlades[:]
        self.gui.detachNode()
        self.gui = None
        DirectFrame.destroy(self)
        return
|
li-yuntao/SiliconLives | PytorchModels/tutorials/advanced_tutorial.py | Python | gpl-3.0 | 15,958 | 0.000251 | # -*- coding: utf-8 -*-
r"""
Advanced: Making Dynamic Decisions and the Bi-LSTM CRF
======================================================
Dynamic versus Static Deep Learning Toolkits
--------------------------------------------
Pytorch is a *dynamic* neural network kit. Another example of a dynamic
kit is `Dynet <https://github.com/clab/dynet>`__ (I mention this because
working with Pytorch and Dynet is similar. If you see an example in
Dynet, it will probably help you implement it in Pytorch). The opposite
is the *static* tool kit, which includes Theano, Keras, TensorFlow, etc.
The core difference is the following:
* In a static toolkit, you define
a computation graph once, compile it, and then stream instances to it.
* In a dynamic toolkit, you define a computation graph *for each
instance*. It is never compiled and is executed on-the-fly
Without a lot of experience, it is difficult to appreciate the
difference. One example is to suppose we want to build a deep
constituent parser. Suppose our model involves roughly the following
steps:
* We build the tree bottom up
* Tag the root nodes (the words of the sentence)
* From there, use a neural network and the embeddings
of the words to find combinations that form constituents. Whenever you
form a new constituent, use some sort of technique to get an embedding
of the constituent. In this case, our network architecture will depend
completely on the input sentence. In the sentence "The green cat
scratched the wall", at some point in the model, we will want to combine
the span :math:`(i,j,r) = (1, 3, \text{NP})` (that is, an NP constituent
spans word 1 to word 3, in this case "The green cat").
However, another sentence might be "Somewhere, the big fat cat scratched
the wall". In this sentence, we will want to form the constituent
:math:`(2, 4, NP)` at some point. The constituents we will want to form
will depend on the instance. If we just compile the computation graph
once, as in a static toolkit, it will be exceptionally difficult or
impossible to program this logic. In a dynamic toolkit though, there
isn't just 1 pre-defined computation graph. There can be a new
computation graph for each instance, so this problem goes away.
Dynamic toolkits also have the advantage of being easier to debug and
the code more closely resembling the host language (by that I mean that
Pytorch and Dynet look more like actual Python code than Keras or
Theano).
Bi-LSTM Conditional Random Field Discussion
-------------------------------------------
For this section, we will see a full, complicated example of a Bi-LSTM
Conditional Random Field for named-entity recognition. The LSTM tagger
above is typically sufficient for part-of-speech tagging, but a sequence
model like the CRF is really essential for strong performance on NER.
Familiarity with CRF's is assumed. Although this name sounds scary, all
the model is is a CRF but where an LSTM provides the features. This is
an advanced model though, far more complicated than any earlier model in
this tutorial. If you want to skip it, that is fine. To see if you're
ready, see if you can:
- Write the recurrence for the viterbi variable at step i for tag k.
- Modify the above recurrence to compute the forward variables instead.
- Modify again the above recurrence to compute the forward variables in
log-space (hint: log-sum-exp)
If you can do those three things, you should be able to understand the
code below. Recall that the CRF computes a conditional probability. Let
:math:`y` be a tag sequence and :math:`x` an input sequence of words.
Then we compute
.. math:: P(y|x) = \frac{\exp{(\text{Score}(x, y)})}{\sum_{y'} \exp{(\text{Score}(x, y')})}
Where the score is determined by defining some log potentials
:math:`\log \psi_i(x,y)` such that
.. math:: \text{Score}(x,y) = \sum_i \log \psi_i(x,y)
To make the partition function tractable, the potentials must look only
at local features.
In the Bi-LSTM CRF, we define two kinds of potentials: emission and
transition. The emission potential for the word at index :math:`i` comes
from the hidden state of the Bi-LSTM at timestep :math:`i`. The
transition scores are stored in a :math:`|T|x|T|` matrix
:math:`\textbf{P}`, where :math:`T` is the tag set. In my
implementation, :math:`\textbf{P}_{j,k}` is the score of transitioning
to tag :math:`j` from tag :math:`k`. So:
.. math:: \text{Score}(x,y) = \sum_i \log \psi_\text{EMIT}(y_i \rightarrow x_i) + \log \psi_\text{TRANS}(y_{i-1} \rightarrow y_i)
.. math:: = \sum_i h_i[y_i] + \textbf{P}_{y_i, y_{i-1}}
where in this second expression, we think of the tags as being assigned
unique non-negative indices.
If the above discussion was too brief, you can check out
`this <http://www.cs.columbia.edu/%7Emcollins/crf.pdf>`__ write up from
Michael Collins on CRFs.
Implementation Notes
--------------------
The example below implements the forward algorithm in log space to
compute the partition function, and the viterbi algorithm to | decode.
Backpropagation will compute the gradients automatically for us. We
don't have to do anything by hand.
The implementation is not optimized. If you understand what is going on,
you'll probably quickly see that iterating over the next tag in the
forward algorithm could probably be done in one big operation. I wanted
to code to be more readable. If you want to make the relevant change,
you coul | d probably use this tagger for real tasks.
"""
# Author: Robert Guthrie
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
torch.manual_seed(1)
#####################################################################
# Helper functions to make the code more readable.
def to_scalar(var):
# returns a python float
return var.view(-1).data.tolist()[0]
def argmax(vec):
# return the argmax as a python int
_, idx = torch.max(vec, 1)
return to_scalar(idx)
def prepare_sequence(seq, to_ix):
idxs = [to_ix[w] for w in seq]
tensor = torch.LongTensor(idxs)
return autograd.Variable(tensor)
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
max_score = vec[0, argmax(vec)]
max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
return max_score + \
torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
#####################################################################
# Create model
class BiLSTM_CRF(nn.Module):
def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
super(BiLSTM_CRF, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.tag_to_ix = tag_to_ix
self.tagset_size = len(tag_to_ix)
self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
num_layers=1, bidirectional=True)
# Maps the output of the LSTM into tag space.
self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)
# Matrix of transition parameters. Entry i,j is the score of
# transitioning *to* i *from* j.
self.transitions = nn.Parameter(
torch.randn(self.tagset_size, self.tagset_size))
# These two statements enforce the constraint that we never transfer
# to the start tag and we never transfer from the stop tag
self.transitions.data[tag_to_ix[START_TAG], :] = -10000
self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000
self.hidden = self.init_hidden()
def init_hidden(self):
return (autograd.Variable(torch.randn(2, 1, self.hidden_dim // 2)),
autograd.Variable(torch.randn(2, 1, self.hidden_dim // 2)))
def _forward_alg(self, feats):
# Do the forward algorithm to compute the partition function
init_alphas = torch.Tensor(1, self.tagset_size).fill_(-10000.)
# START_TAG has all of the score.
init_alphas[0][self.tag_to_ix[START_TAG]] = 0.
# Wrap in a variable so that we will get automatic backprop
f |
KristianOellegaard/python-nagios-frontend | balbec/balbec_twisted.py | Python | agpl-3.0 | 1,355 | 0.008118 | import argparse
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.resource import Resource
import os
from balbec.jsonhandler import JSONHandler
from balbec.xmlhandler import XmlHandler
ROOT = lambda base : os.path.join(os.path.dirname(__file__), base).replace('\\','/')
class StatusPage(Resource):
isLeaf = True
| config_dir = None
def render_GET(self, request):
if request.received_headers["accept"] == "text/xml":
handler = XmlHandler(self.config_dir)
output = handler.xml()
elif request.received_headers["accept"] == "application/json":
handler = JSONHandler(self.config_dir)
output = handler.json()
else:
output = open(ROOT("static/index.html")).read()
| return output
def main():
parser = argparse.ArgumentParser(description='Run an instance of python-nagios-frontend.')
parser.add_argument('--port', dest='www_port', default=8880, help='Port for the webserver')
parser.add_argument('--configdir', dest='config_dir', default="/etc/python-nagios-frontend/", help='Path to the configuration files')
args = parser.parse_args()
resource = StatusPage()
resource.config_dir = args.config_dir
factory = Site(resource)
reactor.listenTCP(int(args.www_port), factory)
reactor.run() |
DFEC-R2D2/r2d2 | pygecko/library/battery.py | Python | mit | 1,763 | 0.038003 |
from __future__ import division
from __future__ import print_function
import time
class BatteryLED(object):
def __init__(self, led_matrix):
"""
Handles the battery LED matrix display
"""
self.led = led_matrix
def display(self, value):
"""
Display a value to the LED matrix
"""
battled = self.led
# Sensor Reading
# arduinoSerialData.write('2')
# Grabs Sensor Data
# batt = float(arduinoSerialData.readline())
# Added 99 to prevent Static Mode Sensor Reading Collision
batt = value + 99.55
print(batt)
# 100 to 87.5 Battery
if batt > 104.13:
battled.clear()
for i in range(0, 8):
for j in range(0, 8):
battled.set(i, j, 1)
# 75 Battery
elif batt > 103.94 and batt <= 104.13:
battled.clear()
for i in range(2, 8):
for j in range(0, 8):
battled.set(i, j, 1)
# 62.5 Battery
elif batt > 103.75 an | d batt <= 103.94:
battled.clear()
for i in range(3, 8):
for j in range(0, 8):
battled.set(i, j, 1)
# 50 Battery
elif batt > 103.56 and batt <= 103.75:
battled.clear()
for i in range(4, 8):
for j in range(0, 8):
| battled.set(i, j, 3)
# 37.5 Battery
elif batt > 103.40 and batt <= 103.56:
battled.clear()
for i in range(5, 8):
for j in range(0, 8):
battled.set(i, j, 3)
# 25 Battery
elif batt > 103.19 and batt <= 103.40:
battled.clear()
for i in range(6, 8):
for j in range(0, 8):
battled.set(i, j, 2)
# 12.5 Battery
elif batt > 103.1 and batt <= 103.19:
battled.clear()
for i in range(7, 8):
for j in range(0, 8):
battled.set(i, j, 2)
# 0 Battery
elif batt < 103.1:
battled.clear()
for i in range(0, 8):
for j in range(0, 8):
battled.set(i, j, 2)
battled.write()
time.sleep(1.5)
|
CZ-NIC/knot | tests-extra/tests/dnssec/offline_ksk/test.py | Python | gpl-3.0 | 9,849 | 0.003148 | #!/usr/bin/env python3
"""
Test of offline signing using KSR and SKR with pre-planned KSK rollover and automatic ZSK rollover.
"""
import collections
import os
import shutil
import datetime
import subprocess
import time
import random
from subprocess import check_call
from dnstest.utils import *
from dnstest.keys import Keymgr
from dnstest.test import Test
def cripple_skr(skr_in, skr_out):
rrsigs_total = 9
after_rrsig = -1000
rrsig_now = 0
rrsig_chosen = random.randint(1, rrsigs_total)
with open(skr_in, "r") as fin:
| with | open(skr_out, "w") as fout:
for linein in fin:
lineout = linein
linesplit = linein.split()
if len(linesplit) > 2 and linesplit[2] == "RRSIG":
after_rrsig = 0
rrsig_now += 1
else:
after_rrsig += 1
if after_rrsig == 3 and rrsig_now == rrsig_chosen:
lineout = linein.lower() # this crippels the rrsig
fout.write(lineout)
# check zone if keys are present and used for signing
def check_zone(server, zone, dnskeys, dnskey_rrsigs, soa_rrsigs, msg):
qdnskeys = server.dig("example.com", "DNSKEY", bufsize=4096)
found_dnskeys = qdnskeys.count("DNSKEY")
qdnskeyrrsig = server.dig("example.com", "DNSKEY", dnssec=True, bufsize=4096)
found_rrsigs = qdnskeyrrsig.count("RRSIG")
qsoa = server.dig("example.com", "SOA", dnssec=True, bufsize=4096)
found_soa_rrsigs = qsoa.count("RRSIG")
check_log("DNSKEYs: %d (expected %d)" % (found_dnskeys, dnskeys));
check_log("RRSIGs: %d (expected %d)" % (found_soa_rrsigs, soa_rrsigs));
check_log("DNSKEY-RRSIGs: %d (expected %d)" % (found_rrsigs, dnskey_rrsigs));
if found_dnskeys != dnskeys:
set_err("BAD DNSKEY COUNT: " + msg)
detail_log("!DNSKEYs not published and activated as expected: " + msg)
if found_soa_rrsigs != soa_rrsigs:
set_err("BAD RRSIG COUNT: " + msg)
detail_log("!RRSIGs not published and activated as expected: " + msg)
if found_rrsigs != dnskey_rrsigs:
set_err("BAD DNSKEY RRSIG COUNT: " + msg)
detail_log("!RRSIGs not published and activated as expected: " + msg)
detail_log(SEP)
server.zone_backup(zone, flush=True)
server.zone_verify(zone)
def wait_for_rrsig_count(t, server, rrtype, rrsig_count, timeout):
endtime = time.monotonic() + timeout - 0.5
first = True
while True:
qdnskeyrrsig = server.dig("example.com", rrtype, dnssec=True, bufsize=4096)
found_rrsigs = qdnskeyrrsig.count("RRSIG")
if found_rrsigs == rrsig_count:
break
if first:
first = False
# Verify the zone instead of a dumb sleep
server.zone_backup(zone, flush=True)
server.zone_verify(zone)
else:
t.sleep(0.5)
if time.monotonic() > endtime:
break
def wait_for_dnskey_count(t, server, dnskey_count, timeout):
endtime = time.monotonic() + timeout - 0.5
first = True
while True:
qdnskeyrrsig = server.dig("example.com", "DNSKEY", dnssec=True, bufsize=4096)
found_dnskeys = qdnskeyrrsig.count("DNSKEY")
if found_dnskeys == dnskey_count:
break
if first:
first = False
# Verify the zone instead of a dumb sleep
server.zone_backup(zone, flush=True)
server.zone_verify(zone)
else:
t.sleep(0.5)
if time.monotonic() > endtime:
break
def writef(filename, contents):
with open(filename, "w") as f:
f.write(contents)
t = Test()
knot = t.server("knot")
ZONE = "example.com."
FUTURE = 55
TICK = 5
STARTUP = 10
NONSENSE = 4396
zone = t.zone(ZONE)
t.link(zone, knot)
knot.zonefile_sync = 24 * 60 * 60
knot.dnssec(zone).enable = True
knot.dnssec(zone).manual = True
knot.dnssec(zone).offline_ksk = True
knot.dnssec(zone).alg = "ECDSAP384SHA384"
knot.dnssec(zone).dnskey_ttl = 2
knot.dnssec(zone).zone_max_ttl = 3
knot.dnssec(zone).zsk_lifetime = STARTUP + 6 * TICK # see ksk1 lifetime
knot.dnssec(zone).ksk_lifetime = NONSENSE
knot.dnssec(zone).propagation_delay = TICK - 2
knot.dnssec(zone).cds_publish = "rollover"
knot.dnssec(zone).rrsig_lifetime = 15
knot.dnssec(zone).rrsig_refresh = 6
knot.dnssec(zone).rrsig_prerefresh = 1
# needed for keymgr
knot.gen_confile()
signer = t.server("knot")
t.link(zone, signer)
# mandatory options
signer.dnssec(zone).enable = True
signer.dnssec(zone).manual = True
signer.dnssec(zone).offline_ksk = True
# needed options
signer.dnssec(zone).alg = "ECDSAP384SHA384"
# options without any effect
signer.dnssec(zone).dnskey_ttl = int(NONSENSE / 10)
signer.dnssec(zone).zone_max_ttl = NONSENSE
signer.dnssec(zone).ksk_lifetime = NONSENSE * 2
signer.dnssec(zone).propagation_delay = int(NONSENSE / 10)
signer.dnssec(zone).cds_publish = random.choice(["none", "rollover"])
signer.dnssec(zone).rrsig_lifetime = 6
signer.dnssec(zone).rrsig_refresh = 2
signer.dnssec(zone).rrsig_prerefresh = 1
# needed for keymgr
signer.gen_confile()
def tickf(when):
return "+%d" % (STARTUP + when * TICK)
# generate keys, including manual KSK rollover on the beginning
key_ksk1 = signer.key_gen(ZONE, ksk="true", created="+0", publish="+0", ready="+0", active="+0", retire=tickf(4), remove=tickf(5))
key_ksk2 = signer.key_gen(ZONE, ksk="true", created="+0", publish=tickf(2), ready=tickf(3), active=tickf(4), retire="+2h", remove="+3h")
key_zsk1 = knot.key_gen(ZONE, ksk="false", created="+0", publish="+0", active="+0")
# pregenerate keys, exchange KSR, pre-sign it, exchange SKR
KSR = knot.keydir + "/ksr"
SKR = knot.keydir + "/skr"
SKR_BROKEN = SKR + "_broken"
Keymgr.run_check(knot.confile, ZONE, "pregenerate", "+20", "+" + str(FUTURE))
_, out, _ = Keymgr.run_check(knot.confile, ZONE, "generate-ksr", "+0", "+" + str(FUTURE))
writef(KSR, out)
_, out, _ = Keymgr.run_check(signer.confile, ZONE, "sign-ksr", KSR)
writef(SKR, out)
cripple_skr(SKR, SKR_BROKEN)
_, _, err = Keymgr.run_check(knot.confile, ZONE, "validate-skr", SKR_BROKEN)
if err.split()[0] != "Error:":
set_err("keymgr validate-skr")
detail_log(err)
Keymgr.run_fail(knot.confile, ZONE, "import-skr", SKR_BROKEN)
Keymgr.run_check(knot.confile, ZONE, "import-skr", SKR)
TICK_SAFE = TICK + TICK // 2;
# run it and see if the signing and rollovers work well
t.start()
knot.zone_wait(zone)
check_zone(knot, zone, 2, 1, 1, "init")
wait_for_dnskey_count(t, knot, 3, STARTUP + TICK_SAFE)
check_zone(knot, zone, 3, 2, 1, "KSK rollover: publish")
wait_for_dnskey_count(t, knot, 2, TICK_SAFE * 3)
check_zone(knot, zone, 2, 1, 1, "KSK rollover: finished")
wait_for_dnskey_count(t, knot, 3, TICK_SAFE * 2)
check_zone(knot, zone, 3, 1, 1, "ZSK rollover: running")
wait_for_dnskey_count(t, knot, 2, TICK_SAFE * 2)
check_zone(knot, zone, 2, 1, 1, "ZSK rollover: done")
# re-generate keys, re-eschange KSR and SKR and re-import it over previous
STARTUP = 1
signer.key_set(ZONE, key_ksk2, retire=tickf(3), remove=tickf(4))
key_ksk3 = signer.key_gen(ZONE, ksk="true", created="+0", publish=tickf(1), ready=tickf(2), active=tickf(3), retire="+4h", remove="+5h")
knot.dnssec(zone).zsk_lifetime = 8 * TICK
knot.gen_confile()
KSR = KSR + "2"
SKR = SKR + "2"
Keymgr.run_check(knot.confile, ZONE, "pregenerate", "+" + str(FUTURE))
_, out, _ = Keymgr.run_check(knot.confile, ZONE, "generate-ksr", "+0", "+" + str(FUTURE))
writef(KSR, out)
_, out, _ = Keymgr.run_check(signer.confile, ZONE, "sign-ksr", KSR)
writef(SKR, out)
Keymgr.run_check(knot.confile, ZONE, "import-skr", SKR)
knot.ctl("zone-keys-load")
check_zone(knot, zone, 2, 1, 1, "init2")
wait_for_dnskey_count(t, knot, 3, STARTUP + TICK_SAFE)
check_zone(knot, zone, 3, 2, 1, "KSK rollover2: publish")
wait_for_dnskey_count(t, knot, 2, TICK_SAFE * 3)
check_zone(knot, zone, 2, 1, 1, "KSK rollover2: finished")
wait_for_dnskey_count(t, knot, 3, TICK_SAFE * 3)
check_zone(knot, zone, 3, 1, 1, "ZSK rollover2: running")
wait_for_dnskey_count(t, knot, 2, TICK_SAFE * 2)
check_zone(knot, zone, 2, 1, 1, "ZSK rollover2: done")
# prepare algorithm r |
arturh85/projecteuler | python/src/problem040.py | Python | mit | 1,911 | 0.002616 | '''
Problem 40
28 March 2003
An irrational decimal fraction is created by concatenating the positive integers:
0.123456789101112131415161718192021...
It can be seen that the 12th digit of the fractional part is 1.
If dn represents the nth digit of the fractional part, find the value of the following expression.
d1 x d10 x d100 x d1000 x d10000 x d100000 x d1000000
-------------------- | --------------------------------------
Created on 30.01.2015
@author: ahallmann
'''
import unittest
import timeit
import operator
def generate_problem_fraction():
i = 1
while True:
digits = list | (str(i))
for digit in digits:
yield digit
i += 1
def find_generated_values_at(generator, positions):
i = 1
last_position = max(positions)
values = []
for value in generator():
if value is None:
break
if i in positions:
values.append(value)
i += 1
if i > last_position:
break
return map(int, values)
def solve():
return reduce(operator.mul, find_generated_values_at(generate_problem_fraction, [1, 10, 100, 1000, 10000, 100000, 1000000]))
class Test(unittest.TestCase):
def test_sample(self):
self.assertEqual([1], find_generated_values_at(generate_problem_fraction, [12]))
self.assertEqual([1, 0, 1, 1], find_generated_values_at(generate_problem_fraction, [10, 11, 12, 13]))
pass
def test_answer(self):
self.assertEqual(210, solve())
pass
# -----------------------------------------
def run():
return solve()
if __name__ == '__main__':
run()
unittest.main()
# if __name__ == '__main__':
# t = timeit.Timer("run()", "from __main__ import run")
# count = 1
# print(str(t.timeit(count)) + " seconds for " + str(count) + " runs")
|
ronhanson/python-jobmanager-api | jobmanager/api.py | Python | mit | 16,531 | 0.004174 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
"""
(c) 2015 Ronan Delacroix
Python Job Manager Server API
:author: Ronan Delacroix
"""
from flask import Flask | , request, Response, render_template, url_for
from functools import wraps
import json
from flask.views import MethodView
import mongoengine.base.common
import mongoengine.errors
import mongoengine.connection
from jobmanager.common as common
from jobmanager.common.job import Job
from jobmana | ger.common.host import Host, HostStatus
import tbx
import tbx.text
import tbx.code
import logging
import arrow
import traceback
from bson.code import Code
from collections import defaultdict
from datetime import datetime, timedelta
# Flask
app = Flask(__name__, static_folder='static', static_url_path='/static', template_folder='templates')
app.secret_key = "jobmanager-api-secret-key-01"
app.jinja_env.lstrip_blocks = True
app.jinja_env.trim_blocks = True
APP_NAME = "Job Manager"
def serialize_response(result):
mimetype = request.accept_mimetypes.best_match(tbx.text.mime_rendering_dict.keys(), default='application/json')
if request.args.get('format') and request.args.get('format') in tbx.text.mime_shortcuts.keys():
mimetype = tbx.text.mime_shortcuts.get(request.args.get('format'))
code = 200
if isinstance(result, common.BaseDocument):
result = result.to_safe_dict()
if isinstance(result, common.SerializableQuerySet):
result = result.to_safe_dict()
assert isinstance(result, dict) or isinstance(result, list)
result = common.change_keys(result, common.replace_cls_type)
return Response(tbx.text.render_dict_from_mimetype(result, mimetype), status=code, mimetype=mimetype)
#decorator
def serialize(func):
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
return serialize_response(result)
return wrapper
def plain_text(func):
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
return Response(result, status=200, mimetype='text/plain')
return wrapper
def register_api(view, endpoint, url, pk='uuid', pk_type='string(length=11)'):
view_func = view.as_view(endpoint)
app.add_url_rule(url, defaults={pk: None}, view_func=view_func, methods=['GET',])
app.add_url_rule(url, view_func=view_func, methods=['POST',])
app.add_url_rule('%s<%s:%s>' % (url, pk_type, pk), view_func=view_func, methods=['GET', 'PUT', 'DELETE'])
def import_from_name(module_name):
globals()[module_name] = __import__(module_name)
def find_job_type(job_type, module=None):
cls = None
try:
cls = mongoengine.base.common.get_document(job_type)
except mongoengine.errors.NotRegistered:
pass
additionnal_error_info = ""
if not cls and module:
try:
import_from_name(module)
except ImportError:
additionnal_error_info = " and module '%s' could not be imported" % module
try:
cls = mongoengine.base.common.get_document(job_type)
except mongoengine.errors.NotRegistered:
pass
if not cls:
raise Exception("Job type '%s' is unknown %s." % (job_type, additionnal_error_info))
return cls
###
# API Definition
###
class JobAPI(MethodView):
decorators = [serialize]
def get(self, uuid=None):
if uuid:
return Job.objects.get(uuid=uuid)
else:
lim = int(request.args.get('limit', 10))
off = int(request.args.get('offset', 0))
job_type = request.args.get('type', None)
host = request.args.get('host', None)
filters = {}
if job_type:
filters['_cls'] = job_type
if host:
filters['host'] = host
return Job.objects(**filters).order_by('-created')[off:lim]
@classmethod
def live(cls, uuid=''):
args = dict(request.args.items())
args['job_uuid'] = ''
if uuid:
job = Job.objects.get(uuid=uuid)
if not job:
raise Exception("Job %s not found" % uuid)
args['job_uuid'] = job.uuid
return render_template('job/live.html',
app_name=APP_NAME,
title=APP_NAME+" - %s Job view" % (uuid),
**args)
@classmethod
@serialize
def doc(cls, job_class_name=''):
if job_class_name:
job_class = find_job_type(job_class_name)
if not job_class:
raise Exception("Job %s not found" % job_class_name)
return job_class.get_doc()
else:
return {j.__name__: j.__doc__.strip() for j in tbx.code.get_subclasses(Job)}
def post(self):
data = request.data.decode('UTF-8')
data = json.loads(data)
job_type = data.pop('type', None)
module = data.pop('module', None)
if not job_type:
raise Exception("Job has no 'type' field or is not set (value='%s')." % type)
cls = find_job_type(job_type, module=module)
new_data = common.change_keys(data, replace_type_cls)
new_job = cls.from_json(tbx.text.render_json(new_data))
new_job.save()
logging.info("New Job created")
logging.info(str(new_job))
return new_job
def delete(self, uuid):
# delete a single job
raise NotImplementedError()
def put(self, uuid):
job = Job.objects.get(uuid=uuid)
data = request.data.decode('UTF-8')
data = json.loads(data)
job.update(**data)
job.reload()
logging.info("Updated Job %s" % uuid)
logging.info(str(job))
job.save()
return job
###
# API Definition
###
class HostAPI(MethodView):
decorators = [serialize]
def get(self, hostname=None):
lim = int(request.args.get('limit', 10))
off = int(request.args.get('offset', 0))
step = int(request.args.get('step', 0))
if hostname:
host = Host.objects.get(hostname=hostname)
if not host:
raise Exception("Host %s not found." % hostname)
return host.to_safe_dict(alive=True, with_history=True, limit=lim, offset=off, step=step)
else:
if 'alive' in request.args:
alive = int(request.args.get('alive', 1))
alive_hosts = HostStatus.objects(created__gte=datetime.utcnow() - timedelta(minutes=alive)).aggregate({"$group": { "_id": "$host.hostname" }})
alive_hostnames = [cs['_id'] for cs in alive_hosts]
return Host.objects(hostname__in=alive_hostnames).order_by('-created')[off:lim].to_safe_dict()
else:
return Host.objects.order_by('-created')[off:lim].to_safe_dict()
def put(self, hostname):
host = Host.objects.get(hostname=hostname)
data = request.data.decode('UTF-8')
data = json.loads(data)
host.update(**data)
host.reload()
logging.info("Updated Host %s" % hostname)
logging.info(str(host))
host.save()
return host
@classmethod
@serialize
def stats(cls, hostname):
lim = int(request.args.get('limit', 50))
db = mongoengine.connection.get_db()
hosts = Host.objects(hostname=hostname)
if not hosts:
raise Exception("No host found with hostname %s" % hostname)
host = hosts[0]
job_count = Job.objects(hostname=hostname).count()
jobs = [
{'uuid': j.uuid, 'status': j.status, 'completion': j.completion, 'type': j._cls.replace('Job.', ''), 'created': j.created}
for j in Job.objects(hostname=hostname).order_by('-created')[0:lim]
]
t = Job.objects(hostname=hostname).explain()
job_statuses_pipeline = [
#{'$match': {'host.hostname': hostname}},
{'$match': {'hostname': hostname}},
{'$limit': lim},
{'$sort': {"created": -1}},
{'$group': {
'_id': "$status",
|
the-packet-thrower/pynet | Week02/snmp-test.py | Python | gpl-3.0 | 281 | 0.003559 | from snmp_helper import snmp_ | get_oid,snmp_extract
COMMU | NITY_STRING = 'galileo'
SNMP_PORT = 161
IP = '184.105.247.70'
a_device = (IP, COMMUNITY_STRING, SNMP_PORT)
OID = '1.3.6.1.2.1.1.1.0'
snmp_data = snmp_get_oid(a_device, oid=OID)
output = snmp_extract(snmp_data)
print output
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.