repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
kwss/keystone | keystone/common/wsgi_server.py | Python | apache-2.0 | 4,979 | 0.000803 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import ssl
import sys
# NOTE(mikal): All of this is because if dnspython is present in your
# environment then eventlet monkeypatches socket.getaddrinfo() with an
# implementation which doesn't work for IPv6. What we're checking here is
# that the magic environment variable was set when the import happened.
if ('eventlet' in sys.modules and
os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
raise ImportError('eventlet imported before '
'keystone.common.wsgi_server '
'(EVENTLET_NO_GREENDNS env var set to %s)'
% os.environ.get('EVENTLET_NO_GREENDNS'))
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
import eventlet
import eventlet.wsgi
from keystone.common import logging
from keystone.common import wsgi
LOG = logging.getLogger(__name__)
def monkey_patch_eventlet(monkeypatch_thread=None):
if monkeypatch_thread is None:
monkeypatch_thread = not os.getenv('STANDARD_THREADS')
eventlet.patcher.monkey_patch(all=False, socket=True, time=True,
thread=monkeypatch_thread)
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, application, host=None, port=None, threads=1000):
self.application = application
self.host = host or '0.0.0.0'
self.port = port or 0
self.pool = eventlet.GreenPool(threads)
self.socket_info = {}
self.greenthread = None
self.do_ssl = False
self.cert_required = False
def start(self, key=None, backlog=128):
"""Run a WSGI server with the given application."""
LOG.debug(_('Starting %(arg0)s on %(host)s:%(port)s') %
{'arg0': sys.argv[0],
'host': self.host,
'port': self.port})
# TODO(dims): eventlet's green dns/socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
info = socket.getaddrinfo(self.host,
self.port,
socket.AF_UNSPEC,
socket.SOCK_STREAM)[0]
_socket = eventlet.listen(info[-1],
family=info[0],
backlog=backlog)
if key:
self.socket_info[key] = _socket.getsockname()
# SSL is enabled
if self.do_ssl:
if self.cert_required:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
sslsocket = eventlet.wrap_ssl(_socket, certfile=self.certfile,
keyfile=self.keyfile,
server_side=True,
cert_reqs=cert_reqs,
ca_certs=self.ca_certs)
_socket = sslsocket
self.greenthread = self.pool.spawn(self._run,
self.application,
_socket)
def set_ssl(self, certfile, keyfile=None, ca_certs=None,
cert_required=True):
self.certfile = certfile
self.keyfile = keyfile
self.ca_certs = ca_certs
| self.cert_required = cert_required
self.do_ssl = True
def kill(self):
if self.greenthread:
self.greenthread.kill()
def wait(self):
"""Wait until all servers have completed running."""
try:
self.pool.waitall()
except KeyboardInterrupt:
pass
def _run | (self, application, socket):
"""Start a WSGI server in a new green thread."""
log = logging.getLogger('eventlet.wsgi.server')
try:
eventlet.wsgi.server(socket, application, custom_pool=self.pool,
log=wsgi.WritableLogger(log))
except Exception:
LOG.exception(_('Server error'))
raise
|
blowekamp/itkBinShrink | Documentation/utils/make_marschner_lobb.py | Python | apache-2.0 | 2,435 | 0.014374 | #!/bin/env python
import SimpleITK as sitk
import numpy as np
import math
import time
def marschner_lobb(size=40, alpha=0.25, f_M=6.0):
img = sitk.PhysicalPointSource( sitk.sitkVectorFloat32, [size]*3, [-1]*3, [2.0/size]*3)
imgx = sitk.Vecto | rIndexSelectionCast(img, 0)
imgy = sitk.VectorIndexSelectionCast(img, 1)
imgz = sitk.VectorIndexSelectionCast(img, 2)
del img
r = sitk.Sqrt(imgx**2 + imgy**2)
del imgx, imgy
pr = sitk.Cos((2.0*math.pi*f_M)*sitk.Cos((math.pi/2.0)*r)) |
return (1.0 - sitk.Sin((math.pi/2.0)*imgz) + alpha*(1.0+pr))/(2.0*(1.0+alpha))
ml = marschner_lobb(128)
zslice = ml.GetSize()[-1]//2
print zslice
ml = sitk.Normalize(ml)
n = np.random.normal(0, scale=1.0, size=ml.GetSize())
img_noise = sitk.GetImageFromArray(n)
img_noise.CopyInformation(ml)
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(ml)
resample.SetInterpolator(sitk.sitkNearestNeighbor)
cm_enum = sitk.ScalarToRGBColormapImageFilter.Jet
sn_list = [100.0, 2.0, 1.0, 1.0/2.0, 1.0/4.0]
#
# Bin Shrink
#
def my_shrink(img, shrink):
return sitk.Normalize(resample.Execute(sitk.BinShrink(img,[shrink]*3)))
imgs = []
for sn in sn_list:
img = (ml*sn+img_noise)/(1.0+sn)
tiles = [sitk.Normalize(img),
my_shrink(img,2),
my_shrink(img,4)
]
row = sitk.Tile(tiles,[len(tiles),1,1])[:,:,zslice]
sitk.Show(row, "SN: "+str(sn))
time.sleep(.1)
imgs.extend( tiles )
t = sitk.Tile(imgs, [3,len(imgs)//3,1])
sitk.Show(sitk.ScalarToRGBColormap(t,cm_enum), "composite ")
sitk.WriteImage(sitk.ScalarToRGBColormap(t,cm_enum), "binshrink_hot.png")
#
# Gaussian
#
def my_smooth(img, shrink):
sigma = shrink*0.7*img.GetSpacing()[0]
s = sitk.Shrink(sitk.SmoothingRecursiveGaussian(img, sigma), [shrink]*3)
# return sitk.Normalize(resample.Execute(s))
return sitk.Normalize(sitk.SmoothingRecursiveGaussian(img, sigma))
imgs = []
for sn in sn_list:
img = (ml*sn+img_noise)/(1.0+sn)
tiles = [sitk.Normalize(img),
my_smooth(img,2),
my_smooth(img,4)
]
row = sitk.Tile(tiles,[len(tiles),1,1])[:,:,zslice]
sitk.Show(row, "Smooth SN: "+str(sn))
time.sleep(.1)
imgs.extend( tiles )
t = sitk.Tile(imgs, [3,len(imgs)//3,1])
sitk.Show(sitk.ScalarToRGBColormap(t,cm_enum), "Smooth composite ")
sitk.WriteImage(sitk.ScalarToRGBColormap(t,cm_enum), "gaussianshrink_hot.png")
|
starsirius/mongoengine | mongoengine/errors.py | Python | mit | 3,834 | 0 | from collections import defaultdict
from mongoengine.python_support import txt_type
__all__ = ('NotRegistered', 'InvalidDocumentError', 'LookUpError',
'DoesNotExist', 'MultipleObjectsReturned', 'InvalidQueryError',
'OperationError', 'NotUniqueError', 'FieldDoesNotExist',
'ValidationError')
class NotRegistered(Exception):
pass
class InvalidDocumentError(Exception):
pass
class LookUpError(AttributeError):
pass
class DoesNotExist(Exception):
pass
class MultipleObjectsReturned(Exception):
pass
class InvalidQueryError(Exception):
pass
class OperationError(Exception):
pass
class NotUniqueError(OperationError):
pass
class FieldDoesNotExist(Exception):
"""Raised when trying to set a field
not declared in a :class:`~mongoengine.Document`
or an :class:`~mongoengine.EmbeddedDocument`.
To avoid this behavior on data loading,
you should the :attr:`strict` to ``False``
in the :attr:`meta` dictionnary.
"""
class ValidationError(AssertionError):
"""Validation exception.
May represent an error validating a field or a
document containing fields with validation errors.
:ivar errors: A dictionary of errors for fields | within this
document or list, or None if the error is for an
individual field.
"""
errors = {}
field_name = None
_message = None
def __init__(self, message="", **kwargs):
self.errors = kwargs.get('errors', {})
self.field_name = kwargs.get( | 'field_name')
self.message = message
def __str__(self):
return txt_type(self.message)
def __repr__(self):
return '%s(%s,)' % (self.__class__.__name__, self.message)
def __getattribute__(self, name):
message = super(ValidationError, self).__getattribute__(name)
if name == 'message':
if self.field_name:
message = '%s' % message
if self.errors:
message = '%s(%s)' % (message, self._format_errors())
return message
def _get_message(self):
return self._message
def _set_message(self, message):
self._message = message
message = property(_get_message, _set_message)
def to_dict(self):
"""Returns a dictionary of all errors within a document
Keys are field names or list indices and values are the
validation error messages, or a nested dictionary of
errors for an embedded document or list.
"""
def build_dict(source):
errors_dict = {}
if not source:
return errors_dict
if isinstance(source, dict):
for field_name, error in source.iteritems():
errors_dict[field_name] = build_dict(error)
elif isinstance(source, ValidationError) and source.errors:
return build_dict(source.errors)
else:
return unicode(source)
return errors_dict
if not self.errors:
return {}
return build_dict(self.errors)
def _format_errors(self):
"""Returns a string listing all errors within a document"""
def generate_key(value, prefix=''):
if isinstance(value, list):
value = ' '.join([generate_key(k) for k in value])
if isinstance(value, dict):
value = ' '.join(
[generate_key(v, k) for k, v in value.iteritems()])
results = "%s.%s" % (prefix, value) if prefix else value
return results
error_dict = defaultdict(list)
for k, v in self.to_dict().iteritems():
error_dict[generate_key(v)].append(k)
return ' '.join(["%s: %s" % (k, v) for k, v in error_dict.iteritems()])
|
ngardamala/the_trembling_info | articles/templatetags/links.py | Python | gpl-2.0 | 546 | 0.007326 | from django import template
from articles.models import Category, Article
register = template.Library()
# get list of all cate | gories
@register.inclusion_tag('links_categories.html')
def show_categories():
categories = Category.objects.all()
return {'categories': categories}
# get list of most popular stories
@register.inclusion_tag('links_most_popular.html')
def show_most_popular():
# get 7 most popular stories
articles = Article.objects.filter(allowed_to_pub=T | rue).order_by('-views')[:7]
return {'articles': articles} |
team-vigir/vigir_footstep_planning_basics | vigir_footstep_planning_widgets/src/vigir_footstep_planning_widgets/pattern_generator_widget.py | Python | gpl-3.0 | 7,255 | 0.002343 | #!/usr/bin | /env python
import math
import rospy
import tf
import std_msgs.msg
from rqt_gui_py.plugin import Plugin
from python_qt_binding.QtCore import Qt, Slot, QAbstractListModel
from python_qt_binding.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout, QCheckB | ox, QLabel, QListWidget, QPushButton, QDoubleSpinBox, QFrame
from vigir_footstep_planning_msgs.msg import PatternGeneratorParameters
from vigir_footstep_planning_lib.parameter_set_widget import *
from vigir_footstep_planning_lib.qt_helper import *
from vigir_footstep_planning_lib.logging import *
class PatternGeneratorDialog(Plugin):
def __init__(self, context):
super(PatternGeneratorDialog, self).__init__(context)
self.setObjectName('PatternGeneratorDialog')
self._parent = QWidget()
self._widget = PatternGeneratorWidget(self._parent)
context.add_widget(self._parent)
def shutdown_plugin(self):
self._widget.shutdown_plugin()
class PatternGeneratorWidget(QObject):
enable_pattern_generator = False
def __init__(self, context):
super(PatternGeneratorWidget, self).__init__()
# publisher
self.pattern_generator_params_pub = rospy.Publisher('pattern_generator/set_params', PatternGeneratorParameters, queue_size = 1)
# start widget
widget = context
# start upper part
hbox = QHBoxLayout()
# start left column
left_vbox = QVBoxLayout()
# start button
start_command = QPushButton("Start")
left_vbox.addWidget(start_command)
# simulation checkbox
self.simulation_mode_checkbox = QCheckBox()
self.simulation_mode_checkbox.setText("Simulation Mode")
self.simulation_mode_checkbox.setChecked(False)
left_vbox.addWidget(self.simulation_mode_checkbox)
# realtime checkbox
self.realtime_mode_checkbox = QCheckBox()
self.realtime_mode_checkbox.setText("Realtime Mode")
self.realtime_mode_checkbox.setChecked(False)
left_vbox.addWidget(self.realtime_mode_checkbox)
# joystick checkbox
self.joystick_mode_checkbox = QCheckBox()
self.joystick_mode_checkbox.setText("Joystick Mode")
self.joystick_mode_checkbox.setChecked(False)
left_vbox.addWidget(self.joystick_mode_checkbox)
# ignore invalid steps checkbox
self.ignore_invalid_steps_checkbox = QCheckBox()
self.ignore_invalid_steps_checkbox.setText("Ignore Invalid Steps")
self.ignore_invalid_steps_checkbox.setChecked(True)
left_vbox.addWidget(self.ignore_invalid_steps_checkbox)
# foot seperation
self.foot_seperation = generate_q_double_spin_box(0.2, 0.15, 0.3, 2, 0.01)
self.foot_seperation.valueChanged.connect(self.callback_spin_box)
add_widget_with_frame(left_vbox, self.foot_seperation, "Foot Seperation (m):")
# delta x
self.delta_x = generate_q_double_spin_box(0.0, -0.4, 0.4, 2, 0.01)
self.delta_x.valueChanged.connect(self.callback_spin_box)
add_widget_with_frame(left_vbox, self.delta_x, "dX (m):")
# delta y
self.delta_y = generate_q_double_spin_box(0.0, -2.2, 2.2, 2, 0.01)
self.delta_y.valueChanged.connect(self.callback_spin_box)
add_widget_with_frame(left_vbox, self.delta_y, "dY (m):")
# delta yaw
self.delta_yaw = generate_q_double_spin_box(0.0, -30.0, 30.0, 0, 1.0)
self.delta_yaw.valueChanged.connect(self.callback_spin_box)
add_widget_with_frame(left_vbox, self.delta_yaw, "dYaw (deg):")
# roll
self.roll = generate_q_double_spin_box(0.0, -30.0, 30.0, 0, 1.0)
self.roll.valueChanged.connect(self.callback_spin_box)
add_widget_with_frame(left_vbox, self.roll, "Roll (deg):")
# pitch
self.pitch = generate_q_double_spin_box(0.0, -30.0, 30.0, 0, 1.0)
self.pitch.valueChanged.connect(self.callback_spin_box)
add_widget_with_frame(left_vbox, self.pitch, "Pitch (deg):")
# end left column
left_vbox.addStretch()
hbox.addLayout(left_vbox, 1)
# start right column
right_vbox = QVBoxLayout()
# stop button
stop_command = QPushButton("Stop")
right_vbox.addWidget(stop_command)
# ignore collision
self.collision_checkbox = QCheckBox()
self.collision_checkbox.setText("Ignore Collision")
self.collision_checkbox.setChecked(True)
right_vbox.addWidget(self.collision_checkbox)
# override 3D
self.override_checkbox = QCheckBox()
self.override_checkbox.setText("Override 3D")
self.override_checkbox.setChecked(False)
right_vbox.addWidget(self.override_checkbox)
# end right coloumn
right_vbox.addStretch()
hbox.addLayout(right_vbox, 1)
# add upper part
hbox.setContentsMargins(0,0,0,0)
vbox = QVBoxLayout()
vbox.addLayout(hbox)
# parameter set selection
self.parameter_set_widget = QParameterSetWidget()
add_widget_with_frame(vbox, self.parameter_set_widget, "Parameter Set:")
# end widget
widget.setLayout(vbox)
#context.add_widget(widget)
# signal connections
start_command.clicked.connect(self.start_command_callback)
stop_command.clicked.connect(self.stop_command_callback)
self.joystick_mode_checkbox.clicked.connect(self.joystick_mode_check_callback)
self.ignore_invalid_steps_checkbox.clicked.connect(self._publish_parameters)
def shutdown_plugin(self):
print "Shutting down ..."
self.pattern_generator_params_pub.unregister()
print "Done!"
# message publisher
def _publish_parameters(self):
params = PatternGeneratorParameters()
params.enable = self.enable_pattern_generator
params.simulation_mode = self.simulation_mode_checkbox.isChecked()
params.joystick_mode = self.joystick_mode_checkbox.isChecked()
params.ignore_invalid_steps = self.ignore_invalid_steps_checkbox.isChecked()
params.cmd.linear.x = self.delta_x.value()
params.cmd.linear.y = self.delta_y.value()
params.cmd.linear.z = 0
params.cmd.angular.x = math.radians(self.roll.value())
params.cmd.angular.y = math.radians(self.pitch.value())
params.cmd.angular.z = math.radians(self.delta_yaw.value())
params.foot_seperation = self.foot_seperation.value()
params.parameter_set_name.data = self.parameter_set_widget.current_parameter_set_name()
print "Send stepping command = \n",params
self.pattern_generator_params_pub.publish(params)
# Define system command strings
def start_command_callback(self):
self.enable_pattern_generator = True
self._publish_parameters()
def stop_command_callback(self):
self.enable_pattern_generator = False
self._publish_parameters()
def callback_spin_box(self, value_as_int):
if self.realtime_mode_checkbox.isChecked():
self._publish_parameters()
def joystick_mode_check_callback(self):
self.enable_pattern_generator = False
self._publish_parameters()
|
mrquim/repository.mrquim | script.module.exodus/lib/resources/lib/sources/en/dizigold.py | Python | gpl-2.0 | 4,468 | 0.013429 | # NEEDS FIXING
# -*- coding: utf-8 -*-
'''
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import directstream
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['dizigold.net', 'dizigold1.com']
self.base_link = 'http://www.dizigold2.com'
self.player_link = 'http://player.dizigold2.com/?id=%s&s=1&dil=%s'
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
result = cache.get(self.dizigold_tvcache, 120)
tvshowtitle = cleantitle.get(tvshowtitle)
result = [i[0] for i in result if tvshowtitle == i[1]][0]
url = urlparse.urljoin(self.base_link, result)
url = urlparse.urlparse(url).path
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def dizigold_tvcache(self):
try:
result = client.request(self.base_link)
result = client.parseDOM(result, 'div', attrs = {'class': 'dizis'})[0]
result = re.compile('href="(.+?)">(.+?)<').findall(result)
result = [(re.sub('http.+?//.+?/','/', i | [0]), re.sub('&#\d*;','', i[1])) for i in result]
result = [(i[0], cleantitle.get(i[1])) for i in result]
return result
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
if url == None: return
url = '/%s/%01d-sezon/%01d-bolum' % (url.replace('/', ''), int(season), int(episode))
url = client.replaceHT | MLCodes(url)
url = url.encode('utf-8')
return url
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
base_url = urlparse.urljoin(self.base_link, url)
result = client.request(base_url)
id = re.compile('var\s*view_id\s*=\s*"(\d*)"').findall(result)[0]
for dil in ['tr', 'or', 'en']:
query = self.player_link % (id, dil)
result = client.request(query, referer=base_url)
try:
url = client.parseDOM(result, 'iframe', ret='src')[-1]
if 'openload' in url:
host = 'openload.co' ; direct = False ; url = [{'url': url, 'quality': 'HD'}]
elif 'ok.ru' in url:
host = 'vk' ; direct = True ; url = directstream.odnoklassniki(url)
elif 'vk.com' in url:
host = 'vk' ; direct = True ; url = directstream.vk(url)
else: raise Exception()
for i in url: sources.append({'source': host, 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': direct, 'debridonly': False})
except:
pass
try:
url = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)
links = [(i[0], '1080p') for i in url if int(i[1]) >= 1080]
links += [(i[0], 'HD') for i in url if 720 <= int(i[1]) < 1080]
links += [(i[0], 'SD') for i in url if 480 <= int(i[1]) < 720]
for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'language': 'en', 'url': i[0], 'direct': True, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
|
yongshengwang/hue | build/env/lib/python2.7/site-packages/nosetty-0.4-py2.7.egg/nosetty/test/nosepassthru.py | Python | apache-2.0 | 178 | 0.011236 | """a decoy python script that can be run like `python nosepassthru.py` to test using an executable chain"""
if __name__ == '__main__':
from nose.core import main
main | () | |
joshbohde/scikit-learn | sklearn/decomposition/nmf.py | Python | bsd-3-clause | 16,021 | 0.00025 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD
from __future__ import division
import warnings
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..utils.extmath import fast_svd
def _pos(x):
"""Postive part of a vector / matrix"""
return (x >= 0) * x
def _neg(x):
"""Negative part of a vector / matrix"""
return (x < 0) * (-x)
def norm(x):
"""Dot product based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return np.sqrt(np.dot(x.flatten().T, x.flatten()))
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
n = len(x)
return (np.sqrt(n) - np.linalg.norm(x, 1) / norm(x)) / (np.sqrt(n) - 1)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6, random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X: array, [n_samples, n_features]
The data matrix to be decomposed.
n_components:
The number of components desired in the
approximation.
variant: None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard n | ormal random variates.
Default: None
eps:
Truncate all values less then this in output to zero.
random_state: numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H):
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
Remarks
-------
This implements the algorithm described | in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://www.cs.rpi.edu/~boutsc/files/nndsvd.pdf
"""
if (X < 0).any():
raise ValueError("Negative values in data passed to initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = fast_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in xrange(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = _pos(x), _pos(y)
x_n, y_n = _neg(x), _neg(y)
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
if random_state is None:
random_state = np.random
elif isinstance(random_state, int):
random_state = np.random.mtrand.RandomState(random_state)
elif not isinstance(random_state, np.random.mtrand.RandomState):
raise ValueError('Invalid random state in _nmf_initialize_')
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
def _nls_subproblem(V, W, H_init, tol, max_iter):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W:
Constant matrices
H_init:
Initial guess for the solution
tol:
Tolerance of the stopping condition.
max_iter:
Maximum number of iterations before
timing out.
Returns
-------
H:
Solution to the non-negative least squares problem
grad:
The gradient.
n_iter:
The number of iterations done by the algorithm.
"""
if (H_init < 0).any():
raise ValueError("Negative values in H_init passed to NLS solver.")
H = H_init
WtV = np.dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
beta = 0.1
for n_iter in xrange(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
proj_gradient = norm(grad[np.logical_or(grad < 0, H > 0)])
if proj_gradient < tol:
break
for inner_iter in xrange(1, 20):
Hn = H - alpha * grad
# Hn = np.where(Hn > 0, Hn, 0)
Hn = _pos(Hn)
d = Hn - H
gradd = np.sum(grad * d)
dQd = np.sum(np.dot(WtW, d) * d)
# magic numbers whoa
suff_decr = 0.99 * gradd + 0.5 * dQd < 0
if inner_iter == 1:
decr_alpha = not suff_decr
Hp = H
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha = alpha * beta
else:
if not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha = alpha / beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Parameters
----------
X: array, [n_samples, n_features]
Data the model will be fit to.
n_components: int or None
Number of components, if n_components is not set all components
are kept
init: 'nndsvd' | 'nndsvda' | 'nndsvdar' | int | RandomState
Method used to initialize the procedure.
Default: 'nndsvdar'
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
int seed or RandomState: non-negative random matrices
sparseness: 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta: double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta: double, default: 0.1
Degree of correctness to mantain, if sparsity is not None. Smaller
values mean larger error.
tol: double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter: int, default: 200
Number of iterations to compute.
nls_max_iter: int, default: 2000
Number of iterations in NLS subproblem.
Attributes
----------
components_: array, [n_components, n_features]
Non-negative components of the data
reconstruction_err_: number
Frobenius norm of the matrix difference between the
training data and the reconstructed data f |
vbuell/python-javaobj | javaobj.py | Python | apache-2.0 | 31,597 | 0.003007 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides functions for reading (writing is WIP currently) of Java
objects serialized by ObjectOutputStream. This form of object
representation is a standard data interchange format in Java world.
javaobj module exposes an API familiar to users of the standard modules
such as marshal, pickle and json.
See: http://download.oracle.com/javase/6/docs/platform/serialization/spec/protocol.html
"""
import StringIO
import struct
try:
import logging
except ImportError:
def log_debug(message, ident=0):
pass
def log_error(message, ident=0):
pass
else:
_log = logging.getLogger(__name__)
def log_debug(message, ident=0):
_log.debug(" " * (ident * 2) + str(message))
def log_error(message, ident=0):
_log.error(" " * (ident * 2) + str(message))
__version__ = "$Revision: 20 $"
def load(file_object, *args):
"""
Deserializes Java primitive data and objects serialized by ObjectOutputStream
from a file-like object.
"""
marshaller = JavaObjectUnmarshaller(file_object)
for t in args:
marshaller.add_transformer(t)
marshaller.add_transformer(DefaultObjectTransformer())
return marshaller.readObject()
def load_all(file_object):
marshaller = JavaObjectUnmarshaller(file_object)
marshaller.add_transformer(DefaultObjectTransformer())
res = []
while marshaller.data_left:
res.append(marshaller.readObject())
return res
def loads(string, *args):
"""
Deserializes Java objects and primitive data serialized by ObjectOutputStream
from a string.
"""
f = StringIO.StringIO(string)
marshaller = JavaObjectUnmarshaller(f)
for t in args:
marshaller.add_transformer(t)
marshaller.add_transformer(DefaultObjectTransformer())
return marshaller.readObject()
def dumps(object, *args):
"""
Serializes Java primitive data and objects unmarshaled by load(s) before into string.
"""
marshaller = JavaObjectMarshaller()
for t in args:
marshaller.add_transformer(t)
return marshaller.dump(object)
class JavaClass(object):
def __init__(self):
self.name = None
self.serialVersionUID = None
self.flags = None
self.handle = None
self.fields_names = []
self.fields_types = []
self.superclass = None
def __str__(self):
return self.__repr__()
def __repr__(self):
return "[%s:0x%X]" % (self.name, self.serialVersionUID)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return (self.name == other.name and
self.serialVersionUID == other.serialVersionUID and
self.flags | == other.flags and
self.fields_names == other.fields | _names and
self.fields_types == other.fields_types and
self.superclass == other.superclass)
class JavaObject(object):
def __init__(self):
self.classdesc = None
self.annotations = []
def get_class(self):
return self.classdesc
def __str__(self):
return self.__repr__()
def __repr__(self):
name = "UNKNOWN"
if self.classdesc:
name = self.classdesc.name
return "<javaobj:%s>" % name
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
res = (self.classdesc == other.classdesc and
self.annotations == other.annotations)
for name in self.classdesc.fields_names:
res = (res and
getattr(self, name) == getattr(other, name))
return res
def copy(self, new_object):
new_object.classdesc = self.classdesc
new_object.annotations = self.annotations
for name in self.classdesc.fields_names:
new_object.__setattr__(name, getattr(self, name))
class JavaString(str):
def __init__(self, *args, **kwargs):
str.__init__(self, *args, **kwargs)
def __eq__(self, other):
if not isinstance(other, str):
return False
return str.__eq__(self, other)
class JavaEnum(JavaObject):
def __init__(self, constant=None):
super(JavaEnum, self).__init__()
self.constant = constant
class JavaArray(list, JavaObject):
def __init__(self, classdesc=None):
list.__init__(self)
JavaObject.__init__(self)
self.classdesc = classdesc
class JavaObjectConstants:
    """Constants from the Java Object Serialization Stream Protocol:
    stream header values, type-code (TC_*) bytes, classDescFlags (SC_*)
    and primitive-field typecode characters."""
    # Stream header: every serialized stream starts with magic + version.
    STREAM_MAGIC = 0xaced
    STREAM_VERSION = 0x05
    # Content type codes (one byte each, contiguous 0x70..0x7E).
    TC_NULL = 0x70
    TC_REFERENCE = 0x71
    TC_CLASSDESC = 0x72
    TC_OBJECT = 0x73
    TC_STRING = 0x74
    TC_ARRAY = 0x75
    TC_CLASS = 0x76
    TC_BLOCKDATA = 0x77
    TC_ENDBLOCKDATA = 0x78
    TC_RESET = 0x79
    TC_BLOCKDATALONG = 0x7A
    TC_EXCEPTION = 0x7B
    TC_LONGSTRING = 0x7C
    TC_PROXYCLASSDESC = 0x7D
    TC_ENUM = 0x7E
    # Highest assigned type code (currently coincides with TC_ENUM).
    TC_MAX = 0x7E
    # classDescFlags
    SC_WRITE_METHOD = 0x01 # if SC_SERIALIZABLE
    SC_BLOCK_DATA = 0x08 # if SC_EXTERNALIZABLE
    SC_SERIALIZABLE = 0x02
    SC_EXTERNALIZABLE = 0x04
    SC_ENUM = 0x10
    # type definition chars (typecode)
    TYPE_BYTE = 'B' # 0x42
    TYPE_CHAR = 'C'
    TYPE_DOUBLE = 'D' # 0x44
    TYPE_FLOAT = 'F' # 0x46
    TYPE_INTEGER = 'I' # 0x49
    TYPE_LONG = 'J' # 0x4A
    TYPE_SHORT = 'S' # 0x53
    TYPE_BOOLEAN = 'Z' # 0x5A
    TYPE_OBJECT = 'L' # 0x4C
    TYPE_ARRAY = '[' # 0x5B
    # list of supported typecodes listed above
    TYPECODES_LIST = [
            # primitive types
            TYPE_BYTE,
            TYPE_CHAR,
            TYPE_DOUBLE,
            TYPE_FLOAT,
            TYPE_INTEGER,
            TYPE_LONG,
            TYPE_SHORT,
            TYPE_BOOLEAN,
            # object types
            TYPE_OBJECT,
            TYPE_ARRAY ]
    # First handle value assigned to a written/read object reference.
    BASE_REFERENCE_IDX = 0x7E0000
class JavaObjectUnmarshaller(JavaObjectConstants):
    def __init__(self, stream=None):
        """Prepare the unmarshaller and validate the stream header.

        :param stream: binary file-like object positioned at the start of
            serialized Java data; the magic/version header is consumed here
            by ``_readStreamHeader`` (presumably raising on a bad header --
            TODO confirm).
        """
        # Dispatch table: type-code byte -> handler method.
        self.opmap = {
            self.TC_NULL: self.do_null,
            self.TC_CLASSDESC: self.do_classdesc,
            self.TC_OBJECT: self.do_object,
            self.TC_STRING: self.do_string,
            self.TC_LONGSTRING: self.do_string_long,
            self.TC_ARRAY: self.do_array,
            self.TC_CLASS: self.do_class,
            self.TC_BLOCKDATA: self.do_blockdata,
            self.TC_BLOCKDATALONG: self.do_blockdata_long,
            self.TC_REFERENCE: self.do_reference,
            self.TC_ENUM: self.do_enum,
            self.TC_ENDBLOCKDATA: self.do_null, # note that we are reusing of do_null
            }
        self.current_object = None
        # Back-reference bookkeeping for TC_REFERENCE resolution.
        self.reference_counter = 0
        self.references = []
        self.object_stream = stream
        self._readStreamHeader()
        self.object_transformers = []
        # True until a full read proves the stream was consumed entirely.
        self.data_left = True
    def readObject(self):
        """Deserialize the next object from the stream and return it.

        Any trailing bytes are logged (not an error) and ``self.data_left``
        is set so callers can decide whether to keep reading; the stream is
        rewound to just after the object so those bytes stay available.
        On failure the internal state is dumped before re-raising.
        """
        try:
            opcode, res = self._read_and_exec_opcode(ident=0) # TODO: add expects

            position_bak = self.object_stream.tell()
            the_rest = self.object_stream.read()
            if len(the_rest):
                log_error("Warning!!!!: Stream still has %s bytes left. Enable debug mode of logging to see the hexdump." % len(the_rest))
                log_debug(self._create_hexdump(the_rest, position_bak))
                self.data_left = True
            else:
                log_debug("Java Object unmarshalled succesfully!")
                self.data_left = False
            # rewind so the trailing bytes can still be consumed by the caller
            self.object_stream.seek(position_bak)

            return res
        except Exception as e:
            self._oops_dump_state()
            raise
de |
dimagi/commcare-hq | corehq/tabs/templatetags/menu_tags.py | Python | bsd-3-clause | 4,847 | 0.000413 | from corehq.apps.users.models import DomainMembershipError
from django import template
from django.template.loader import render_to_string
from corehq.tabs.config import MENU_TABS
from corehq.tabs.exceptions import TabClassError, TabClassErrorSummary
from corehq.tabs.extension_points import uitab_classes
from corehq.tabs.utils import path_starts_with_url
register = template.Library()
def _get_active_tab(visible_tabs, request_path):
    """
    Return the tab claiming the longest url_prefix that matches
    ``request_path``, or None when no tab matches.

    If one tab claims '/a/{domain}/data/' and another claims
    '/a/{domain}/data/edit/case_groups/', the second wins because its
    matching prefix is longer.
    """
    matches = [
        (url_prefix, tab)
        for tab in visible_tabs
        for url_prefix in tab.url_prefixes
        if request_path.startswith(url_prefix)
    ]
    if matches:
        # max over (prefix, tab) tuples picks the longest/lexicographically
        # greatest matching prefix, exactly like sorted(...)[-1] did.
        return max(matches)[1]
def get_all_tabs(request, domain, couch_user, project):
    """
    Instantiate all UITabs, and aggregate all their TabClassErrors (if any)
    into a single TabClassErrorSummary.

    This makes it easy to get a list of all configuration issues
    and fix them in one cycle.

    BUG FIX: removed stray " | " separator characters that had corrupted
    ``tab_classes.extend(...)`` and ``if instantiation_errors:``.
    """
    all_tabs = []
    instantiation_errors = []
    # Static menu tabs first, then any tabs contributed via extension points.
    tab_classes = list(MENU_TABS)
    tab_classes.extend(uitab_classes())
    for tab_class in tab_classes:
        try:
            tab = tab_class(
                request, domain=domain,
                couch_user=couch_user, project=project)
        except TabClassError as e:
            instantiation_errors.append(e)
        else:
            all_tabs.append(tab)

    if instantiation_errors:
        messages = (
            '- {}: {}'.format(e.__class__.__name__, str(e))
            for e in instantiation_errors
        )
        summary_message = 'Summary of Tab Class Errors:\n{}'.format('\n'.join(messages))
        raise TabClassErrorSummary(summary_message)
    else:
        return all_tabs
class MainMenuNode(template.Node):
    """Template node that renders the top-level main menu for the request,
    marking the active tab and exposing it to the outer template context."""
    def render(self, context):
        request = context['request']
        couch_user = getattr(request, 'couch_user', None)
        project = getattr(request, 'project', None)
        domain = context.get('domain')

        all_tabs = get_all_tabs(request, domain=domain, couch_user=couch_user,
                                project=project)
        active_tab = _get_active_tab(all_tabs, request.get_full_path())
        if active_tab:
            active_tab.is_active_tab = True

        visible_tabs = [tab for tab in all_tabs if tab.should_show()]

        # set the context variable in the highest scope so it can be used in
        # other blocks
        role_version = None
        try:
            if couch_user:
                user_role = couch_user.get_role(domain, allow_enterprise=True)
                # role cache_version lets downstream fragments cache per-role
                role_version = user_role.cache_version if user_role else None
        except DomainMembershipError:
            # user has no membership in this domain; render without a role
            role_version = None
        context.dicts[0]['active_tab'] = active_tab
        flat = context.flatten()
        flat.update({
            'tabs': visible_tabs,
            'role_version': role_version
        })
        return render_to_string('tabs/menu_main.html', flat)
@register.tag(name="format_main_menu")
def format_main_menu(parser, token):
    """Template tag ``{% format_main_menu %}``: takes no arguments and
    renders the main menu via :class:`MainMenuNode`."""
    return MainMenuNode()
@register.simple_tag(takes_context=True)
def format_sidebar(context):
    """Render the left sidebar for the active tab.

    Mutates the active tab's sidebar nav dicts in place to flag the nav
    (and contextual subpage, if any) matching the current URL, then renders
    the sidebar template with those sections.
    """
    current_url_name = context['current_url_name']
    active_tab = context.get('active_tab', None)
    request = context['request']

    sections = active_tab.filtered_sidebar_items if active_tab else None

    if sections:
        # set is_active on active sidebar item by modifying nav by reference
        # and see if the nav needs a subnav for the current contextual item
        for section_title, navs in sections:
            for nav in navs:
                full_path = request.get_full_path()
                if path_starts_with_url(full_path, nav['url']):
                    nav['is_active'] = True
                else:
                    nav['is_active'] = False

                if 'subpages' in nav:
                    for subpage in nav['subpages']:
                        if subpage['urlname'] == current_url_name:
                            if callable(subpage['title']):
                                # title may be a callable taking the flattened
                                # template context as keyword arguments
                                actual_context = {}
                                for d in context.dicts:
                                    actual_context.update(d)
                                subpage['is_active'] = True
                                subpage['title'] = subpage['title'](**actual_context)
                            nav['subpage'] = subpage
                            break

    return render_to_string(
        'hqwebapp/partials/navigation_left_sidebar.html',
        {'sections': sections}
    )
|
scottdanesi/earthshaker-aftershock | procgame/fakepinproc.py | Python | gpl-3.0 | 7,383 | 0.033862 | import time
import pinproc
import Queue
from game import gameitems
class FakePinPROC(object):
    """Stand-in class for :class:`pinproc.PinPROC`. Generates DMD events."""
    # NOTE(review): these are CLASS-level mutable attributes, so they are
    # shared by every FakePinPROC instance (and subclasses) -- confirm this
    # sharing is intentional before instantiating more than one.
    last_dmd_event = 0
    frames_per_second = 60
    drivers = gameitems.AttrCollection()
    switch_events = []
    # NOTE(review): list-multiplication aliases ONE dict into all 1024
    # slots; benign only because switch_update_rule replaces whole entries
    # rather than mutating them in place.
    switch_rules = [{'notifyHost':False, 'drivers':[]}] * 1024
    """List of events"""
    """Frames per second at which to dispatch :attr:`pinproc.EventTypeDMDFrameDisplayed` events."""
    def __init__(self, machine_type):
        # Instantiate 256 drivers.
        for i in range(0, 256):
            name = 'driver' + str(i)
            self.drivers.add(name, gameitems.VirtualDriver(None, name, i, True))

    def noop(self, *args, **kwargs):
        """ Empty method used when no virtual equivalent to a pypinproc method is necessary. This allows a game to switch back and forth between pypinproc and this fakepinproc class without modification. """
        pass

    def switch_get_states(self, *args):
        """ Method to provide default switch states. """
        return [0] * 256

    def get_events(self):
        """ Get all switch and DMD events since the last time this was called. """
        events = []
        events.extend(self.switch_events)
        self.switch_events = []
        now = time.time()
        seconds_since_last_dmd_event = now - self.last_dmd_event
        # Cap the catch-up burst at 16 synthetic DMD frames.
        missed_dmd_events = min(int(seconds_since_last_dmd_event*float(self.frames_per_second)), 16)
        if missed_dmd_events > 0:
            self.last_dmd_event = now
            events.extend([{'type':pinproc.EventTypeDMDFrameDisplayed, 'value':0}] * missed_dmd_events)
        return events

    def driver_pulse(self, number, milliseconds):
        """ Send a pulse command to a virtual driver. """
        self.drivers[number].pulse(milliseconds)

    def driver_schedule(self, number, schedule, cycle_seconds, now):
        """ Send a schedule command to a virtual driver. """
        self.drivers[number].schedule(schedule, cycle_seconds, now)

    def driver_disable(self, number):
        """ Send a disable command to a virtual driver. """
        self.drivers[number].disable()

    def driver_get_state(self, number):
        """ Return the state dictionary for the specified driver. """
        return self.drivers[number].state

    # Switch rule methods
    def switch_update_rule(self, num, state, rule_params, drivers, drive_outputs_now=False):
        """ Stores P-ROC switch rules in an internal switch_rules list. """
        # Convert the pyprocgame event name to a pinproc event.
        if state == 'closed_debounced':
            pr_state = pinproc.EventTypeSwitchClosedDebounced
        elif state == 'open_debounced':
            pr_state = pinproc.EventTypeSwitchOpenDebounced
        elif state == 'closed_nondebounced':
            pr_state = pinproc.EventTypeSwitchClosedNondebounced
        else: pr_state = pinproc.EventTypeSwitchOpenNondebounced

        # Find the appropriate switch rule entry to overwrite
        rule_index = ((pr_state-1) * 256) + num

        notify = rule_params['notifyHost']

        # Copy the list so that unique driver lists are stored
        # in each switch rule entry.
        driverlist = list(drivers)

        # Overwrite the existing rule with this new one.
        self.switch_rules[rule_index] = {'notifyHost':notify, 'drivers':driverlist}
        return True

    def add_switch_event(self, number, event_type):
        """ Called by the simulating element to send in a switch event. """
        # Event types start at 1; so use it to figure out in which
        # 256 rule block to find the rule for this event.
        rule_index = ((event_type-1) * 256) + number

        # If the event says to notify host, add it to the list
        # of pending events.
        if self.switch_rules[rule_index]['notifyHost']:
            event = {'type':event_type, 'value':number}
            self.switch_events.append(event)

        # Now see if the switch rules indicate one or more drivers
        # needs to change.
        drivers = self.switch_rules[rule_index]['drivers']
        for driver_rule in drivers:
            self.drivers[driver_rule['driverNum']].update_state(driver_rule)

    def watchdog_tickle(self):
        """ This method contains things that need to happen every iteration of a game's runloop. """
        for driver in self.drivers: driver.tick()

    def __getattr__(self, name):
        # NOTE(review): any unknown attribute silently resolves to noop,
        # which also masks typos in caller code.
        if name == 'get_events':
            return self.get_events
        elif name == 'switch_get_states':
            return self.switch_get_states
        else:
            return self.noop
class FakePinPROCPlayback(FakePinPROC):
    """ FakePinPROCPlayback offers the functionality to play back switch
    events from a switch record file taken from real gameplay.

    The class subclasses fakepinproc to maintain the same functionality and
    interop by simply changing the proc class in config.yaml.

    BUG FIXES in this revision:
    - removed a stray " | " separator that had corrupted the
      ``_get_current_simulator_time()`` call in ``get_events``;
    - replaced the Python-2-only ``dict.keys()`` + ``.sort()`` pair with
      ``sorted()`` (same result, works on both Python 2 and 3).
    """
    _start_time = 0              # simulator start time, used to compute relative simulator time
    _playback_file = None        # playback file object that we read from
    _events = dict()             # events keyed by their simulator timestamp (ms)
    _event_timestamps = None     # sorted timestamps so events fire in order
    _states = [0] * 256          # local switch state repository

    def __init__(self, machine_type):
        super(FakePinPROCPlayback, self).__init__(machine_type)
        self._states = [0] * 256                      # initialize all switch values to 0
        self._playback_file = open("playback.txt", 'r')  # open our playback file for reading
        self._parse_playback_file()                   # load initial switch states and all events into memory
        self._playback_file.close()                   # close the playback file after reading into memory
        # Sorted timestamps (least to greatest) so we access all events in order.
        self._event_timestamps = sorted(self._events)
        # Mark down the current start time so we know when to process an event.
        # NOTE(review): time.clock() was removed in Python 3.8; this module is
        # Python-2 era -- switch to time.perf_counter() if porting.
        self._start_time = (time.clock() * 1000)

    def switch_get_states(self, *args):
        """ Method to provide current simulator switch states. """
        return self._states

    def get_events(self):
        """Return pending DMD events plus any recorded switch events whose
        timestamp has been reached."""
        # Populate the events list from our fakepinproc DMD events, etc
        events = super(FakePinPROCPlayback, self).get_events()

        # Mark down the current time so we can check whether or not we should fire an event yet
        current_time = self._get_current_simulator_time()

        # Loop through all events that we should execute now
        while len(self._event_timestamps) > 0 and self._event_timestamps[0] <= current_time:
            evt = self._events[self._event_timestamps[0]]
            print("[%s] [%s] Firing switch %s" % (str(current_time), str(self._event_timestamps[0]), evt['swname']))
            # Add the event to the event queue
            events.append(evt)
            # Remove the already processed events from our data structures so we don't process them again
            del self._events[self._event_timestamps[0]]
            del self._event_timestamps[0]
        return events

    def _get_current_simulator_time(self):
        """Milliseconds elapsed since the simulator started."""
        return (time.clock() * 1000) - self._start_time

    def _parse_playback_file(self):
        """Read the playback file: 2-field lines ("num|val") seed initial
        switch states; 4+-field lines define timestamped switch events."""
        line = self._playback_file.readline()
        while line:
            line = line.strip()
            evt = line.split("|")
            if len(evt) == 2:
                # This is a switch state declaration
                swnum = int(evt[0])
                swval = int(evt[1])
                self._states[swnum] = swval
            elif len(evt) >= 4:
                # This is an actual event to schedule
                procEvent = dict()
                procEvent['type'] = int(evt[1])
                procEvent['value'] = int(evt[2])
                procEvent['swname'] = evt[3]
                if len(evt) >= 5:
                    procEvent['time'] = evt[4]
                self._events[float(evt[0])] = procEvent
            line = self._playback_file.readline()
FedeMPouzols/Savu | savu/data/data_structures/data_notes.py | Python | gpl-3.0 | 7,157 | 0.00014 | # Copyright 2015 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: data_notes
:platform: Unix
:synopsis: A module containing extended doc strings for the data module.
.. moduleauthor:: Nicola Wadeson <scientificsoftware@diamond.ac.uk>
"""
def _set_preview_note():
    """
    Each ``preview_list`` element should be of the form
    ``start:stop:step:chunk``, where ``stop``, ``step`` and ``chunk`` are
    optional (defaults: ``stop``=``start``+ 1, ``step``= 1, ``chunk`` = 1)
    but must be given in that order.

    .. note::
        **start:stop[:step]**
        represents the set of indices specified by:

        >>> indices = range(start, stop[, step])

        For more information see :func:`range`

        **start:stop:step:chunk (chunk > 1)**
        represents the set of indices specified by:

        >>> a = np.tile(np.arange(start, stop, step), (chunk, 1))
        >>> b = np.transpose(np.tile(np.arange(chunk)-chunk/2, \
(a.shape[1], 1)))
        >>> indices = np.ravel(np.transpose(a + b))

        Chunk indicates how many values to take around each value in
        ``range(start, stop, step)``.  It is only available for slicing
        dimensions.

        .. warning:: If any indices are out of range (or negative)
            then the list is invalid.  When chunk > 1, new start and
            end values will be:

            >>> new_start = start - int(chunk/2)
            >>> new_end = range(start, stop, step)[-1] + \
(step - int(chunk/2))

        **accepted values**:
            Each entry is executed using :func:`eval` so simple formulas are\
            allowed and may contain the following keywords:

            * ``:`` is a simplification for 0:end:1:1 (all values)
            * ``mid`` is int(shape[dim]/2)-1
            * ``end`` is shape[dim]
    """
# Documentation-holder function: its docstring is harvested for the data
# module docs; it is never called for its (empty) body.
def image_key():
    """
    This is a helper function to be used after :meth:`savu.data.\
data_structures.data_create.DataCreate.create_dataset`,

    >>> out_dataset[0].create_dataset(in_dataset[0])
    >>> out_dataset[0].trim_output_data(in_dataset[0], image_key=0)

    if in_dataset[0] is a plugin input dataset with an image_key and 0 is
    the data index.
    """
# Documentation-holder function: docstring only, never executed for effect.
def _create():
    """
    .. note:: **Creating a dataset**
        Each new dataset requires the following information:

        * ``shape``
        * ``axis_labels``
        * ``patterns``

        This function can be used to setup the required information in one
        of two ways:

        1. Passing a ``Data`` object as the only argument:  All required
           information is coped from this data object.  For example,

           >>> out_dataset[0].create_dataset(in_dataset[0])

        2. Passing kwargs:  ``shape`` and ``axis_labels`` are required
           (see above for other optional arguments).  For example,

           >>> out_dataset[0].create_dataset(axis_labels=labels, \
shape=new_shape)

    .. warning:: If ``pattern`` keyword is not used, patterns must be added
        after :meth:`~savu.data.data_structures.data_create.DataCreate.\
create_dataset` by calling :func:`~savu.data.data_structures.data.Data.\
add_pattern`.
    """
# Documentation-holder function: docstring only, never executed for effect.
def _shape():
    """
    .. note:: ``shape`` keyword argument
        Options to pass are:

        1. Data object: Copy shape from the Data object.

           >>> out_dataset[0].create_dataset(axis_labels=labels, \
shape=in_dataset[0])

        2. tuple: Define shape explicity.

           >>> out_dataset[0].create_dataset(axis_labels=labels, \
shape=(10, 20, 30))
    """
# Documentation-holder function: docstring only, never executed for effect.
def axis_labels():
    """
    .. note:: ``axis_labels`` keyword argument
        Options to pass are:

        1. Data object: Copy all labels from the Data object.

           >>> out_dataset[0].create_dataset(axis_labels=in_dataset[0], \
shape=new_shape)

        2. {Data_obj: list}: Copy labels from the Data object and then
           remove or insert.

           * To remove dimensions: list_entry = 'dim'.  For example, to
             remove the first and last axis_labels from the copied list:

             >>> out_dataset[0].create_dataset(axis_labels=\
{in_dataset[0]: ['1', '-1']), shape=new_shape})

           * To add/replace dimensions: list_entry = 'dim.name.unit'.

             >>> out_dataset[0].create_dataset(axis_labels={in_dataset[0]: \
['2.det_x.pixel', '3.det_y.pixel']}, shape=new_shape)

           * To insert dimensions: list_entry = '~dim.name.unit'.

             >>> out_dataset[0].create_dataset(axis_labels={in_dataset[0]: \
['~2.det_x.pixel', '~3.det_y.pixel']}, shape=new_shape)

           (or a combination, where each successive step is applied after
           the previous changes have been made.)

        3. list: Where each element is of the form 'name.unit'.

           >>> out_dataset[0].create_dataset(axis_labels=['rotation.deg',\
'det_x.pixel', 'det_y.pixel'], shape=new_shape)
    """
# Documentation-holder function: docstring only, never executed for effect.
def patterns():
    """
    .. note:: ``patterns`` keyword argument
        Options to pass are:

        1. Data object: Copy all patterns from the Data object.

           >>> out_dataset[0].create_dataset(axis_labels=labels, \
shape=new_shape, patterns=in_dataset[0])

        2. {Data_obj: list}: Copy only the patterns given in the list
           from the Data object.

           * Copy the patterns: list_entry = 'name'

             >>> out_dataset[0].crate_dataset(axis_labels=labels, \
shape=new_shape, patterns={in_dataset[0], ['SINOGRAM', 'PROJECTION']})

           * Copy patterns but remove dimensions: list_entry = \
'name1.r1,r2...':

             >>> out_dataset[0].crate_dataset(axis_labels=labels, \
shape=new_shape, patterns={in_dataset[0], ['SINOGRAM.1', 'PROJECTION.1']})

           * Copy ALL patterns but remove dimensions: list_entry = \
'\*.r1,r2...':

             >>> out_dataset[0].crate_dataset(axis_labels=labels, \
shape=new_shape, patterns={in_dataset[0], '*.0'})
    """
# Documentation-holder function: docstring only, never executed for effect.
def _padding():
    """
    Either 'dim.pad', 'dim.after.pad' or 'dim.before.pad', where 'dim' and\
    'pad' are integers and give the dimension to pad and the pad \
    amount respectively. The keywords 'before' and 'after' specify padding\
    'before' or 'after' the original dimension index (if neither are\
    specified the both directions will be padded. """
|
herilalaina/scikit-learn | sklearn/metrics/pairwise.py | Python | bsd-3-clause | 46,964 | 0.000043 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
from functools import partial
import warnings
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
    """
    1. If dtype of X and Y is float32, then dtype float32 is returned.
    2. Else dtype float is returned.
    """
    if not issparse(X) and not isinstance(X, np.ndarray):
        X = np.asarray(X)

    if Y is None:
        Y_dtype = X.dtype
    elif not issparse(Y) and not isinstance(Y, np.ndarray):
        Y = np.asarray(Y)
        Y_dtype = Y.dtype
    else:
        Y_dtype = Y.dtype

    if X.dtype == Y_dtype == np.float32:
        dtype = np.float32
    else:
        # ``np.float`` was merely a deprecated alias of the builtin ``float``
        # (removed in NumPy 1.24); use the builtin directly.
        dtype = float

    return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
    """ Set X and Y appropriately and checks inputs

    If Y is None, it is set as a pointer to X (i.e. not a copy).
    If Y is given, this does not happen.
    All distance metrics should use this function first to assert that the
    given parameters are correct and safe to use.

    Specifically, this function first ensures that both X and Y are arrays,
    then checks that they are at least two dimensional while ensuring that
    their elements are floats (or dtype if provided). Finally, the function
    checks that the size of the second dimension of the two arrays is equal, or
    the equivalent check for a precomputed distance matrix.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    precomputed : bool
        True if X is to be treated as precomputed distances to the samples in
        Y.

    dtype : string, type, list of types or None (default=None)
        Data type required for X and Y. If None, the dtype will be an
        appropriate float type selected by _return_float_dtype.

        .. versionadded:: 0.18

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
        If Y was None, safe_Y will be a pointer to X.

    """
    X, Y, dtype_float = _return_float_dtype(X, Y)

    # only warn about dtype conversion when the caller pinned a dtype
    warn_on_dtype = dtype is not None
    estimator = 'check_pairwise_arrays'
    if dtype is None:
        dtype = dtype_float

    if Y is X or Y is None:
        # validate once and alias: Y deliberately shares X's memory
        X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
                            warn_on_dtype=warn_on_dtype, estimator=estimator)
    else:
        X = check_array(X, accept_sparse='csr', dtype=dtype,
                        warn_on_dtype=warn_on_dtype, estimator=estimator)
        Y = check_array(Y, accept_sparse='csr', dtype=dtype,
                        warn_on_dtype=warn_on_dtype, estimator=estimator)

    if precomputed:
        # X is a distance matrix: its columns must index Y's rows
        if X.shape[1] != Y.shape[0]:
            raise ValueError("Precomputed metric requires shape "
                             "(n_queries, n_indexed). Got (%d, %d) "
                             "for %d indexed." %
                             (X.shape[0], X.shape[1], Y.shape[0]))
    elif X.shape[1] != Y.shape[1]:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X.shape[1] == %d while Y.shape[1] == %d" % (
                             X.shape[1], Y.shape[1]))

    return X, Y
def check_paired_arrays(X, Y):
    """ Set X and Y appropriately and checks inputs for paired distances

    All paired distance metrics should use this function first to assert that
    the given parameters are correct and safe to use.

    Validation is delegated to ``check_pairwise_arrays`` (array conversion,
    float dtype, 2-D enforcement); on top of that, paired metrics require the
    two arrays to have exactly the same shape, which is enforced here.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
        If Y was None, safe_Y will be a pointer to X.

    """
    X, Y = check_pairwise_arrays(X, Y)
    x_shape, y_shape = X.shape, Y.shape
    if x_shape != y_shape:
        raise ValueError("X and Y should be of same shape. They were "
                         "respectively %r and %r long." % (x_shape, y_shape))
    return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
    However, this is not the most precise way of doing this computation, and
    the distance matrix returned by this function may not be exactly
    symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the commo |
RPGOne/Skynet | scikit-learn-0.18.1/examples/exercises/plot_cv_diabetes.py | Python | bsd-3-clause | 2,861 | 0.002447 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.linear_model import LassoCV
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score

diabetes = datasets.load_diabetes()
# Use only the first 150 samples to keep the exercise fast.
X = diabetes.data[:150]
# BUG FIX: restored the slice corrupted by stray " | " separators.
y = diabetes.target[:150]

lasso = Lasso(random_state=0)
alphas = np.logspace(-4, -0.5, 30)

scores = list()
scores_std = list()

n_folds = 3

# Cross-validate the Lasso at each candidate alpha.
for alpha in alphas:
    lasso.alpha = alpha
    this_scores = cross_val_score(lasso, X, y, cv=n_folds, n_jobs=1)
    scores.append(np.mean(this_scores))
    scores_std.append(np.std(this_scores))

scores, scores_std = np.array(scores), np.array(scores_std)

plt.figure().set_size_inches(8, 6)
plt.semilogx(alphas, scores)

# plot error lines showing +/- std. errors of the scores
std_error = scores_std / np.sqrt(n_folds)

plt.semilogx(alphas, scores + std_error, 'b--')
plt.semilogx(alphas, scores - std_error, 'b--')

# alpha=0.2 controls the translucency of the fill color
plt.fill_between(alphas, scores + std_error, scores - std_error, alpha=0.2)

plt.ylabel('CV score +/- std error')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
plt.xlim([alphas[0], alphas[-1]])

##############################################################################
# Bonus: how much can you trust the selection of alpha?

# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = LassoCV(alphas=alphas, random_state=0)
k_fold = KFold(3)

print("Answer to the bonus question:",
      "how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold.split(X, y)):
    lasso_cv.fit(X[train], y[train])
    print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
          format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")

plt.show()
|
French77/osmc | package/mediacenter-addon-osmc/src/script.module.osmcsetting.services/resources/lib/osmcservices/__init__.py | Python | gpl-2.0 | 279 | 0 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2020 OSMC (KodeKarnage)
    This file is part of script.module.osmcsetting.services
SPDX-License-Identifier: GPL-2.0-or-later
See LICENSES/GPL-2.0-or-later for more information.
"""
# BUG FIX: removed a stray " | " separator that had split 'services_gui'.
__all__ = ['services_gui', 'osmc']
|
quora/qcore | qcore/enum.py | Python | apache-2.0 | 12,142 | 0.001235 | # Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Enum implementation.
"""
# BUG FIX: removed a stray " | " separator that had split the "__all__" name.
__all__ = ["Enum", "EnumType", "EnumValueGenerator", "Flags", "IntEnum"]
import inspect
import sys
from . import helpers
from . import inspection
_no_default = helpers.MarkerObject("no_default @ enums")
class EnumType(type):
    """Metaclass for all enum types.

    Builds the name/value lookup tables (via :meth:`process`) whenever an
    enum class is created, and makes the class itself iterable, sized, and
    callable (``MyEnum(value)`` parses a value into a member).

    BUG FIX: removed a stray " | " separator that had corrupted the
    ``__len__`` definition.
    """

    def __init__(cls, what, bases=None, dict=None):
        super().__init__(what, bases, dict)
        cls.process()

    def __contains__(self, k):
        return k in self._value_to_name

    def __len__(self):
        return len(self._members)

    def __iter__(self):
        return iter(self._members)

    def __call__(self, value, default=_no_default):
        """Instantiating an Enum always produces an existing value or throws an exception."""
        return self.parse(value, default=default)

    def process(self):
        """(Re)build the lookup tables from the class's int-valued attributes."""
        name_to_member = {}
        value_to_member = {}
        value_to_name = {}
        flag_values = []
        members = []

        for k, v in list(inspect.getmembers(self)):
            # ensure that names are unicode, even in py2
            if isinstance(k, bytes):
                k = k.decode("ascii")
            if isinstance(type(v), EnumType):
                v = v.value  # For inherited members
            if isinstance(v, int):
                assert (
                    v not in value_to_member
                ), "Duplicate enum value: %s (class: %s)." % (
                    v,
                    inspection.get_full_name(self),
                )
                member = self._make_value(v)
                name_to_member[k] = member
                value_to_member[v] = member
                value_to_name[v] = k
                if v != 0:
                    flag_values.append(v)
                members.append(member)

        self._name_to_member = name_to_member
        self._value_to_member = value_to_member
        self._value_to_name = value_to_name
        # Non-zero values, largest first, for decomposing Flags bitmasks.
        self._flag_values = list(reversed(sorted(flag_values)))
        self._members = sorted(members, key=lambda m: m.value)
        for m in members:
            setattr(self, m.short_name, m)

    def _make_value(self, value):
        """Instantiates an enum with an arbitrary value."""
        member = self.__new__(self, value)
        member.__init__(value)
        return member

    # Needed bcz of a six bug: https://github.com/benjaminp/six/issues/252
    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        return {}
class EnumBase(metaclass=EnumType):
    """Common behavior for enum members; subclasses declare the members.

    The lookup tables below are placeholders — EnumType.process() rebuilds
    them on every subclass at class-creation time.
    """
    _name_to_member = {}
    _value_to_member = {}
    _value_to_name = {}
    _flag_values = []
    _members = []
    def __init__(self, value):
        # Every member wraps a plain int value.
        self.value = int(value)
    @property
    def short_name(self):
        """Returns the enum member's name, like "foo"."""
        raise NotImplementedError
    @property
    def long_name(self):
        """Returns the enum member's name including the class name, like "MyEnum.foo"."""
        return "%s.%s" % (self.__class__.__name__, self.short_name)
    @property
    def title(self):
        """Returns the enum member's name in title case, like "Foo Bar" for MyEnum.foo_bar."""
        return self.short_name.replace("_", " ").title()
    @property
    def full_name(self):
        """Returns the enum member's name including the module, like "mymodule.MyEnum.foo"."""
        return "%s.%s" % (self.__class__.__module__, self.long_name)
    def is_valid(self):
        # Subclasses decide what counts as a valid value.
        raise NotImplementedError
    def assert_valid(self):
        # NOTE(review): _create_invalid_value_error is expected to be defined
        # elsewhere in this module — confirm it exists.
        if not self.is_valid():
            raise _create_invalid_value_error(self.__class__, self.value)
    def __int__(self):
        return self.value
    def __call__(self):
        # Calling a member returns its raw integer value.
        return self.value
    def __eq__(self, other):
        # Members compare equal to their raw int value as well.
        return self.value == other
    def __ne__(self, other):
        return self.value != other
    def __hash__(self):
        # Hash like the underlying int so members work as dict/set keys.
        return hash(self.value)
    def __str__(self):
        if self.is_valid():
            return self.short_name
        else:
            return "%s(%s)" % (self.__class__.__name__, self.value)
    def __repr__(self):
        if self.is_valid():
            return self.__class__.__name__ + "." + self.short_name
        else:
            return "%s(%s)" % (self.__class__.__name__, self.value)
    @classmethod
    def get_names(cls):
        """Returns the names of all members of this enum."""
        return [m.short_name for m in cls._members]
    @classmethod
    def get_members(cls):
        """Returns all members of this enum, sorted by value."""
        return cls._members
    @classmethod
    def create(cls, name, members):
        """Creates a new enum type based on this one (cls) and adds newly
        passed members to the newly created subclass of cls.
        This method helps to create enums having the same member values as
        values of other enum(s).
        :param name: name of the newly created type
        :param members: 1) a dict or 2) a list of (name, value) tuples
                        and/or EnumBase instances describing new members
        :return: newly created enum type.
        """
        NewEnum = type(name, (cls,), {})
        if isinstance(members, dict):
            members = members.items()
        for member in members:
            if isinstance(member, tuple):
                name, value = member
                setattr(NewEnum, name, value)
            elif isinstance(member, EnumBase):
                setattr(NewEnum, member.short_name, member.value)
            else:
                assert False, (
                    "members must be either a dict, "
                    + "a list of (name, value) tuples, "
                    + "or a list of EnumBase instances."
                )
        NewEnum.process()
        # needed for pickling to work (hopefully); taken from the namedtuple implementation in the
        # standard library
        try:
            NewEnum.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
        except (AttributeError, ValueError):
            pass
        return NewEnum
    @classmethod
    def parse(cls, value, default=_no_default):
        """Parses a value into a member of this enum."""
        raise NotImplementedError
    def __reduce_ex__(self, proto):
        # Pickle members as (class, (value,)) so they round-trip by value.
        return self.__class__, (self.value,)
class Enum(EnumBase):
def is_valid(self):
return self.value in self._value_to_member
@property
def short_name(self):
self.assert_valid()
return self._value_to_name[self.value]
@classmethod
def parse(cls, value, default=_no_default):
"""Parses an enum member name or value into an enum member.
Accepts the following types:
- Members of this enum class. These are returned directly.
- Integers. If there is an enum member with the integer as a value, that member is returned.
- Strings. If there is an enum member with the string as its name, that member is returned.
For integers and strings that don't correspond to an enum member, default is returned; if
no default is given the function raises KeyError instead.
Examples:
>>> class Color(Enum):
... red = 1
... blue = 2
>>> Color.parse(Color.red)
Color.red
>>> Color.parse(1)
Color.red
>>> Color.parse('blue')
Color.blue
"""
if isinstance(value, cls):
return value
elif isinstance(value, int) and not isinstance(value, EnumBase):
e = cls._value_to_member.get(value, _no_default)
else:
e = cls._name_to_member.get(value, _no_default)
if e is |
hpe-storage/python-lefthandclient | test/test_HPELeftHandClient_system.py | Python | apache-2.0 | 1,222 | 0 | # (c) Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test class of LeftHand Client system level APIs """
import test_HPELeftHandClient_base
class HPELeftHandClientSystemTestCase(test_HPELeftHandClient_base.
                                      HPELeftHandClientBaseTestCase):
    """System-level smoke tests for the LeftHand REST client."""

    def setUp(self):
        super(HPELeftHandClientSystemTestCase, self).setUp()

    def tearDown(self):
        super(HPELeftHandClientSystemTestCase, self).tearDown()

    def test_1_get_api_version(self):
        """The backend reports a non-None API version."""
        self.printHeader('get_api_version')
        version = self.cl.getApiVersion()
        self.assertTrue(version is not None)
        self.printFooter('get_api_version')
|
tschaume/ccsgp_get_started | ccsgp_get_started/examples/gp_ptspec.py | Python | mit | 10,339 | 0.025728 | import logging, argparse, os, sys, re
import numpy as np
from collections import OrderedDict
from .utils import getWorkDirs, getEnergy4Key
from ..ccsgp.ccsgp import make_panel, make_plot
from ..ccsgp.utils import getOpts
from ..ccsgp.config import default_colors
from decimal import Decimal
import uncertainties.umath as umath
import uncertainties.unumpy as unp
from fnmatch import fnmatch
def getMeeLabel(s):
    """Map a particle key to its gnuplot enhanced-text label.

    Unknown keys are echoed back unchanged.
    """
    labels = {
        'pi0': '{/Symbol \160}^0',
        'omega': '{/Symbol \167}',
        'phi': '{/Symbol \152}',
        'jpsi': 'J/{/Symbol \171}',
    }
    return labels.get(s, s)
def splitFileName(fn):
    """Split a data-file basename into (energy, mee_name, mee_range, data_type).

    Expects basenames like '19data_LMR_0.4-0.76': the first '_'-field mixes
    the collision-energy digits with an alphabetic data type, the second is
    the mass-window name and the third its range.
    """
    parts = fn.split('_')
    # Raw strings avoid invalid-escape warnings in the regex literals.
    energy = re.search(r'\d+', parts[0]).group()
    data_type = re.search(r'(?i)[a-z]+', parts[0]).group()
    return (energy, parts[1], parts[2], data_type)
def getSubplotTitle(mn, mr):
    """Compose a panel title: '<label> : <range>  GeV/c^{2}'."""
    return '%s : %s  GeV/c^{2}' % (getMeeLabel(mn), mr)
def gp_ptspec():
"""example for a 2D-panel plot etc."""
fenergies = ['19', '27', '39', '62', ]# '200']
nen = len(fenergies)
mee_keys = ['pi0', 'LMR', 'omega', 'phi', 'IMR', 'jpsi']
#mee_keys = ['LMR', ]
mee_dict = OrderedDict((k,'') for k in mee_keys)
yscale = { '200': '300', '62': '5000', '39': '50', '27': '0.3', '19': '0.001' }
inDir, outDir = getWorkDirs()
data, data_avpt, dpt_dict = {}, {}, {}
yvals, yvalsPt = [], []
scale = {
'19': 1.3410566491548412, '200': 1.0, '39': 1.2719203877292842,
'27': 1.350873678084769, '62': 1.2664666321635087
}
lmr_label = None
for filename in os.listdir(inDir):
# import data
file_url = os.path.join(inDir, filename)
filebase = os.path.splitext(filename)[0] # unique
energy, mee_name, mee_range, data_type = splitFileName(filebase)
if mee_name == 'LMR':
mee_range_split = map(float, mee_range.split('-'))
lmr_label = 'LMR: %g < M_{ee} < %g GeV/c^{2}' % (
mee_range_split[0], mee_range_split[1]
)
if energy == '200': continue
if mee_name not in mee_keys: continue
mee_dict[mee_name] = mee_range
data[filebase] = np.loadtxt(open(file_url, 'rb'))
if data_type == 'data':
#print data[filebase]
data[filebase] = data[filebase][:-1] # skip mT<0.4 point
if energy == '200': data[filebase][:,(1,3,4)] /= 0.5
# calculate average pT first
mask = (data[filebase][:,0] > 0.4) & (data[filebase][:,0] < 2.2)
avpt_data = data[filebase][mask]
pTs = avpt_data[:,0]
wghts = avpt_data[:,1]
probs = unp.uarray(avpt_data[:,1], avpt_data[:,3]) # dN/pT
probs /= umath.fsum(probs) # probabilities
avpt = umath.fsum(pTs*probs)
logging.info(('%s: {} %g' % (
filebase, np.average(pTs, weights = wghts)
)).format(avpt)) # TODO: syst. uncertainties
# save datapoint for average pT and append to yvalsPt for yaxis range
dp = [ float(getEnergy4Key(energy)), avpt.nominal_value, 0., avpt.std_dev, 0. ]
avpt_key = mee_name
if data_type == 'cocktail': avpt_key += '_c'
if data_type == 'medium': avpt_key += '_m'
if data_type == 'mediumMedOnly': avpt_key += '_mMed'
if data_type == 'mediumQgpOnly': avpt_key += '_mQgp'
if avpt_key in data_avpt: data_avpt[avpt_key].append(dp)
else: data_avpt[avpt_key] = [ dp ]
yvalsPt.append(avpt.nominal_value)
# now adjust data for panel plot and append to yvals
if data_type != 'data':
data[filebase][:,(1,3,4)] /= scale[energy]
data[filebase][:,(1,3,4)] *= float(yscale[energy])
if data_type == 'cocktail' or fnmatch(data_type, '*medium*'):
data[filebase][:,2:] = 0.
yvals += [v for v in data[filebase][:,1] if v > 0]
# prepare dict for panel plot
dpt_dict_key = getSubplotTitle(mee_name, mee_range)
if dpt_dict_key not in dpt_dict:
ndsets = nen*2
# TODO: currently only 19/39/62 medium avail. w/ med/qgp/tot for each
# July14: all energies available; TODO: fix dsidx
if mee_name == 'LMR': ndsets += 4*3
dpt_dict[dpt_dict_key] = [ [None]*ndsets, [None]*ndsets, [None]*ndsets ]
enidx = fenergies.index(energy)
dsidx = enidx
if fnmatch(data_type, '*medium*'):
# 19: 0-2, 27: 3-5, 39: 6-8, 62: 9-11
dsidx = (energy=='19')*0 + (energy=='27')*3 + (energy=='39')*6 + (energy=='62')*9
dsidx += (data_type=='mediumQgpOnly')*0 + (data_type=='mediumMedOnly')*1
dsidx += (data_type=='medium')*2
else:
dsidx += int(mee_name == 'LMR') * 4 * 3 # number of medium calc avail.
dsidx += int(data_type == 'data') * len(fenergies)
dpt_dict[dpt_dict_key][0][dsidx] = data[filebase] # data
if data_type == 'data': # properties
dpt_dict[dpt_dict_key][1][dsidx] = 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[enidx]
elif data_type == 'medium':
dpt_dict[dpt_dict_key][1][dsidx] = 'with lines lt 1 lw 5 lc %s' % default_colors[enidx]
else:
dpt_dict[dpt_dict_key][1][dsidx] = 'with lines lt %d lw 5 lc %s' % (
2+(data_type=='mediumMedOnly')+(data_type=='mediumQgpOnly')*2, default_colors[enidx]
)
dpt_dict[dpt_dict_key][2][dsidx] = ' '.join([ # legend titles
getEnergy4Key(energy), 'GeV', '{/Symbol \26 | 4} %g' % (
Decimal(yscale[energy])#.as_tuple().exponent
)
]) if data_type == 'data' else ''
# use mass range in dict key to sort dpt_dict with increasing mass
plot_key_order = dpt_dict.keys()
plot_key_order.sort(key=lambda x: float(x.split(':')[1].split('-')[0]))
# so | rt data_avpt by energy and apply x-shift for better visibility
for k in data_avpt: data_avpt[k].sort(key=lambda x: x[0])
energies = [ dp[0] for dp in data_avpt[mee_keys[0]] ]
energies.append(215.) # TODO: think of better upper limit
linsp = {}
for start,stop in zip(energies[:-1],energies[1:]):
linsp[start] = np.linspace(start, stop, num = 4*len(mee_keys))
for k in data_avpt:
key = k.split('_')[0]
for i in xrange(len(data_avpt[k])):
data_avpt[k][i][0] = linsp[energies[i]][mee_keys.index(key)]
# make panel plot
yMin, yMax = 0.5*min(yvals), 3*max(yvals)
make_panel(
dpt_dict = OrderedDict((k,dpt_dict[k]) for k in plot_key_order),
name = os.path.join(outDir, 'ptspec'),
ylabel = '1/N@_{mb}^{evt} d^{2}N@_{ee}^{acc.}/dp_{T}dM_{ee} (c^3/GeV^2)',
xlabel = 'dielectron transverse momentum, p_{T} (GeV/c)',
ylog = True, xr = [0, 2.2], yr = [1e-9, 1e4],
#lmargin = 0.12, bmargin = 0.10, tmargin = 1., rmargin = 1.,
key = ['bottom left', 'samplen 0.5', 'width -2', 'opaque'],
arrow_bar = 0.002, layout = '3x2', size = '8in,8in'
)
#make plot for LMR spectra only
#lmr_key = getSubplotTitle('LMR', '0.4-0.76')
#if energy == '200':
# lmr_key = getSubplotTitle('LMR', '0.3-0.76')
#pseudo_point = np.array([[-1,0,0,0,0]])
#model_titles = ['Cocktail + Model', 'Cocktail', 'in-Medium', 'QGP']
#model_props = [
# 'with lines lc %s lw 5 lt %d' % (default_colors[-2], i+1)
# for i in xrange(len(model_titles))
#]
#make_plot(
# data = dpt_dict[lmr_key][0] + [ pseudo_point ] * len(model_titles),
# properties = dpt_dict[lmr_key][1] + model_props,
# titles = dpt_dict[lmr_key][2] + model_titles,
# name = os.path.join(outDir, 'ptspecLMR'),
# ylabel = '1/N@_{mb}^{evt} d^{2}N@_{ee}^{acc.}/dp_{T}dM_{ee} (c^3/GeV^2)',
# xlabel = 'dielectron transverse momentum, p_{T} (GeV/c)',
# ylog = True, xr = [0, 2.0], yr = [1e-8, 100],
# lmargin = 0.15, bmargin = 0.08, rmargin = 0.98, tmargin = 0.84,
# key = ['maxrows 4', 'samplen 0.7', 'width -2', 'at graph 1.,1.2'],
# arrow_bar = 0.005, size = '10in,13in',
# labels = {
# 'stat. errors only': [0.7,0.95,False], lmr_label: [0.05,0.03,False],
# 'STAR Preliminary': [0.05,0.07,False],
# }
#)
# make mean pt plot
#yMinPt, yMaxPt = 0.95*min(yvalsPt), 1.05*max(yvalsPt)
#make_plot(
# data = [ # cocktail
# np.array(data_avpt[k+'_c']) for k in mee_keys
# ] + [ # medium
# np.array(data_avpt['LMR_m'])
# ] + [ # data
# np.array(data_avpt[k]) for k in mee_keys
# ],
# properties = [
# 'with lines lt 1 lw 4 lc %s' % default_colors[i if i < 5 else i+1]
# for i in xrange(len(mee_keys))
# ] + |
tgquintela/TimeSeriesTools | TimeSeriesTools/Measures/information_theory_measures.py | Python | mit | 9,838 | 0.001728 |
"""
Information theory measures
---------------------------
Collection of measures which uses the Information Theory.
"""
import numpy as np
import scipy
from ..utils.sliding_utils import sliding_embeded_transf
from ..utils.fit_utils import general_multiscale_fit
###############################################################################
#################### ENTROPY ##################################################
def entropy(ts, base=None):
    """Entropy measure of a given time-serie. That function is only appliable
    for discrete valued time-serie.

    Parameters
    ----------
    ts: np.ndarray
        the values of the measures of the time series in some time sample.
    base: int or None
        logarithm base handed to scipy.stats.entropy (None means nats).

    Returns
    -------
    ent: float
        the entropy of the value distribution of the first column.

    References
    ----------
    ..[1] http://orange.biolab.si/blog/2012/06/15/joint-entropy-in-python/
    """
    # Ensure a column-oriented 2d array.
    if len(ts.shape) < 2:
        ts = np.atleast_2d(ts).T
    n_rows, n_cols = ts.shape
    # Shannon entropy of the empirical value distribution, per column.
    per_column = []
    for col in range(n_cols):
        values = ts[:, col].squeeze()
        probs = [np.mean(values == level) for level in set(values)]
        per_column.append(scipy.stats.entropy(probs, base=base))
    # Only the first column's entropy is returned, as a plain float.
    return float(per_column[0])
#def shan_entropy(c):
# c_normalized = c/float(np.sum(c))
# c_normalized = c_normalized[np.nonzero(c_normalized)]
# H = -sum(c_normalized* np.log(c_normalized))
# return H
#
## TODEPRECATE
#def entropy(X1):
###http://orange.biolab.si/blog/2012/06/15/joint-entropy-in-python/
# if len(X1.shape)<2:
# X1 = np.matrix(X1).T
# [rows, cols] = X1.shape
# entropies = np.zeros(shape=(cols,1))
# for i in range(cols):
# X = X1[:,i]
# probs = [np.mean(X == c) for c in set(X)]
# entropies[i] = np.sum(-p * np.log2(p) for p in probs)
# #print entropies
# return entropies
#
## FASTER Possible alternative
#def entropy2(X1, base = None):
###http://orange.biolab.si/blog/2012/06/15/joint-entropy-in-python/
# if len(X1.shape)<2:
# X1 = np.matrix(X1).T
# [rows, cols] = X1.shape
# entropies = np.zeros(shape=(cols,1))
# for i in range(cols):
# X = X1[:,i]
# probs = np.histogram(X, bins = len(set(X)) ,density=True)[0]
# entropies[i] = scipy.stats.entropy(probs, base=base)
# return entropies
###############################################################################
########################## Svd Entropy ########################################
def svd_entropy(X, tau=1, D=1):
    """Compute SVD Entropy from time series.

    The series is delay-embedded (lag `tau`, dimension `D`) and the Shannon
    entropy of its normalized singular-value spectrum is returned.
    """
    # Delay-embedding of the input series.
    embedded = sliding_embeded_transf(X, tau, D)
    # Singular values of the embedding matrix.
    sv = np.linalg.svd(embedded, compute_uv=0)
    # Normalize so the spectrum behaves like a probability distribution.
    sv = sv / np.sum(sv)
    return float(-np.sum(sv * np.log(sv)))
###############################################################################
########################## Spectral Entropy ###################################
def spectral_entropy(X, bins=50):
    """Compute spectral entropy of a time series. Spectral entropy is the
    entropy associated to the entropy in the distribution of the power of a
    time series between its frequency spectrum space.

    Parameters
    ----------
    X : array_like, shape(N,)
        a 1-D real time series.
    bins : int
        number of bins in which we want to discretize the frequency spectrum
        space in order to compute the entropy. The result depends on this
        choice.

    Returns
    -------
    H_sp : float
        Spectral entropy, normalized by log2(bins).
    """
    # Power spectrum of the signal.
    ps = np.abs(np.fft.fft(X)) ** 2
    # `density=True` replaces the `normed` keyword, which was removed from
    # np.histogram in modern NumPy; for equal-width bins they are equivalent.
    psd, _ = np.histogram(ps, bins, density=True)
    # The small epsilon guards against log2(0) for empty bins.
    H_sp = -np.sum(psd * np.log2(psd + 1e-16)) / np.log2(psd.shape[0])
    return float(H_sp)
###############################################################################
########################## Fisher information #################################
def fisher_info(X, tau=1, D=1):
    """Compute Fisher information of a time series.

    Parameters
    ----------
    X : array_like, shape(N,)
        a 1-D real time series.
    tau : integer
        the lag or delay when building the embedding sequence.
    D : integer
        the embedding dimension used to build the embedding matrix.

    Returns
    -------
    FI : number
        the computed measure.

    Notes
    -----
    NOTE(review): the active formula is -sum(W * log(W)) over the raw
    (unnormalized) singular values — an SVD-entropy-style quantity rather
    than the difference-quotient Fisher formula that was left commented out
    in earlier revisions. Confirm the intended definition upstream.
    """
    # Delay-embed the series, then take its singular-value spectrum.
    embedded = sliding_embeded_transf(X, tau, D)
    sv = np.linalg.svd(embedded, compute_uv=0)
    measure = -1. * np.sum(sv * np.log(sv))
    return measure
###############################################################################
########################## Fisher information #################################
def dfa(X):
"""Compute Detrended Fluctuation Analysis from a time series X. There is
an adaptation function of the one provided in pyEGG.
The first step to compute DFA is to integrate the signal. Let original
series be X= [x(1), x(2), ..., x(N)].
The integrated signal Y = [y(1), y(2), ..., y(N)] is obtained as follows
y(k) = \sum_{i=1}^{k}{x(i)-Ave} where Ave is the mean of X.
The second step is to partition/slice/segment the integrated sequence Y
into boxes. At least two boxes are needed for computing DFA. Box sizes are
specified by the L argument of this function. By default, it is from 1/5 of
signal length to one (x-5)-th of the signal length, where x is the nearest
power of 2 from the length of the signal, i.e., 1/16, 1/32, 1/64, 1/128,...
In each box, a linear least square fitting is employed on data in the box.
Denote the series on fitted line as Yn. Its k-th elements, yn(k),
corresponds to y(k).
For fitting in each box, there is a residue, the sum of squares of all
offsets, difference between actual points and points on fitted line.
F(n) denotes the square root of average total residue in all boxes when box
length is n, thus
Total_Residue = \sum_{k=1}^{N}{(y(k)-yn(k))}
F(n) = \sqrt(Total_Residue/N)
The computing to F(n) is carried out for every box length n. Therefore, a
relationship between n and F(n) can be obtained. In general, F(n) increases
when n increases.
Finally, the relationship between F(n) and n is analyzed. A least square
fitting is performed between log(F(n)) and log(n). The slope of the fitting
line is the DFA value, denoted as Alpha. To white noise, Alpha should be
0.5. Higher level of signal complexity is related to higher Alpha.
Parameters
----------
X: array_like, shape(N,)
a time series
L: 1-D Python list of integers
A list of box size, integers in ascending order
Returns
-------
Alpha : integer
the result of DFA analysis, thus the slope of fitting line of log(F(n))
vs. log(n).
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(4096)
>>> dfa(a)
0.490035110345
Reference
---------
.. [1] Peng C-K, Havlin S, Stanley HE, Goldberger AL. Quantification of
scaling exponents and crossover phenomena in nonstationary heartbeat
time series. _Chaos_ 1995;5:82-87
.. [2] http://www.physionet.org/tutorials/fmnc/node5.html
Notes
-----
This value depends on the box sizes very much. When the input is a white
|
bobsilverberg/oneanddone | oneanddone/users/tests/test_mixins.py | Python | mpl-2.0 | 2,289 | 0.000874 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.core.exceptions import PermissionDenied
from mock import Mock, patch
from nose.tools import eq_, raises
from oneanddone.base.tests import TestCase
from oneanddone.users.mixins import BaseUserProfileRequiredMixin, MyStaffUserRequiredMixin
from oneanddone.users.tests import UserFactory, UserProfileFactory
class FakeMixin(object):
    """Terminal mixin whose dispatch() only reports that it was reached."""

    def dispatch(self, request, *args, **kwargs):
        """Return a sentinel instead of doing any real dispatching."""
        return 'fakemixin'
class FakeView(BaseUserProfileRequiredMixin, FakeMixin):
    # Stub view: the profile-required mixin under test runs first in the
    # MRO, falling through to FakeMixin.dispatch on success.
    pass
class FakeViewNeedsStaff(MyStaffUserRequiredMixin, FakeMixin):
    # Stub view guarded by the staff-required mixin under test.
    pass
class MyStaffUserRequiredMixinTests(TestCase):
    def setUp(self):
        self.view = FakeViewNeedsStaff()

    def test_is_staff(self):
        """A staff user falls through to the parent class's dispatch."""
        req = Mock()
        req.user = UserFactory.create(is_staff=True)
        eq_(self.view.dispatch(req), 'fakemixin')

    @raises(PermissionDenied)
    def test_not_staff(self):
        """A non-staff user triggers a PermissionDenied exception."""
        req = Mock()
        req.user = UserFactory.create(is_staff=False)
        self.view.dispatch(req)
class UserProfileRequiredMixinTests(TestCase):
    def setUp(self):
        self.view = FakeView()

    def test_has_profile(self):
        """A user with a profile who accepted the privacy policy reaches
        the parent class's dispatch."""
        req = Mock()
        profile = UserProfileFactory.create(privacy_policy_accepted=True)
        req.user = profile.user
        eq_(self.view.dispatch(req), 'fakemixin')

    def test_no_profile(self):
        """A user without a profile is redirected to profile creation."""
        req = Mock()
        req.user = UserFactory.create()
        with patch('oneanddone.users.mixins.redirect') as mock_redirect:
            eq_(self.view.dispatch(req), mock_redirect.return_value)
            mock_redirect.assert_called_with('users.profile.create')
|
beeftornado/sentry | tests/sentry/api/test_paginator.py | Python | bsd-3-clause | 23,706 | 0.001012 | from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from unittest import TestCase as SimpleTestCase
from sentry.api.paginator import (
BadPaginationError,
Paginator,
DateTimePaginator,
OffsetPaginator,
SequencePaginator,
GenericOffsetPaginator,
CombinedQuerysetIntermediary,
CombinedQuerysetPaginator,
reverse_bisect_left,
)
from sentry.models import User, Rule
from sentry.incidents.models import AlertRule
from sentry.testutils import TestCase, APITestCase |
from sentry.utils.cursors import Cursor
class PaginatorTest(TestCase):
    cls = Paginator

    def test_max_limit(self):
        """max_limit caps the page size regardless of the requested limit."""
        self.create_user("foo@example.com")
        self.create_user("bar@example.com")
        self.create_user("baz@example.com")
        qs = User.objects.all()

        page = self.cls(qs, "id", max_limit=10).get_result(limit=2, cursor=None)
        assert len(page) == 2

        page = self.cls(qs, "id", max_limit=1).get_result(limit=2, cursor=None)
        assert len(page) == 1

    def test_count_hits(self):
        """count_hits respects both the queryset and the given ceiling."""
        self.create_user("foo@example.com")
        self.create_user("bar@example.com")

        filtered = User.objects.filter(email="foo@example.com")
        assert self.cls(filtered, "id").count_hits(1000) == 1

        everyone = User.objects.all()
        assert self.cls(everyone, "id").count_hits(1000) == 2

        nobody = User.objects.none()
        assert self.cls(nobody, "id").count_hits(1000) == 0

        assert self.cls(everyone, "id").count_hits(1) == 1

    def test_prev_emptyset(self):
        """A prev-cursor taken from an empty first page picks up rows
        created afterwards."""
        pager = self.cls(User.objects.all(), "id")
        empty_page = pager.get_result(limit=1, cursor=None)

        user = self.create_user("foo@example.com")
        page = pager.get_result(limit=1, cursor=empty_page.prev)
        assert len(page) == 1, (page, list(page))
        assert page[0] == user

        final = pager.get_result(limit=1, cursor=page.prev)
        assert len(final) == 0, (final, list(final))
class OffsetPaginatorTest(TestCase):
    # offset paginator does not support dynamic limits on is_prev

    def test_simple(self):
        """Walk forward through three single-item pages, then back."""
        user1 = self.create_user("foo@example.com")
        user2 = self.create_user("bar@example.com")
        user3 = self.create_user("baz@example.com")

        pager = OffsetPaginator(User.objects.all(), "id")

        page1 = pager.get_result(limit=1, cursor=None)
        assert len(page1) == 1, page1
        assert page1[0] == user1
        assert page1.next
        assert not page1.prev

        page2 = pager.get_result(limit=1, cursor=page1.next)
        assert len(page2) == 1, (page2, list(page2))
        assert page2[0] == user2
        assert page2.next
        assert page2.prev

        page3 = pager.get_result(limit=1, cursor=page2.next)
        assert len(page3) == 1, page3
        assert page3[0] == user3
        assert not page3.next
        assert page3.prev

        page4 = pager.get_result(limit=1, cursor=page3.next)
        assert len(page4) == 0, page4
        assert not page4.next
        assert page4.prev

        page5 = pager.get_result(limit=1, cursor=page4.prev)
        assert len(page5) == 1, page5
        assert page5[0] == user3
        assert not page5.next
        assert page5.prev

    def test_negative_offset(self):
        """Cursors with negative components are rejected."""
        self.create_user("baz@example.com")
        pager = OffsetPaginator(User.objects.all())

        with self.assertRaises(BadPaginationError):
            pager.get_result(cursor=Cursor(10, -1))

        with self.assertRaises(BadPaginationError):
            pager.get_result(cursor=Cursor(-10, 1))

    def test_order_by_multiple(self):
        """Ordering may combine several columns."""
        user1 = self.create_user("foo@example.com")
        self.create_user("bar@example.com")
        user3 = self.create_user("baz@example.com")

        qs = User.objects.all()

        page = OffsetPaginator(qs, "id").get_result(limit=1, cursor=None)
        assert len(page) == 1, page
        assert page[0] == user1
        assert page.next
        assert not page.prev

        user3.update(is_active=False)
        pager = OffsetPaginator(qs, ("is_active", "id"))

        page = pager.get_result(limit=1, cursor=None)
        assert len(page) == 1, page
        assert page[0] == user3
        assert page.next
        assert not page.prev

        page = pager.get_result(limit=1, cursor=page.next)
        assert len(page) == 1, (page, list(page))
        assert page[0] == user1
        assert page.next
        assert page.prev

    def test_max_offset(self):
        """Offsets beyond max_offset raise BadPaginationError."""
        self.create_user("foo@example.com")
        self.create_user("bar@example.com")
        self.create_user("baz@example.com")
        qs = User.objects.all()

        page = OffsetPaginator(qs, max_offset=10).get_result(cursor=None)
        assert len(page) == 3, page

        with self.assertRaises(BadPaginationError):
            OffsetPaginator(qs, max_offset=0).get_result()
class DateTimePaginatorTest(TestCase):
def test_ascending(self):
joined = timezone.now()
# The DateTime pager only has accuracy up to 1000th of a second.
# Everything can't be added within less than 10 microseconds of each
# other. This is handled by the pager (see test_rounding_offset), but
# this case shouldn't rely on it.
res1 = self.create_user("foo@example.com", date_joined=joined)
res2 = self.create_user("bar@example.com", date_joined=joined + timedelta(seconds=1))
res3 = self.create_user("baz@example.com", date_joined=joined + timedelta(seconds=2))
res4 = self.create_user("qux@example.com", date_joined=joined + timedelta(seconds=3))
queryset = User.objects.all()
paginator = DateTimePaginator(queryset, "date_joined")
result1 = paginator.get_result(limit=2, cursor=None)
assert len(result1) == 2, result1
assert result1[0] == res1
assert result1[1] == res2
assert result1.next
assert not result1.prev
result2 = paginator.get_result(limit=2, cursor=result1.next)
assert len(result2) == 2, result2
assert result2[0] == res3
assert result2[1] == res4
assert not result2.next
assert result2.prev
result3 = paginator.get_result(limit=1, cursor=result2.prev)
assert len(result3) == 1, result3
assert result3[0] == res2
assert result3.next
assert result3.prev
result4 = paginator.get_result(limit=1, cursor=result3.prev)
assert len(result4) == 1, result4
assert result4[0] == res1
assert result4.next
assert not result4.prev
def test_descending(self):
joined = timezone.now()
res1 = self.create_user("foo@example.com", date_joined=joined)
res2 = self.create_user("bar@example.com", date_joined=joined + timedelta(seconds=1))
res3 = self.create_user("baz@example.com", date_joined=joined + timedelta(seconds=2))
queryset = User.objects.all()
paginator = DateTimePaginator(queryset, "-date_joined")
result1 = paginator.get_result(limit=1, cursor=None)
assert len(result1) == 1, result1
assert result1[0] == res3
assert result1.next
assert not result1.prev
result2 = paginator.get_result(limit=2, cursor=result1.next)
assert len(result2) == 2, result2
a |
yerkesobservatory/seo | routines/ch.py | Python | gpl-3.0 | 48,676 | 0.000616 | """CALLHORIZONS - a Python interface to access JPL HORIZONS
ephemerides and orbital elements.
This module provides a convenient python interface to the JPL
HORIZONS system by directly accessing and parsing the HORIZONS
website. Ephemerides can be obtained through get_ephemerides,
orbital elements through get_elements. Function
export2pyephem provides an interface to the PyEphem module.
michael.mommert (at) nau.edu, latest version: v1.0.1, 2016-07-19.
This code is inspired by code created by Alex Hagen.
v1.0.3: ObsEclLon and ObsEclLat added to get_ephemerides
v1.0.2: Python 3.5 compatibility implemented
v1.0.1: get_ephemerides fixed
v1.0: bugfixes completed, planets/satellites accessible, too
v0.9: first release
"""
from __future__ import (print_function, unicode_literals)
import time
import numpy as np
try:
# Python 3
import urllib.request as urllib
except ImportError:
# Python 2
import urllib2 as urllib
class query():
# constructor
    def __init__(self, targetname, smallbody=True, cap=True):
        """
        Initialize query to Horizons

        Parameters
        ----------
        targetname : str
            HORIZONS-readable target number, name, or designation
        smallbody : boolean
            use ``smallbody=False`` if targetname is a planet or spacecraft (optional, default: True)
        cap : boolean
            set to `True` to return the current apparition for comet targets.

        Results
        -------
        None
        """
        self.targetname = str(targetname)
        self.not_smallbody = not smallbody
        self.cap = cap
        # Epoch configuration; filled in by the set_epoch* methods.
        self.start_epoch = None
        self.stop_epoch = None
        self.step_size = None
        self.discreteepochs = None
        # Populated once HORIZONS has been queried.
        self.url = None
        self.data = None
        return None
# small body designation parsing
def parse_comet(self):
"""Parse `targetname` as if it were a comet.
Returns
-------
des : string or None
The designation of the comet or `None` if `targetname` does
not appear to be a comet name. Note that comets starting
with 'X/' are allowed, but this designation indicates a
comet without an orbit, so `query()` should fail.
Examples
--------
targetname des
1P/Halley 1P
3D/Biela 3D
9P/Tempel 1 9P
73P/Schwassmann Wachmann 3 C 73P # Note the missing "C"!
73P-C/Schwassmann Wachmann 3 C 73P-C
73P-BB 73P-BB
322P 322P
X/1106 C1 X/1106 C1
P/1994 N2 (McNaught-Hartley) P/1994 N2
P/2001 YX127 (LINEAR) P/2001 YX127
C/-146 P1 C/-146 P1
C/2001 A2-A (LINEAR) C/2001 A2-A
C/2013 US10 C/2013 US10
C/2015 V2 (Johnson) C/2015 V2
"""
import re
pat = ('^(([1-9]{1}[0-9]*[PD](-[A-Z]{1,2})?)'
'|([CPX]/-?[0-9]{1,4} [A-Z]{1,2}[1-9][0-9]{0,2}(-[A-Z]{1,2})?))')
m = re.findall(pat, self.targetname.strip())
if len(m) == 0:
return None
else:
return m[0][0]
def parse_asteroid(self):
"""Parse `targetname` as if it were a asteroid.
Returns
-------
des : string or None
The designation of the asteroid or `None` if `targetname` does
not appear to be an asteroid name.
Examples
--------
targetname des
1 1
(2) Pallas 2
(20 | 01) Einstein 2001
2001 AT1 2001 AT1
(1714) Sy 1714
1714 SY 1714 SY # Note the near-confusion with (1714)
2014 MU69 2014 MU69
2017 AA 2017 AA
"""
import re
pat = ('^(([1-9][0-9]*( [A-Z]{1,2}([1-9][0-9]{0,2})?)?)'
'|(\(([1-9][0-9]*)\)))')
m = re.findall(pat, self.targetname.strip())
if len(m) == 0: |
return None
else:
if len(m[0][5]) > 0:
return m[0][5]
else:
return m[0][0]
def isorbit_record(self):
"""`True` if `targetname` appears to be a comet orbit record number.
NAIF record numbers are 6 digits, begin with a '9' and can
change at any time.
"""
import re
test = re.match('^9[0-9]{5}$', self.targetname.strip()) is not None
return test
def iscomet(self):
"""`True` if `targetname` appears to be a comet."""
return self.parse_comet() is not None
def isasteroid(self):
"""`True` if `targetname` appears to be an asteroid."""
return self.parse_asteroid() is not None
# set epochs
def set_epochrange(self, start_epoch, stop_epoch, step_size):
"""Set a range of epochs, all times are UT
Parameters
----------
start_epoch : str
start epoch of the format 'YYYY-MM-DD [HH-MM-SS]'
stop_epoch : str
final epoch of the format 'YYYY-MM-DD [HH-MM-SS]'
step_size : str
epoch step size, e.g., '1d' for 1 day, '10m' for 10 minutes...
Returns
-------
None
Examples
--------
>>> import callhorizons
>>> ceres = callhorizons.query('Ceres')
>>> ceres.set_epochrange('2016-02-26', '2016-10-25', '1d')
Note that dates are mandatory; if no time is given, midnight is assumed.
"""
self.start_epoch = start_epoch
self.stop_epoch = stop_epoch
self.step_size = step_size
return None
def set_discreteepochs(self, discreteepochs):
"""Set a list of discrete epochs, epochs have to be given as Julian
Dates
Parameters
----------
discreteepochs : list
list of floats or strings, maximum length: 15
Returns
-------
None
Examples
--------
>>> import callhorizons
>>> ceres = callhorizons.query('Ceres')
>>> ceres.set_discreteepochs([2457446.177083, 2457446.182343])
If more than 15 epochs are provided, the list will be cropped to 15 epochs.
"""
if type(discreteepochs) is not list:
discreteepochs = [discreteepochs]
self.discreteepochs = discreteepochs
# data access functions
@property
def fields(self):
"""returns list of available properties for all epochs"""
try:
return self.data.dtype.names
except AttributeError:
return []
def __len__(self):
"""returns total number of epochs that have been queried"""
try:
# Cast to int because a long is returned from shape on Windows.
return int(self.data.shape[0])
except AttributeError:
return 0
@property
def dates(self):
"""returns list of epochs that have been queried (format 'YYYY-MM-DD HH-MM-SS')"""
try:
return self.data['datetime']
except:
return []
@property
def query(self):
"""returns URL that has been used in calling HORIZONS"""
try:
return self.url
except:
return []
@property
def dates_jd(self):
"""returns list of epochs that have been queried (Julian Dates)"""
try:
return self.data['datetime_jd']
except:
return []
def __repr__(self):
"""returns brief query information"""
return "<callhorizons.query object: %s>" % self.targetname
def __str__(self):
"""returns information on the current query as string"""
output = "targetname: %s\n" % self.targetname
if self.discreteepochs is not None:
output += "discrete epochs: %s\n" % \
" |
devonjones/PSRD-Parser | src/psrd/spell_lists.py | Python | gpl-3.0 | 5,140 | 0.028599 | import os
import json
import re
from BeautifulSoup import BeautifulSoup
from psrd.rules import write_rules
from psrd.files import char_replace
from psrd.universal import parse_universal, print_struct
from psrd.sections import ability_pass, is_anonymous_section, has_subsections, entity_pass, quote_pass
def core_structure_pass(section, filename):
section['name'] = 'Spell Lists'
sections = []
spell_lists = []
for s in section['sections']:
if s['name'].endswith('Spells'):
spell_lists.append(s)
elif s['name'].endswith('Formulae'):
spell_lists.append(s)
elif s['name'] != 'Spells by Class':
sections.append(s)
section['sections'] = sections
return section, spell_lists
def advanced_class_guide_structure_pass(section, filename):
spell_lists = section['sections'][6:]
del section['sections'][6:]
return section, spell_lists
def advanced_structure_pass(section, filename):
sections = []
spell_lists = []
top = section['sections'].pop(0)
top['name'] = "Spell Lists"
for s in section['sections']:
if s['name'].endswith('Spells'):
spell_lists.append(s)
return top, spell_lists
def ultimate_magic_structure_pass(section, filename):
section['sections'].pop(0)
return None, section['sections']
def spell_list_structure_pass(section, filename):
spell_lists = []
if filename == 'spellLists.html' and len(section['sections']) == 18:
section, spell_lists = mythic_structure_pass(section, filename)
elif section['source'] == "Advanced Class Guide":
section, spell_lists = advanced_class_guide_structure_pass(section, filename)
elif filename in ('spellLists.html'):
section, spell_lists = core_structure_pass(section, filename)
elif filename in ('advancedSpellLists.html', 'ultimateCombatSpellLists.html'):
section, spell_lists = advanced_structure_pass(section, filename)
elif filename in ('ultimateMagicSpellLists.html'):
section, spell_lists = ultimate_magic_structure_pass(section, filename)
else:
del section['sections']
print section
return section, spell_lists
def spell_list_name_pass(spell_lists):
retlists = []
for casting_class in spell_lists:
clname = casting_class['name']
clname = clname.replace('Spells', '').strip()
clname = clname.replace('Formulae', '').strip()
for sl in casting_class['sections']:
sl['type'] = 'spell_list'
if clname.find('Mythic') > -1:
clname = clname.replace('Mythic', '').strip()
sl['type'] = 'mythic_spell_list'
sl['class'] = clname
m = re.search('(\d)', sl['name'])
sl['level'] = int(m.group(0))
retlists.append(sl)
return retlists
def spell_pass(spell_list):
spells = []
school = None
descriptor = None
school_type = True
if spell_list['class'] in ['Elementalist Wizard']:
school_type = False
for s in spell_list['sections']:
if s.has_key('sections'):
if school_type:
school = s['name']
else:
descriptor = s['name']
for ss in s['sections']:
soup = BeautifulSoup(ss['text'])
spells.append(create_spell(ss['name'], soup, school, descriptor))
elif spell_list['source'] in ('Mythic Adventures', 'Advanced Race Guide'):# spell_list['type'] == 'mythic_spell_list':
soup = BeautifulSoup(s['text'])
spells.append(create_spell(s['name'], soup))
else:
soup = BeautifulSoup(s['text'])
if ''.join(soup.findAll(text=True)) == '':
if school_type:
school = s['name']
else:
descriptor = s['name']
else:
spells.append(create_spell(s['name'], soup, school, descriptor))
spell_list['spells'] = spells
del spell_list['sections']
return spell_list
def create_spell(name, soup, school=None, descriptor=None):
if name.endswith(":"):
name = name[:-1] |
comps = ""
if soup.find('sup'):
sup = soup.find('sup')
comps = sup.renderContents()
sup.replaceWith('')
if comps.find(",") > -1:
comps = [c.strip() for c in comps.split(",")]
else:
comps = list(comps)
desc = ''.join(soup.findAll(text=True))
if desc.startswith(":"):
desc = desc[1:].strip()
spell = {'name': name}
if desc.strip() != '':
desc = desc.strip()
desc = desc.replace("“", '"')
desc = des | c.replace("”", '"')
desc = desc.replace("–", '-')
spell['description'] = desc
if len(comps) > 0:
spell['material'] = comps
if school:
spell['school'] = school
if descriptor:
spell['descriptor'] = descriptor
return spell
def parse_spell_lists(filename, output, book):
struct = parse_universal(filename, output, book)
struct = quote_pass(struct)
struct = entity_pass(struct)
rules, spell_lists = spell_list_structure_pass(struct, os.path.basename(filename))
spell_lists = spell_list_name_pass(spell_lists)
for spell_list in spell_lists:
sl = spell_pass(spell_list)
print "%s: %s" %(sl['source'], sl['name'])
filename = create_spell_list_filename(output, book, sl)
fp = open(filename, 'w')
json.dump(sl, fp, indent=4)
fp.close()
if rules:
write_rules(output, rules, book, "spell_lists")
def create_spell_list_filename(output, book, spell_list):
title = char_replace(book) + "/spell_lists/" + char_replace(spell_list['class']) + "-" + unicode(spell_list['level'])
return os.path.abspath(output + "/" + title + ".json")
|
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Examples/Infovis/Python/tables4.py | Python | gpl-3.0 | 1,335 | 0.003745 | #!/usr/bin/env python
"""
This file provides a more advanced example of vtkTable access and
manipulation methods.
"""
from __future__ import print_function
from vtk import *
#------------------------------------------------------------------------------
# Script Entry Point (i.e., main() )
#------------------------------------------------------------------------------
if __name__ == "__main__":
""" Main entry point of this python script """
print("vtkTable Example 4: Accessing vtkTable data elements")
# Load our table from a CSV file (covered in table2.py)
csv_source = vtkDelimitedTextReader()
csv_source.SetFieldDelimiterCharacters(",")
csv_sour | ce.SetHaveHeaders(True)
csv_source.SetFileName("table_data.csv" | )
csv_source.Update()
csv_source.GetOutput().Dump(6)
T = csv_source.GetOutput()
# Print some information about the table
print("Number of Columns =", T.GetNumberOfColumns())
print("Number of Rows =", T.GetNumberOfRows())
print("Get column 1, row 4 data: ", T.GetColumn(1).GetValue(4))
# Add a new row to the table
new_row = [8, "Luis", 68]
for i in range( T.GetNumberOfColumns()):
T.GetColumn(i).InsertNextValue( str(new_row[i]) )
print("Table after new row appended:")
T.Dump(6)
print("vtkTable Example 4: Finished.")
|
NetApp/manila | manila/tests/db/test_api.py | Python | apache-2.0 | 2,190 | 0 | # Copyright (c) Goutham Pacha Ravi.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit Tests for the interface methods in the manila/db/api.py."""
import re
from manila.db import api as db_interface
from manila.db.sqlalchemy import api as db_api
from manila import test
class DBInterfaceTestCase(test.TestCase):
"""Test cases for the DB Interface methods."""
def setUp(self):
super(self.__class__, self).setUp()
def test_interface_methods(self):
"""Ensure that implementation methods match interfaces.
manila/db/api module is merely shim layer between the database
implementation and the other methods using these implementations.
Bugs are introduced when the shims go out of sync with the actual
implementation. So this test ensures that method names and
signatures match between the interface and the implementation.
"""
members = dir(db_interface)
# Ignore private methods for the file and any other members that
# need not match.
ignore_members = re.compile(r'^_|CONF|IMPL')
interfaces = [i for i in members if not ignore_members.match(i)]
for interface in interfaces:
method = getattr(db_interface, interface)
if callable(method):
| mock_method_call = self.mock_object(db_api, interface)
# kwargs always specify defaults, ignore them in the signature.
args = filter(
| lambda x: x != 'kwargs', method.__code__.co_varnames)
method(*args)
self.assertTrue(mock_method_call.called)
|
Asurada2015/TFAPI_translation | framework_ops/Utility functions/tf_name_scope.py | Python | apache-2.0 | 401 | 0.005935 | """
def name_scope(name, default_name=None, values=None):
Wrapper for Graph.name_s | cope() using the default graph.
使用默认图包装“Graph.name_scope()
See
Graph.name_scope()
for more detai | ls.
Args:
name: A name for the scope.
Returns:
A context manager that installs name as a new name scope in the
default graph.
在默认图中安装名称作为一个新名称范围的内容管理器
""" |
tensorflow/examples | lite/examples/speech_commands/ml/export/convert_keras_to_quantized.py | Python | apache-2.0 | 6,406 | 0.004527 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input arguments.
num_output: this value has nothing to do with the number of classes, batch_size,
etc.,
and it is mostly equal to 1. If the network is a **multi-stream network**
(forked network with multiple outputs), set the value to the number of outputs.
quantize: if set to True, use the quantize feature of Tensorflow
(https://www.tensorflow.org/performance/quantization) [default: False]
use_theano: Thaeno and Tensorflow implement convolution in different ways.
When using Keras with Theano backend, the order is set to 'channels_first'.
This feature is not fully tested, and doesn't work with quantizization [default:
False]
input_fld: directory holding the keras weights file [default: .]
output_fld: destination directory to save the tensorflow files [default: .]
input_model_file: name of the input weight file [default: 'model.h5']
output_model_file: name of the output weight file [default:
args.input_model_file + '.pb']
graph_def: if set to True, will write the graph definition as an ascii file
[default: False]
output_graphdef_file: if graph_def is set to True, the file name of the
graph definition [default: model.ascii]
output_node_prefix: the prefix to use for output nodes. [default: output_node]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from keras import backend as K
from model import conv_1d_time_stacked_model
from pathlib import Path
import tensorflow.compat.v1 as tf
parser = argparse.ArgumentParser(description='set input arguments')
parser.add_argument(
'-input_fld', action='store', dest='input_fld', type=str, default='.')
parser.add_argument(
'-output_fld', action='store', dest='output_fld', type=str, default='')
parser.add_argument(
'-input_model_file',
action='store',
dest='input_model_file',
type=str,
default='model.h5')
parser.add_argument(
'-output_model_file',
action='store',
dest='output_model_file',
type=str,
default='')
parser.add_argument(
'-output_graphdef_file',
action='store',
dest='output_graphdef_file',
type=str,
default='model.ascii')
parser.add_argument(
'-num_outputs', action='store', dest='num_outputs', type=int, default=1)
parser.add_argument(
'-graph_def', action='store', dest='graph_def', type=bool, default=False)
parser.add_argument(
'-output_node_prefix',
action='store',
dest='output_node_prefix',
type=str,
default='output_node')
parser.add_argument(
'-quantize', action='store', dest='quantize', type=bool, default=False)
parser.add_argument(
'-theano_backend',
action='store',
dest='theano_backend',
type=bool,
default=False)
parser.add_argument('-f')
args = parser.parse_args()
parser.print_help()
print('input args: ', args)
if args.theano_backend and args.quantize:
raise ValueError('Quantize feature does not work with theano backend.')
output_fld = args.input_fld if not args.output_fld else args.output_fld
if not args.output_model_file:
args.output_model_file = str(Path(args.input_model_file).name) + '.pb'
Path(output_fld).mkdir(parents=True, exist_ok=True)
weight_file_path = str(Path(args.input_fld) / args.input_model_file)
# Load keras model and rename output
# In[ ]:
K.set_learning_phase(0)
if args.theano_backend:
K.set_image_data_format('channels_first')
else:
K.set_image_data_format('channels_last')
try:
fingerprint_size = 16000
label_count = 12
net_model = conv_1d_time_stacked_model(
fingerprint_size, num_classes=label_count)
net_model.load_weights('../conv_1d_time_stacked_model/ep-022-vl-0.2864.hdf5')
except ValueError as err:
print(
"""Input file specified ({}) only holds the weights, and not the model definition.
Save the model using mode.save(filename.h5) which will contain the network architecture
as well as its weights.
If the model is saved using model.save_weights(filename.h5), the model architecture is
expected to be saved separately in a json format and loaded prior to loading the weights.
Check the keras documentation for more details (https://keras.io/getting-started/faq/)"""
.format(weight_file_path))
raise err
num_output = args.num_outputs
pred = [None] * num_output
pred_node_names = [None] * num_output
for i in range(num_output):
pred_node_names[i] = args.output_node_prefix + str(i)
pred[i] = tf.identity(net_model.outputs[i], name=pred_node_names[i])
print('output nodes names are: ', pred_node_names)
# [optional] write graph definition in ascii
# In[ ]:
sess = K.get_session()
if args.graph_def:
f = args.output_graphdef_file
tf.io.write_graph(sess.graph.as_ | graph_def(), output_fld, f, as_text=True)
print('saved the graph d | efinition in ascii format at: ',
str(Path(output_fld) / f))
# convert variables to constants and save
# In[ ]:
if args.quantize:
# graph_transforms will not be available for future versions.
from tensorflow.compat.v1.tools.graph_transforms import TransformGraph # pylint: disable=g-import-not-at-top
transforms = ['quantize_weights', 'quantize_nodes']
transformed_graph_def = TransformGraph(sess.graph.as_graph_def(), [],
pred_node_names, transforms)
constant_graph = tf.graph_util.convert_variables_to_constants(
sess, transformed_graph_def, pred_node_names)
else:
constant_graph = tf.graph_util.convert_variables_to_constants(
sess, sess.graph.as_graph_def(), pred_node_names)
tf.io.write_graph(
constant_graph, output_fld, args.output_model_file, as_text=False)
print('saved the freezed graph (ready for inference) at: ',
str(Path(output_fld) / args.output_model_file))
|
Retzudo/manufactorum | manage.py | Python | agpl-3.0 | 1,443 | 0 | #!/usr/bin/env python3
import sys
import pytest
from flask.ext.script import Manager
from manufactorum import app
from manufactorum import users
from getpass import getpass
manager = Manager(app)
@manager.option(
'-h', '--host',
dest='host',
default='127.0.0.1'
| )
@manager.option(
'-p', '--port',
dest='port',
default='5000'
)
@manager.option(
'--no-debug',
dest='no_debug',
action='store_true',
help='Disable debugging mode'
)
def run(host='127.0 | .0.1', port=5000, no_debug=False):
app.run(host=host, port=int(port), debug=(not no_debug))
@manager.command
def add_admin():
username = input('Username: ')
password = getpass('Password: ')
password_repeat = getpass('Repeat: ')
if password == password_repeat:
try:
users.add_user(username, password)
print('User {} added successfully'.format(username))
except ValueError:
print(
'User {} already exists.'.format(username),
file=sys.stderr
)
else:
print('Passwords did not match.', file=sys.stderr)
@manager.option(
'-c', '--coverage',
action='store_true',
help='Run with coverage'
)
def test(coverage):
args = ['--ignore=env']
if coverage:
args.append('--cov=manufactorum')
args.append('--cov-report=term-missing')
pytest.main(args)
if __name__ == '__main__':
manager.run()
|
pykello/hdb | launch-hdb.py | Python | bsd-2-clause | 2,162 | 0.000463 | #!/usr/bin/env python
import argparse
import os
import socket
import sys
import time
from hdb import client
from hdb import main
import json
from daemon import Daemon
class HDBDaemon(Daemon):
def run(self):
main.main(self.args)
def set_args(self, args):
self.args = args
def is_port_open(address, port):
s = socket.socket()
try:
s.bind((address, port))
s.close()
return True
except socket.error, e:
return False
def find_open_port(start_from):
port = start_from
while not is_port_open('localhost', port):
port += 1
return port
def get_bucket_map(ports):
bucket_map = [['localhost', ports[i % len(ports)]] for i in range(4096)]
return bucket_map
def send_bucket_map(hostname, port, bucket_map):
msg = {"code": "BucketMapUpdate", "bucket_map": bucket_map}
response = client.request((hostname, port), msg)
if response["code"] != "OK":
print "Error in sending bucket map to (localhost, %d)" % (port, )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='HDB launch script')
parser.add_argument('instance_count', action='store', type=int)
parser.add_argument('node_count_max', action='store', type= | int)
parser.add_argument('rel_count_max', action='store', type=int)
args = parser.parse_args()
instance_count = args.instance_count
node_count_max = args.node_count_max
rel_count_max = args.rel_count_max
port = 5430
ports = []
for i in range(instance_count):
port = find_open_port(port)
pid = os.fork()
if pid == 0:
daemon = HDBDaemon(stde | rr='/tmp/hdb.err', stdout='/tmp/hdb.out')
daemon.set_args(['main.py', str(port), str(node_count_max),
str(rel_count_max)])
daemon.start()
else:
sys.stdout.write("%d\n" % (port,))
sys.stdout.flush()
ports.append(port)
port += 1
time.sleep(1)
bucket_map = get_bucket_map(ports)
for port in ports:
send_bucket_map('localhost', port, bucket_map)
sys.exit(0)
|
tysonclugg/django | tests/generic_views/test_dates.py | Python | bsd-3-clause | 35,527 | 0.003124 | import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings, skipUnlessDBFeature
from django.test.utils import requires_tz_support
from django.utils import timezone
from .models import Artist, Author, Book, BookSigning, Page
def _make_books(n, base_date):
for i in range(n):
Book.objects.create(
name='Book %d' % i,
slug='book-%d' % i,
pages=100 + i,
pubdate=base_date - datetime.timedelta(days=i))
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.artist1 = Artist.objects.create(name='Rene Magritte')
cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano')
cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg')
cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1))
cls.book1.authors.add(cls.author1)
cls.book2 = Book.objects.create(
name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1)
)
cls.page1 = Page.objects.create(
content='I was once bitten by a moose.', template='generic_views/page_template.html'
)
@override_settings(ROOT_URLCONF='generic_views.urls')
class ArchiveIndexViewTests(TestDataMixin, TestCase):
def test_archive_view(self):
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objec | ts.all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_context_object_name(self):
res | = self.client.get('/dates/books/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['thingies']), list(Book.objects.all()))
self.assertNotIn('latest', res.context)
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 404)
def test_allow_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_template(self):
res = self.client.get('/dates/books/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_archive_view_template_suffix(self):
res = self.client.get('/dates/books/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_archive_view_invalid(self):
msg = (
'BookArchive is missing a QuerySet. Define BookArchive.model, '
'BookArchive.queryset, or override BookArchive.get_queryset().'
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.client.get('/dates/books/invalid/')
def test_archive_view_by_month(self):
res = self.client.get('/dates/books/by_month/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'month', 'DESC')))
def test_paginated_archive_view(self):
_make_books(20, base_date=datetime.date.today())
res = self.client.get('/dates/books/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[0:10]))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
res = self.client.get('/dates/books/paginated/?page=2')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 2)
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[10:20]))
def test_paginated_archive_view_does_not_load_entire_table(self):
# Regression test for #18087
_make_books(20, base_date=datetime.date.today())
# 1 query for years list + 1 query for books
with self.assertNumQueries(2):
self.client.get('/dates/books/')
# same as above + 1 query to test if books exist + 1 query to count them
with self.assertNumQueries(4):
self.client.get('/dates/books/paginated/')
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(2):
self.client.get('/dates/books/reverse/')
def test_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted descending in index"""
_make_books(5, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(reversed(sorted(res.context['date_list']))))
def test_archive_view_custom_sorting(self):
Book.objects.create(name="Zebras for Dummies", pages=600, pubdate=datetime.date(2007, 5, 1))
res = self.client.get('/dates/books/sortedbyname/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.order_by('name').all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_custom_sorting_dec(self):
Book.objects.create(name="Zebras for Dummies", pages=600, pubdate=datetime.date(2007, 5, 1))
res = self.client.get('/dates/books/sortedbynamedec/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.order_by('-name').all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
@override_settings(ROOT_URLCONF='generic_views.urls')
class YearArchiveViewTests(TestDataMixin, TestCase):
def test_year_view(self):
res = self.client.get('/dates/books/2008/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2008, 10, 1)])
self.assertEqual(res.context['year'], datetime.date(2008, 1, 1))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
# Since allow_empty=False, next/prev years must be |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/aio/operations/_security_rules_operations.py | Python | mit | 22,467 | 0.004896 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SecurityRulesOperations:
"""SecurityRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_security_group_name: str,
security_rule_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_ | security_group_name, 'str'),
'securityRuleName': se | lf._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_security_group_name: str,
security_rule_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
async def get(
sel |
Zlash65/erpnext | erpnext/patches/v11_0/set_default_email_template_in_hr.py | Python | gpl-3.0 | 297 | 0.023569 | from __future__ import unicode_literals
import frappe
def execute():
hr_settings = frappe.get | _single("HR Settings")
hr_settings.leave_approval_notification_tem | plate = "Leave Approval Notification"
hr_settings.leave_status_notification_template = "Leave Status Notification"
hr_settings.save() |
MDA2014/django-xpand | django_project/django_project/urls.py | Python | mit | 439 | 0.009112 | fro | m django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'django_project.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', include('apps.home.urls', namespace='home', app_name='home')),
url(r'^app/', include('apps.gen.urls', namespace='gen', app_name='gen')),
url(r'^admi | n/', include(admin.site.urls)),
)
|
CallmeTorre/RicePanel | ricepanel/manage.py | Python | apache-2.0 | 252 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ricepanel.se | ttings")
from django.core.management import execute_from_command_line
| execute_from_command_line(sys.argv)
|
keon/algorithms | algorithms/strings/__init__.py | Python | mit | 1,248 | 0.000801 | from .add_binary import *
from .breaking_bad import *
from .decode_string import *
from .delete_reoccurring import *
from .domain_extractor import *
from .encode_decode import *
from .group_anagrams import *
from .int_to_roman import *
from .is_palindrome import *
from .is_rotated import *
from .license_number import *
from .make_sentence import *
from .merge_string_checker import *
from .multiply_strings import *
from .one_edit_distance import *
from .rabin_karp import *
from .reverse_string import *
from .reverse_vowel import *
from .reverse_words import *
from .roman_to_int import *
from .strip_url_params import *
from .validate_coordinates import *
from .word_squares import *
from | .unique_morse import *
from .judge_circle import *
from .strong_password import *
from .caesar_cipher import *
fro | m .check_pangram import *
from .contain_string import *
from .count_binary_substring import *
from .repeat_string import *
from .text_justification import *
from .min_distance import *
from .longest_common_prefix import *
from .rotate import *
from .first_unique_char import *
from .repeat_substring import *
from .atbash_cipher import *
from .longest_palindromic_substring import *
from .knuth_morris_pratt import *
from .panagram import * |
hlzz/dotfiles | graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/protocols/ident.py | Python | bsd-3-clause | 7,930 | 0.002144 | # -*- test-case-name: twisted.test.test_ident -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Ident protocol implementation.
"""
import struct
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import log, failure
_MIN_PORT = 1
_MAX_PORT = 2 ** 16 - 1
class IdentError(Exception):
"""
Can't determine connection owner; reason unknown.
"""
identDescription = 'UNKNOWN-ERROR'
def __str__(self):
return self.identDescription
class NoUser(IdentError):
"""
The connection specified by the port pair is not currently in use or
currently not owned by an identifiable entity.
"""
identDescription = 'NO-USER'
class InvalidPort(IdentError):
"""
Either the local or foreign port was improperly specified. This should
be returned if either or both of the port ids were out of range (TCP
port numbers are from 1-65535), negative integers, reals or in any
fashion not recognized as a non-negative integer.
"""
identDescription = 'INVALID-PORT'
class HiddenUser(IdentError):
"""
The server was able to identify the user of this port, but the
information was not returned at the request of the user.
"""
identDescription = 'HIDDEN-USER'
class IdentServer(basic.LineOnlyReceiver):
"""
The Identification Protocol (a.k.a., "ident", a.k.a., "the Ident
Protocol") provides a means to determine the identity of a user of a
particular TCP connection. Given a TCP port number pair, it returns a
character string which identifies the owner of that connection on the
server's system.
Server authors should subclass this class and override the lookup method.
The default implementation returns an UNKNOWN-ERROR response for every
query.
"""
def lineReceived(self, line):
parts = line.split(',')
if len(parts) != 2:
self.invalidQuery()
else:
try:
portOnServer, portOnClient = map(int, parts)
except ValueError:
self.invalidQuery()
else:
if _MIN_PORT <= portOnServer <= _MAX_PORT and _MIN_PORT <= portOnClient <= _MAX_PORT:
self.validQuery(portOnServer, portOnClient)
else:
self._ebLookup(failure.Failure(InvalidPort()), portOnServer, portOnClient)
def invalidQuery(self):
self.transport.loseConnection()
def validQuery(self, portOnServer, portOnClient):
"""
Called when a valid query is received to look up and deliver the
response.
@param portOnServer: The server port from the query.
@param portOnClient: The client port from the query.
"""
serverAddr = self.transport.getHost().host, portOnServer
clientAddr = self.transport.getPeer().host, portOnClient
defer.maybeDeferred(self.lookup, serverAddr, clientAddr
| ).addCallback(self._cbLookup, portOnServer, portOnClient
).addErrback | (self._ebLookup, portOnServer, portOnClient
)
def _cbLookup(self, (sysName, userId), sport, cport):
self.sendLine('%d, %d : USERID : %s : %s' % (sport, cport, sysName, userId))
def _ebLookup(self, failure, sport, cport):
if failure.check(IdentError):
self.sendLine('%d, %d : ERROR : %s' % (sport, cport, failure.value))
else:
log.err(failure)
self.sendLine('%d, %d : ERROR : %s' % (sport, cport, IdentError(failure.value)))
def lookup(self, serverAddress, clientAddress):
"""Lookup user information about the specified address pair.
Return value should be a two-tuple of system name and username.
Acceptable values for the system name may be found online at::
U{http://www.iana.org/assignments/operating-system-names}
This method may also raise any IdentError subclass (or IdentError
itself) to indicate user information will not be provided for the
given query.
A Deferred may also be returned.
@param serverAddress: A two-tuple representing the server endpoint
of the address being queried. The first element is a string holding
a dotted-quad IP address. The second element is an integer
representing the port.
@param clientAddress: Like L{serverAddress}, but represents the
client endpoint of the address being queried.
"""
raise IdentError()
class ProcServerMixin:
"""Implements lookup() to grab entries for responses from /proc/net/tcp
"""
SYSTEM_NAME = 'LINUX'
try:
from pwd import getpwuid
def getUsername(self, uid, getpwuid=getpwuid):
return getpwuid(uid)[0]
del getpwuid
except ImportError:
def getUsername(self, uid):
raise IdentError()
def entries(self):
f = file('/proc/net/tcp')
f.readline()
for L in f:
yield L.strip()
def dottedQuadFromHexString(self, hexstr):
return '.'.join(map(str, struct.unpack('4B', struct.pack('=L', int(hexstr, 16)))))
def unpackAddress(self, packed):
addr, port = packed.split(':')
addr = self.dottedQuadFromHexString(addr)
port = int(port, 16)
return addr, port
def parseLine(self, line):
parts = line.strip().split()
localAddr, localPort = self.unpackAddress(parts[1])
remoteAddr, remotePort = self.unpackAddress(parts[2])
uid = int(parts[7])
return (localAddr, localPort), (remoteAddr, remotePort), uid
def lookup(self, serverAddress, clientAddress):
for ent in self.entries():
localAddr, remoteAddr, uid = self.parseLine(ent)
if remoteAddr == clientAddress and localAddr[1] == serverAddress[1]:
return (self.SYSTEM_NAME, self.getUsername(uid))
raise NoUser()
class IdentClient(basic.LineOnlyReceiver):
errorTypes = (IdentError, NoUser, InvalidPort, HiddenUser)
def __init__(self):
self.queries = []
def lookup(self, portOnServer, portOnClient):
"""Lookup user information about the specified address pair.
"""
self.queries.append((defer.Deferred(), portOnServer, portOnClient))
if len(self.queries) > 1:
return self.queries[-1][0]
self.sendLine('%d, %d' % (portOnServer, portOnClient))
return self.queries[-1][0]
def lineReceived(self, line):
if not self.queries:
log.msg("Unexpected server response: %r" % (line,))
else:
d, _, _ = self.queries.pop(0)
self.parseResponse(d, line)
if self.queries:
self.sendLine('%d, %d' % (self.queries[0][1], self.queries[0][2]))
def connectionLost(self, reason):
for q in self.queries:
q[0].errback(IdentError(reason))
self.queries = []
def parseResponse(self, deferred, line):
parts = line.split(':', 2)
if len(parts) != 3:
deferred.errback(IdentError(line))
else:
ports, type, addInfo = map(str.strip, parts)
if type == 'ERROR':
for et in self.errorTypes:
if et.identDescription == addInfo:
deferred.errback(et(line))
return
deferred.errback(IdentError(line))
else:
deferred.callback((type, addInfo))
__all__ = ['IdentError', 'NoUser', 'InvalidPort', 'HiddenUser',
'IdentServer', 'IdentClient',
'ProcServerMixin']
|
markgw/pimlico | src/python/pimlico/modules/spacy/__init__.py | Python | gpl-3.0 | 278 | 0.007194 | """spaCy
Run spaCy tools and pipelines on | your datasets.
Currently only includes tokenization, but this could be expanded to include
many more of spaCy's tools.
Or, if you want a different tool/pipeline, you could create your own
module type following the same appr | oach.
""" |
crakama/bc_7_twitment | keys.py | Python | mit | 156 | 0.012821 | Alchemy sentiment ana | lysis: fb12d2c55fff36e1e268584e261b6b010b37279f
Africa Is Talking: 676dbd926bbb04fa69ce90ee81d3f5ffee2692aaf80eb5793bd70fe9 | 3e77dc2e
|
MichaelCoughlinAN/Odds-N-Ends | Python/test_server.py | Python | gpl-3.0 | 376 | 0.005319 | #!/usr/bin/env python
import socket
TCP_IP = ''
TCP_PORT = 5005
BUFFER_SIZE = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
conn, addr = s.accept()
print 'Starting'
print 'Connection address:', addr
while 1:
data = conn.recv(BUFF | ER_SIZE)
if not data: break
print | "Frame received: " + data
conn.close()
|
aebarber/ScriptToolbox | server-management/media-server/genoggrefs.py | Python | mit | 1,117 | 0.005372 | #!/usr/bin/env python
import os
import sys
def createOggDir (artistDirectory):
oggdir = os.path.join(artistDirectory, 'ogg')
if os.path.isdir(oggdir):
return True
else:
if os.path.exists(oggdir):
return False
else:
print("creating directory" + oggdir)
os.makedirs(oggdir)
for artistDirectory in os.listdir('./'):
| for root, directories, filenames in os.walk(artistDirectory):
for directory in directories:
if directory == 'ogg':
if createOggDir(artistDirectory):
sourceDirectory = os.path.join(os.p | ath.abspath(root), directory)
targetOggDirectory = os.path.join(artistDirectory, 'ogg')
targetLinkName = os.path.basename(os.path.normpath(root))
targetLink = os.path.join(targetOggDirectory, targetLinkName)
if not os.path.islink(targetLink):
print("symlinking \"" + sourceDirectory + "\" >> \"" + targetLink + "\"")
os.symlink(sourceDirectory, targetLink)
|
herilalaina/scikit-learn | sklearn/metrics/cluster/supervised.py | Python | bsd-3-clause | 31,399 | 0.000159 | """Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# Gregory Stupp <stuppie@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
from math import log
import numpy as np
from scipy import sparse as sp
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.validation import check_array
from ...utils.fixes import comb
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays."""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, sparse=False):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps : None or float, optional.
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
sparse : boolean, optional.
If True, return a sparse CSR continency matrix. If ``eps is not None``,
and ``sparse is True``, will throw ValueError.
.. versionadded:: 0.18
Returns
-------
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
Will be a ``scipy.sparse.csr_matrix`` if ``sparse=True``.
"""
if eps is not None and sparse:
raise ValueError("Cannot set 'eps' when sparse=True")
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = sp.coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int)
if sparse:
contingency = contingency.tocsr()
contingency.sum_duplicates()
else:
contingency = contingency.toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete be not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://link.springer.com/article/10.1007%2FBF01908075
.. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
n_classes = np.unique(labels_true).shape[0]
n_clusters = np.unique(labels_pred).shape[0]
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique c | luster.
# These are perfect matches hence return 1 | .0.
if (n_classes == n_clusters == 1 or
n_classes == n_clusters == 0 or
n_classes == n_clusters == n_samples):
return 1.0
# Compute the ARI using the contingency data
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
sum_comb_c = sum(comb2(n_c) for n_c in np.ravel(contingency.sum(axis=1)))
sum_comb_k = sum(comb2(n_k) for n_k in np.ravel(contingency.sum(axis=0)))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.data)
prod_comb = (sum_comb_c * sum_comb_k) / comb(n_samples, 2)
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return (sum_comb - prod_comb) / (mean_comb - prod_comb)
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once.
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members o |
m8ttyB/socorro | webapp-django/crashstats/home/tests/test_views.py | Python | mpl-2.0 | 2,682 | 0 | from nose.tools import eq_, ok_
from django.core.urlresolvers import reverse
from django.conf import settings
from crashstats.crashstats.tests.test_views import BaseTestViews
class TestViews(BaseTestViews):
def test_home(self):
url = reverse('home:home', args=('WaterWolf',))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('WaterWolf Crash Data' in response.content)
ok_('WaterWolf 19.0' in response.content)
# Test with a different duration.
response = self.client.get(url, {'days': 14})
eq_(response.status_code, 200)
ok_('data-duration="14"' in response.content)
# Test with a different version.
response = self.client.get(url, {'version': '4.0.1'})
eq_(response.status_code, 200)
ok_('WaterWolf 4.0.1' in response.content)
ok_('WaterWolf 19.0' not in response.content)
def test_home_product_without_featured_versions(self):
url = reverse('home:home', args=('SeaMonkey',))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('SeaMonkey Crash Data' in response.content)
ok_('SeaMonkey 10.5' in response.content)
ok_('SeaMonkey 9.5' in response.content)
# Test with a different version.
response = self.client.get(url, {'version': '10.5'})
eq_(response.status_code, 200)
ok_('SeaMonkey 10.5' in response.content)
ok_('SeaMonkey 9.5' not in response.content)
def test_homepage_redirect(self):
response = self.client.get('/')
eq_(response.status_code, 302)
destination = reverse('home:home', args=[settings.DEFAULT_PRODUCT])
ok_(destination in response['Location'])
def test_homepage_products_redirect_without_versions(self):
url = '/home/produc | ts/WaterWolf'
# some legacy URLs have this
url += '/versions/'
redirect_code = settings.PERMANENT_LEGACY_REDIRECTS and 301 or 302
destination = reverse('home:home', args=['WaterWolf'])
response = self.client.get(url)
eq_(response.status_code, redirect_code)
intermediate_dest = response['Location']
response = self.client.get(intermediate_dest)
| eq_(response.status_code, redirect_code)
ok_(destination in response['Location'], response['Location'])
def test_home_400(self):
url = reverse('home:home', args=('WaterWolf',))
response = self.client.get(url, {'days': 'xxx'})
eq_(response.status_code, 400)
ok_('Enter a whole number' in response.content)
eq_(response['Content-Type'], 'text/html; charset=utf-8')
|
rado0x54/project-euler | python/problem0027.py | Python | mit | 1,182 | 0.002538 | #!/usr/bin/env python3
"""Project Euler - Problem 27 Module"""
def problem27(ab_limit):
"""Prob | lem 27 - Quadratic primes"""
# upper limit
nr_primes = 2 * ab_limit * ab_limit + ab_limit
primes = [1] * (nr_primes - 2)
result = 0
for x in range(2, nr_primes):
if primes[x - 2] == 1:
# x is Prime, eliminate x*y for y > 1
y = (x - 2) + x
while y < nr_primes - 2:
primes[y] = 0
y += x
| # Largest seq
l_seq = 0
for a in range(-ab_limit + 1, ab_limit):
for b in range(2, ab_limit):
if primes[b - 2] == 0:
continue # no prime
# check formula
seq = 1
x = 2
while True:
v = (x**2) + (a * x) + b
if v > 1 and primes[v - 2] == 1:
seq += 1
else:
break
x += 1
if seq > l_seq:
l_seq = seq
result = a * b
return result
def run():
"""Default Run Method"""
return problem27(1000)
if __name__ == '__main__':
print("Result: ", run())
|
leki75/ansible | lib/ansible/module_utils/asa.py | Python | gpl-3.0 | 5,158 | 0.003102 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network_common import to_list, EntityCollection
from ansible.module_utils.connection import Connection
_DEVICE_CONFIGS = {}
_CONNECTION = None
asa_argument_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
'timeout': dict(type='int'),
'provider': dict(type='dict'),
'context': dict()
}
command_spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
def get_argspec():
return asa_argument_spec
def check_args(module):
provider = module.params['provider'] or {}
for key in asa_argument_spec:
if key not in ['provider', 'authorize'] and module.params[key]:
module.warn('argument %s has been deprecated and will be removed in a future version' % key)
if provider:
for param in ('auth_pass', 'password'):
if provider.get(param):
module.no_log_values.update(return_values(provider[param]))
def get_connection(module):
global _CONNECTION
if _CONNECTION:
return _CONNECTION
_CONNECTION = Connection(module)
context = module.params['context']
if context:
if context == 'system':
command = 'changeto system'
else:
command = 'changeto context %s' % context
_CONNECTION.get(command)
return _CONNECTION
def to_commands(module, commands):
assert isinstance(commands, list), 'argument must be of type <list>'
transform = EntityCollection(module, command_spec)
commands = transform(commands)
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
module.warn('only show commands are supported when using check '
'mode, not executing `%s`' % item['command'])
return commands
def run_commands(module, commands, check_rc=True):
commands = to_commands(module, to_list(commands))
connection = get_connection(module)
responses = list()
for cmd in commands:
out = connection.get(**cmd)
responses.append(to_text(out, errors='surrogate_then_replace'))
return responses
def get_config(module, flags=[]):
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return _DEVICE_CONFIGS[cmd]
except KeyError:
conn = get_connection(module)
out = conn.get(cmd)
cfg = to_text(out, errors='surrogate_then_replace').strip()
_DEVICE_CONFIGS[cmd] = cfg
return cfg
def load_config(module, config):
conn = get_connection(module)
conn.edit_config(config)
def get_defaults_flag(module):
rc, out, err = exec_command(module, 'show running-config ?')
out = to_text(out, errors='surrogate_then_replace')
commands = set()
for line in out.splitlines():
| if line:
| commands.add(line.strip().split()[0])
if 'all' in commands:
return 'all'
else:
return 'full'
|
apmichaud/vitess-apm | py/vtdb/topology.py | Python | bsd-3-clause | 6,219 | 0.011417 | # Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# Give a somewhat sane API to the topology, using zkocc as the backend.
#
# There are two main use cases for the topology:
# 1. resolve a "db key" into a set of parameters that can be used to connect
# 2. resolve the full topology of all databases
#
import logging
import random
import time
from vtdb import dbexceptions
from vtdb import keyrange
from vtdb import keyrange_constants
from vtdb import keyspace
from vtdb import vtdb_logger
from zk import zkocc
# keeps a global version of the topology
# This is a map of keyspace_name: (keyspace object, time when keyspace was last fetched)
# eg - {'keyspace_name': (keyspace_object, time_of_last_fetch)}
# keyspace object is defined at py/vtdb/keyspace.py:Keyspace
__keyspace_map = {}
# Throttle to clear the keyspace cache and re-read it.
__keyspace_fetch_throttle = 5
def set_keyspace_fetch_throttle(throttle):
  """Set the minimum number of seconds between re-fetches of a keyspace."""
  global __keyspace_fetch_throttle
  __keyspace_fetch_throttle = throttle
# This returns the keyspace object for the keyspace name
# from the cached topology map or None if not found.
def get_keyspace(name):
  """Return the cached Keyspace object for `name`, or None if not cached."""
  entry = __keyspace_map.get(name)
  return entry[0] if entry is not None else None
# This returns the time of last fetch for the keyspace name
# from the cached topology map or None if not found.
def get_time_last_fetch(name):
  """Return the epoch time `name` was last fetched, or None if never."""
  entry = __keyspace_map.get(name)
  return entry[1] if entry is not None else None
# This adds the keyspace object to the cached topology map.
def __set_keyspace(ks):
  """Cache `ks` in the topology map with its fetch time (used for throttling)."""
  __keyspace_map[ks.name] = (ks, time.time())
# This function refreshes the keyspace in the cached topology
# map throttled by __keyspace_fetch_throttle secs. If the topo
# server is unavailable, it retains the old keyspace object.
def refresh_keyspace(zkocc_client, name):
  """Re-read keyspace `name` from the topo server, throttled.

  Refreshes at most once every __keyspace_fetch_throttle seconds. If the
  keyspace was never fetched, nothing happens; if the topo server returns
  nothing, the old cached entry is kept.
  """
  global __keyspace_fetch_throttle
  time_last_fetch = get_time_last_fetch(name)
  if time_last_fetch is None:
    # Never fetched before: nothing to refresh.
    return
  if (time_last_fetch + __keyspace_fetch_throttle) > time.time():
    # Throttled: the cached copy is still fresh enough.
    return
  start_time = time.time()
  ks = keyspace.read_keyspace(zkocc_client, name)
  topo_rtt = time.time() - start_time
  if ks is not None:
    # Only replace the cache on success; retain the old keyspace otherwise.
    __set_keyspace(ks)
  vtdb_logger.get_logger().topo_keyspace_fetch(name, topo_rtt)
# read all the keyspaces, populates __keyspace_map, can call get_keyspace
# after this step
def read_keyspaces(zkocc_client):
  """Populate the keyspace cache for all keyspaces (skips endpoint scan)."""
  read_topology(zkocc_client, read_fqdb_keys=False)
# ZK paths look like:
# /zk/<cell>/vt/keyspaces/<keyspace>/shards/<shard>/<db_type>/<instance_id>
# this function returns:
# - a list of all the ex | isting <keyspace>.<shard>.<db_type>
# - optionally, a list of all existing endpoints:
# <keyspace>.<shard>.<db_type>.<instance_id>
def read_topology(zkocc_client, read_fqdb_keys=True):
  """Read the full topology and populate the keyspace cache.

  Returns (db_keys, fqdb_keys):
    db_keys: every existing '<keyspace>.<shard>.<db_type>'.
    fqdb_keys: every endpoint '<keyspace>.<shard>.<db_type>.<instance_id>',
      only populated when read_fqdb_keys is True (requires one endpoint
      lookup per db_key, so it is noticeably slower).

  Raises:
    Exception: if the topo server returns an empty keyspace list.
  """
  fqdb_keys = []
  db_keys = []
  keyspace_list = zkocc_client.get_srv_keyspace_names('local')
  # validate step
  if len(keyspace_list) == 0:
    vtdb_logger.get_logger().topo_empty_keyspace_list()
    raise Exception('zkocc returned empty keyspace list')
  for keyspace_name in keyspace_list:
    try:
      ks = keyspace.read_keyspace(zkocc_client, keyspace_name)
      __set_keyspace(ks)
      for shard_name in ks.shard_names:
        for db_type in ks.db_types:
          db_key_parts = [ks.name, shard_name, db_type]
          db_key = '.'.join(db_key_parts)
          db_keys.append(db_key)
          if read_fqdb_keys:
            # One instance id per resolved endpoint for this db_key.
            db_instances = len(get_host_port_by_name(zkocc_client, db_key))
            for db_i in xrange(db_instances):
              fqdb_keys.append('.'.join(db_key_parts + [str(db_i)]))
    except Exception:
      # Bad data for one keyspace is logged and skipped, not fatal.
      vtdb_logger.get_logger().topo_bad_keyspace_data(keyspace_name)
  return db_keys, fqdb_keys
# db_key is <keyspace>.<shard_name>.<db_type>[:<service>]
# returns a list of entries to try, which is an array of tuples
# (host, port, encrypted)
def get_host_port_by_name(topo_client, db_key, encrypted=False):
  """Resolve db_key '<keyspace>.<shard>.<db_type>[:<service>]' to endpoints.

  Returns a list of (host, port, encrypted) tuples in random order. When
  `encrypted` is True and the requested service is '_vtocc', encrypted
  ('_vts') endpoints are preferred if any exist. Returns [] when the topo
  server cannot be queried.
  """
  parts = db_key.split(':')
  if len(parts) == 2:
    service = parts[1]
  else:
    service = '_mysql'
  host_port_list = []
  encrypted_host_port_list = []
  # BUGFIX: encrypted_service was previously assigned only in the
  # '_vtocc' + encrypted case, so the loop below raised NameError whenever
  # encrypted=True was passed with any other service.
  encrypted_service = None
  if service == '_vtocc' and encrypted:
    encrypted_service = '_vts'
  db_key = parts[0]
  ks, shard, tablet_type = db_key.split('.')
  try:
    data = topo_client.get_end_points('local', ks, shard, tablet_type)
  except zkocc.ZkOccError as e:
    vtdb_logger.get_logger().topo_zkocc_error('do data', db_key, e)
    return []
  except Exception as e:
    vtdb_logger.get_logger().topo_exception('failed to get or parse topo data', db_key, e)
    return []
  if 'Entries' not in data:
    # BUGFIX: this branch used to pass `e` to the logger, but `e` is unbound
    # on the success path (no exception was raised above); build the error
    # explicitly and both log and raise it.
    error = Exception('zkocc returned: %s' % str(data))
    vtdb_logger.get_logger().topo_exception('topo server returned: ' + str(data), db_key, error)
    raise error
  for entry in data['Entries']:
    if service in entry['NamedPortMap']:
      host_port = (entry['Host'], entry['NamedPortMap'][service],
                   service == '_vts')
      host_port_list.append(host_port)
    if encrypted_service is not None and encrypted_service in entry['NamedPortMap']:
      host_port = (entry['Host'], entry['NamedPortMap'][encrypted_service],
                   True)
      encrypted_host_port_list.append(host_port)
  if encrypted and len(encrypted_host_port_list) > 0:
    random.shuffle(encrypted_host_port_list)
    return encrypted_host_port_list
  random.shuffle(host_port_list)
  return host_port_list
def is_sharded_keyspace(keyspace_name, db_type):
  """True when the cached keyspace has more than one shard for db_type."""
  return get_keyspace(keyspace_name).get_shard_count(db_type) > 1
def get_keyrange_from_shard_name(keyspace, shard_name, db_type='replica'):
  """Map a shard name to its KeyRange.

  Unsharded keyspaces only accept the '0' shard (non-partial keyrange);
  sharded keyspaces expect names of the form '<start_hex>-<end_hex>'.

  Raises:
    dbexceptions.DatabaseError: if shard_name is invalid for the keyspace.
  """
  kr = None
  # db_type is immaterial here.
  if not is_sharded_keyspace(keyspace, db_type):
    if shard_name == keyrange_constants.SHARD_ZERO:
      kr = keyrange_constants.NON_PARTIAL_KEYRANGE
    else:
      raise dbexceptions.DatabaseError('Invalid shard_name %s for keyspace %s', shard_name, keyspace)
  else:
    kr_parts = shard_name.split('-')
    if len(kr_parts) != 2:
      raise dbexceptions.DatabaseError('Invalid shard_name %s for keyspace %s', shard_name, keyspace)
    # Python 2 str.decode('hex') turns the hex bounds into raw byte strings.
    kr = keyrange.KeyRange((kr_parts[0].decode('hex'), kr_parts[1].decode('hex')))
  return kr
|
offbye/PiBoat | pyboat/socket_client.py | Python | apache-2.0 | 525 | 0 | #!/usr/bin/python
# -*- encoding: UTF-8 -*-
# BoatServer created on 15/8/21 3:15 PM
# Copyright 2014 offbye@gmail.com
"""
Simple line-oriented TCP client (Python 2): reads lines from stdin, sends
each to the boat server terminated with CRLF, and prints the server's reply.
Stops on an empty line, the literal 'exit', or when the server closes.
"""
__author__ = ['"Xitao":<offbye@gmail.com>']
from socket import *
# Hard-coded server address of the boat controller on the local network.
host = '172.19.3.18'
port = 9999
bufsize = 1024
addr = (host, port)
client = socket()
client.connect(addr)
while True:
    data = raw_input()
    if not data or data == 'exit':
        break
    client.send('%s\r\n' % data)
    data = client.recv(bufsize)
    if not data:
        # Server closed the connection.
        break
    print data.strip()
client.close()
pgmpy/pgmpy | docs/conf.py | Python | mit | 10,230 | 0.000587 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pgmpy documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 30 18:17:42 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../"))
sys.path.insert(0, os.path.abspath("../../pgmpy_notebooks/notebooks"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.imgmath",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx.ext.autosectionlabel",
"nbsphinx",
"sphinx_rtd_theme",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pgmpy"
copyright = "2021, Ankur Ankan"
author = "Ankur Ankan, Abinash Panda"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "dev branch"
# The full version, including alpha/beta/rc tags.
release = "0.1.15"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = { | "analytics_id": "UA-177825880-1"}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
# html_title = 'pgmpy v0.1.2'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relati | ve to this directory) to place at the top
# of the sidebar.
# html_logo = "logo.png"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "pgmpydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
lat |
Mango/mango-python | pymango/resources.py | Python | mit | 4,129 | 0.000969 | """
Mango Python Library Resources
"""
import pymango
from .client import req
from .error import InvalidApiKey
class Resource():
    """Base class for all Mango API resources."""
    # REST collection name used to build the endpoint; subclasses override it.
    name = None

    def __init__(self):
        # Refuse to instantiate any resource while no API key is configured.
        if not pymango.api_key:
            raise InvalidApiKey

    @classmethod
    def get_endpoint(cls):
        """
        Build the API endpoint from the API version and the resource name.
        :return: String with endpoint, ex: v1/charges/
        """
        template = "v{version}/{name}/"
        return template.format(version=pymango.version, name=cls.name)
class GetableResource(Resource):
    """Mixin adding single-object retrieval to a resource."""
    @classmethod
    def get(cls, uid):
        """
        Return the resource representation
        :param uid: String with the UID of the resource
        :return: Dictionary with the resource representation
        """
        url = "{endpoint}{element}/".format(endpoint=cls.get_endpoint(),
                                            element=uid)
        return req(pymango.api_key, "get", url)
class ListableResource(Resource):
    """Mixin adding collection listing to a resource."""
    @classmethod
    def list(cls, **kwargs):
        """
        Return a list of resources
        :param kwargs: Any optional argument accepted by the resource,
            passed through as query-string parameters
        :return: List with a dictionary with the resource representation
        """
        return req(pymango.api_key, "get", cls.get_endpoint(), params=kwargs)
class ResourceCreatable(Resource):
    """Base class for creatable resources"""
    @classmethod
    def create(cls, **kwargs):
        """
        Create a resource (docstring fixed: previously said "Charge",
        a copy-paste from the Charge subclass)
        :param kwargs: Any optional argument accepted by the resource
        :return: Dictionary with the resource representation
        """
        return req(pymango.api_key, "post", cls.get_endpoint(), kwargs)
class ResourceUpdatable(Resource):
    """Base class for updatable resources"""
    @classmethod
    def update(cls, uid, **kwargs):
        """
        Update a resource via PATCH
        :param uid: String with the UID of the resource
        :param kwargs: Any optional argument accepted by the resource
        :return: Dictionary with the resource representation
        """
        url = "{endpoint}{element}/".format(endpoint=cls.get_endpoint(),
                                            element=uid)
        return req(pymango.api_key, "patch", url, data=kwargs)
class ResourceDeletable(Resource):
    """Base class for deletable resources"""
    @classmethod
    def delete(cls, uid):
        """
        Delete a single resource by UID
        :param uid: String with the UID of the resource
        :return: Dictionary with the resource representation
        """
        url = "{endpoint}{element}/".format(endpoint=cls.get_endpoint(),
                                            element=uid)
        return req(pymango.api_key, "delete", url)
class ResourceDeletableAll(Resource):
    """Base class for delete entire collection of resources"""
    @classmethod
    def delete_all(cls):
        """
        Delete all resources from this collection
        :return: True if success, false otherwise.
        """
        # Issues DELETE on the collection endpoint itself (no UID suffix).
        return req(pymango.api_key, "delete", cls.get_endpoint())
# Concrete API resources: each binds a REST collection name to the mixin
# behaviors (get / list / create / update / delete) it supports.
class Charge(GetableResource, ListableResource, ResourceCreatable, Resource):
    """Mango Charge resource"""
    name = "charges"
class Refund(GetableResource, ListableResource, ResourceCreatable, Resource):
    """Mango Refund resource"""
    name = "refunds"
class Customer(GetableResource, ListableResource, ResourceCreatable, ResourceUpdatable, ResourceDeletable):
    """Mango Customer resource"""
    name = "customers"
class Card(GetableResource, ListableResource, ResourceCreatable, ResourceUpdatable, ResourceDeletable):
    """Mango Card resource"""
    name = "cards"
class Queue(GetableResource, ListableResource, ResourceDeletable, ResourceDeletableAll):
    """Mango Queue resource"""
    name = "queue"
class Installment(ListableResource, Resource):
    """Mango Installment resource"""
    name = "installments"
class Promotion(GetableResource, ListableResource, Resource):
    """Mango Promotion resource"""
    name = "promotions"
class Coupon(GetableResource, ListableResource, ResourceCreatable, ResourceUpdatable, Resource):
    """Mango Coupon resource"""
    name = "coupons"
|
AGMMGA/EM_scripts | EM_scripts/tests/Mass_rename_micrographs_argparse_tests.py | Python | gpl-2.0 | 12,315 | 0.009419 | import glob
import os
from pprint import pprint
import shlex
import shutil
import sys
import tempfile
import unittest
from unittest.mock import patch
# from context.EM_scripts import Micrograph_renamer as m
from scripts_EM.Mass_rename_micrographs_argparse import Micrograph_renamer as m
class test_rename_files(unittest.TestCase):
    """End-to-end tests for Micrograph_renamer.rename_files() on a temp tree."""
    def setUp(self):
        # Three fake integrated images, each with two frame files.
        self.files = ['badname_1.mrc', 'badname_2.mrc', 'badname_3.mrc']
        self.frames = ['{}_frame{}.mrc'.format(i.split('.mrc')[0], str(j))
                       for i in self.files
                       for j in range(2)]
        self.tempdir = tempfile.mkdtemp()
        #expected results
        self.frame_suffix = '_frame#'
        self.exp_integrated = [os.path.join(self.tempdir, 'integrated', i) for i in self.files]
        self.exp_frames = [os.path.join(self.tempdir, 'frames', (i + self.frame_suffix))
                           for i in self.files]
        # Touch all fixture files (empty files are enough for renaming tests).
        for f in (self.files + self.frames):
            with open(os.path.join(self.tempdir,f),'w') as f:
                pass
    def tearDown(self):
        shutil.rmtree(self.tempdir,ignore_errors=True)
    def test_normal_operations(self):
        """Happy path: find and rename a complete image/frame set."""
        testargs = ('foo.py -input_dir {temp} -output_dir {temp} -filename new_#.mrc'
                    ' -frames_suffix {frame} -EPU_image_pattern * -n_frames 2')
        testargs = testargs.format(temp = self.tempdir, frame = self.frame_suffix)
        with patch('sys.argv', testargs.split()):
            obj = m()
            frames, integrated = obj.find_mrc_files(obj.input_dir, obj.EPU_image_pattern,
                                                    obj.frames_suffix)
            # NOTE(review): list.sort() returns None, so these assertEqual
            # calls compare None to None and cannot fail — use sorted() to
            # actually compare contents.
            self.assertEqual(integrated.sort(), self.exp_integrated.sort())
            self.assertEqual(frames.sort(), self.exp_frames.sort())
            obj.rename_files(frames, integrated)
    def test_missing_frames(self):
        '''
        removing one frame for image 1
        the remaining frame should end up in missing frames;
        the corresponding image in orphan images
        '''
        os.remove(os.path.join(self.tempdir, ('badname_1_frame0.mrc')))
        # adding one orphan frame (no parent image)
        with open(os.path.join(self.tempdir, 'badname_5_frame0.mrc'), 'w'):
            pass
        testargs = ('foo.py -input_dir {temp} -output_dir {temp} -filename new_#.mrc'
                    ' -frames_suffix {frame} -EPU_image_pattern * -n_frames 2')
        testargs = testargs.format(temp = self.tempdir, frame = self.frame_suffix)
        with patch('sys.argv', testargs.split()):
            obj = m()
            frames, integrated = obj.find_mrc_files(obj.input_dir, obj.EPU_image_pattern,
                                                    obj.frames_suffix)
            obj.rename_files(frames, integrated)
        # Count how the renamer sorted files into its output subfolders.
        missing_frames = len(glob.glob(os.path.join(self.tempdir, 'missing_frames', '*.mrc')))
        orphan_frames = len(glob.glob(os.path.join(self.tempdir, 'orphan_frames', '*.mrc')))
        orphan_images = len(glob.glob(os.path.join(self.tempdir, 'orphan_integrated', '*.mrc')))
        frames = len(glob.glob(os.path.join(self.tempdir, 'frames', '*.mrc')))
        integrated = len(glob.glob(os.path.join(self.tempdir, 'integrated', '*.mrc')))
        self.assertEqual(missing_frames, 1)
        self.assertEqual(orphan_frames, 1)
        self.assertEqual(orphan_images, 1)
        self.assertEqual(frames, 4)
        self.assertEqual(integrated, 2)
class test_find_files(unittest.TestCase):
    """Tests for Micrograph_renamer.find_mrc_files() discovery logic."""
    def setUp(self):
        # Three fake integrated images, each with seven frame files.
        self.files = ['badname_1.mrc', 'badname_2.mrc', 'badname_3.mrc']
        self.frames = ['{}_frame{}.mrc'.format(i.split('.mrc')[0], str(j))
                       for i in self.files
                       for j in range(7)]
        self.tempdir = tempfile.mkdtemp()
        #expected results
        self.frame_suffix = '_frame#'
        self.exp_integrated = [os.path.join(self.tempdir, i) for i in self.files]
        self.exp_frames = [os.path.join(self.tempdir, (i + self.frame_suffix))
                           for i in self.files]
        for f in (self.files + self.frames):
            with open(os.path.join(self.tempdir,f),'w') as f:
                pass
    def tearDown(self):
        shutil.rmtree(self.tempdir,ignore_errors=True)
    def test_find_files(self):
        """Basic discovery of frames and integrated images."""
        testargs = ('foo.py -input_dir {temp} -output_dir {temp} -filename #.mrc'
                    ' -frames_suffix {frame} -EPU_image_pattern *')
        testargs = testargs.format(temp = self.tempdir, frame = self.frame_suffix)
        with patch('sys.argv', testargs.split()):
            obj = m()
            frames, integrated = obj.find_mrc_files(obj.input_dir, obj.EPU_image_pattern,
                                                    obj.frames_suffix)
            # NOTE(review): list.sort() returns None, so these compare
            # None to None; sorted() would make the assertions meaningful.
            self.assertEqual(integrated.sort(), self.exp_integrated.sort())
            self.assertEqual(frames.sort(), self.exp_frames.sort())
    def test_find_files_scans_subfolders(self):
        #creating an extra file in a subfolder
        os.makedirs(os.path.join(self.tempdir, 'subfolder'))
        with open(os.path.join(self.tempdir, 'subfolder/4.mrc'),'w') as f:
            self.exp_integrated += [f.name]
        testargs = ('foo.py -input_dir {temp} -output_dir {temp} -filename #.mrc'
                    ' -frames_suffix {frame} -EPU_image_pattern *')
        testargs = testargs.format(temp = self.tempdir, frame = self.frame_suffix)
        with patch('sys.argv', testargs.split()):
            obj = m()
            frames, integrated = obj.find_mrc_files(obj.input_dir, obj.EPU_image_pattern,
                                                    obj.frames_suffix)
            self.assertEqual(integrated.sort(), self.exp_integrated.sort())
            self.assertEqual(frames.sort(), self.exp_frames.sort())
    def test_find_files_non_EPU_discarded(self):
        #only the newly created file matches the EPU pattern
        os.makedirs(os.path.join(self.tempdir, 'subfolder'))
        with open(os.path.join(self.tempdir, 'subfolder/4.mrc'),'w') as f:
            self.exp_integrated = [f.name]
            self.exp_frames = []
        EPU_image_pattern = '/subfolder/'
        testargs = ('foo.py -input_dir {temp} -output_dir {temp} -filename #.mrc'
                    ' -frames_suffix {frame} -EPU_image_pattern {EPU}')
        testargs = testargs.format(temp = self.tempdir, frame = self.frame_suffix,
                                   EPU = EPU_image_pattern)
        with patch('sys.argv', testargs.split()):
            obj = m()
            frames, integrated = obj.find_mrc_files(obj.input_dir, obj.EPU_image_pattern,
                                                    obj.frames_suffix)
            self.assertEqual(integrated.sort(), self.exp_integrated.sort())
            self.assertEqual(frames.sort(), self.exp_frames.sort())
class test_check_args(unittest.TestCase):
def test_default_data_dir(self):
testargs = 'foo.py -output_dir /tmp -filename 1_a_#.mrc'.split()
with patch('sys.argv', testargs):
obj = m()
self.assertTrue(obj.check)
self.assertEqual(os.getcwd(), obj.input_dir)
def test_nonexisting_input_dir(self):
testargs = 'foo.py -filename 1_a_#.mrc -input_dir /not/exists ' + \
'-output_dir /tmp'
with patch('sys.argv', testargs.split()):
with self.assertRaises(SystemExit):
obj = m()
def test_existing_input_dir(self):
testargs = 'foo.py -filename 1_a_#.mrc -input_dir /tmp -output_dir /tmp'
with patch('sys.argv', testargs.split()):
obj = m()
self.assertTrue(obj.check)
self.assertEqual('/tmp', obj.input_dir)
def test_output_dir_creation(self):
testargs = 'foo.py -filename 1_a_#.mrc -output_dir /tmp/tmp'.split()
with patch('sys.argv', testargs):
with patch('os.mkdir') as mock:
obj = m()
self.assertTrue(obj.check)
self.assertTrue(mock.called)
def test_digits_co |
nburn42/tensorflow | tensorflow/python/grappler/graph_placer.py | Python | apache-2.0 | 4,478 | 0.007816 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph Placer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import errors
from t | ensorflow.python.framework import ops as tf_ops
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import hierarchical_controller
from tensorflow.python.grappler import item as gitem
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.training import training
def PlaceGraph(metagraph,
               cluster=None,
               allotted_time=3600,
               hparams=None,
               verbose=False):
  """Place the provided metagraph.

  Args:
    metagraph: the metagraph to place.
    cluster: an optional set of hardware resource to optimize the placement for.
      If none is specified, we'll optimize the placement for the hardware
      available on the local machine.
    allotted_time: the maximum amount to time in seconds to spend optimizing
      the placement.
    hparams: hyperparameters used to fine tune the placer.
    verbose: prints debug information if True.

  Returns:
    The placed metagraph.
  """
  if cluster is None:
    cluster = gcluster.Cluster()

  # BUGFIX: default hparams *before* measuring the original placement. The
  # except branch below reads hparams.failing_signal, which raised
  # AttributeError whenever the measurement failed while hparams was None.
  if hparams is None:
    hparams = hierarchical_controller.hierarchical_controller_hparams()
  # We run with a single child
  hparams.num_children = 1

  # Optimize the metagraph to speedup the placement
  rewriter_config = rewriter_config_pb2.RewriterConfig()
  optimized_graph = tf_optimizer.OptimizeGraph(
      rewriter_config, metagraph, verbose=verbose, cluster=cluster)
  optimized_metagraph = meta_graph_pb2.MetaGraphDef()
  optimized_metagraph.CopyFrom(metagraph)
  optimized_metagraph.graph_def.CopyFrom(optimized_graph)

  item = gitem.Item(optimized_metagraph)

  # Measure the runtime achievable with the original placement.
  try:
    _, original_run_time, _ = cluster.MeasureCosts(item)
    if verbose:
      print("Runtime for original placement: " + str(original_run_time))
  except errors.OpError as e:
    if verbose:
      print("Original placement isn't feasible: " + str(e))
    original_run_time = hparams.failing_signal

  with tf_ops.Graph().as_default():
    # Place all the nodes of the controller on the CPU. We don't want them to
    # fight for accelerator memory with the model to optimize.
    with tf_ops.device("/device:CPU:0"):
      model = hierarchical_controller.HierarchicalController(
          hparams, item, cluster)
      ops = model.build_controller()
      session_creator = training.ChiefSessionCreator()
      with training.MonitoredSession(session_creator=session_creator) as sess:
        start_time = time.time()
        current_time = start_time
        # Keep sampling placements until the time budget is exhausted,
        # exporting any placement that beats the best runtime seen so far.
        while current_time - start_time < allotted_time:
          grouping_actions = model.generate_grouping(sess)
          input_to_seq2seq = model.create_group_embeddings(
              grouping_actions, verbose=verbose)
          model.generate_placement(input_to_seq2seq, sess)
          try:
            run_time = model.eval_placement(
                sess,
                verbose=verbose)
          except errors.OpError as e:
            if verbose:
              print("Failed to run graph:" + str(e))
            run_time = hparams.failing_signal
          updated = model.update_reward(sess, run_time, verbose=verbose)
          if updated and run_time < original_run_time:
            if verbose:
              print("Found better placement, with runtime " + str(run_time))
            model.export_placement(metagraph)
            model.process_reward(sess)

          current_time = time.time()
  return metagraph
|
mornsun/javascratch | src/topcoder.py/LC_375_Guess_Number_Higher_or_Lower_II.py | Python | gpl-2.0 | 2,229 | 0.010777 | #!/usr/bin/env python
#coding=utf8
'''
We are playing the Guess Game. The game is as follows:
I pick a number from 1 to n. You have to guess which number I picked.
Every time you guess wrong, I'll tell you whether the number I picked is higher or lower.
However, when you guess a particular number x, and you guess wrong, you pay $x. You win the game when you guess the number I picked.
Example:
n = 10, I pick 8.
First round: You guess 5, I tell you that it's higher. You pay $5.
Second round: You guess 7, I tell you that it's higher. You pay $7.
Third round: You guess 9, I tell you that it's lower. You pay $9.
Game over. 8 is the number I picked.
You end up paying $5 + $7 + $9 = $21.
Given a particular n ≥ 1, find out how much money you need to have to guarantee a win.
Related Topics
Dynamic Programming, Minimax
Similar Questions
Flip Game II, Guess Number Higher or Lower, Can I Win, Find K Closest Elements
f(i,j) = min(x + max(f(i,x-1), f(x+1,j) ) )
@author: Chauncey
beat 24.02%
'''
import heapq
import datetime
import time
import sys
import collections
class Solution(object):
    def getMoneyAmount(self, n):
        """
        :type n: int
        :rtype: int

        Minimum money needed to guarantee a win on [1, n], via the classic
        interval DP: f[lo][hi] = min over pivot x of
        x + max(f[lo][x-1], f[x+1][hi]), where empty/single intervals cost 0.

        BUGFIX: the previous implementation seeded the diagonal with the
        number values and dropped the pivot cost in its final pass, which
        produced wrong answers (e.g. 3 instead of 2 for n == 3). It also
        left a debug print in the inner loop.
        """
        if not n or n == 1:
            return 0
        # f[lo][hi]: worst-case cost to finish within [lo, hi]; the extra
        # row/column keeps f[x + 1][hi] in range when x == hi - 1 == n - 1.
        f = [[0] * (n + 2) for _ in range(n + 2)]
        for length in range(2, n + 1):
            for lo in range(1, n - length + 2):
                hi = lo + length - 1
                # Guessing hi itself is never optimal (hi-1 dominates),
                # so pivots only range over [lo, hi).
                f[lo][hi] = min(
                    x + max(f[lo][x - 1], f[x + 1][hi])
                    for x in range(lo, hi)
                )
        return f[1][n]
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 print statements); expected answers are
    # given in the trailing comments.
    solution = Solution()
    start_time = datetime.datetime.now()
    #print solution.getMoneyAmount(10) #
    print solution.getMoneyAmount(1) #0
    print solution.getMoneyAmount(2) #1
    print solution.getMoneyAmount(3) #2
    elapsed = datetime.datetime.now() - start_time
    print 'elapsed:', elapsed.total_seconds()
jeremiahyan/odoo | addons/calendar/tests/test_event_recurrence.py | Python | gpl-3.0 | 30,782 | 0.001364 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import UserError
import pytz
from datetime import datetime, date
from dateutil.relativedelta import relativedelta
from odoo.tests.common import TransactionCase
class TestRecurrentEvents(TransactionCase):
    """Base class for recurrence tests: forces Monday as the week start and
    provides an assertion helper to compare a recurrence's event dates."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        lang = cls.env['res.lang']._lang_get(cls.env.user.lang)
        lang.week_start = '1' # Monday
    def assertEventDates(self, events, dates):
        """Assert `events` match `dates`, a list of (start, stop) datetimes
        in chronological order, and that all events are active."""
        events = events.sorted('start')
        self.assertEqual(len(events), len(dates), "Wrong number of events in the recurrence")
        self.assertTrue(all(events.mapped('active')), "All events should be active")
        for event, dates in zip(events, dates):
            start, stop = dates
            self.assertEqual(event.start, start)
            self.assertEqual(event.stop, stop)
class TestCreateRecurrentEvents(TestRecurrentEvents):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.event = cls.env['calendar.event'].create({
'name': 'Recurrent Event',
'start': datetime(2019, 10, 21, 8, 0),
'stop': datetime(2019, 10, 23, 18, 0),
'recurrency': True,
})
def test_weekly_count(self):
""" Every week, on Tuesdays, for 3 occurences """
detached_events = self.event._apply_recurrence_values({
'rrule_type': 'weekly',
'tue': True,
'interval': 1,
'count': 3,
'event_tz': 'UTC',
})
self.assertEqual(detached_events, self.event, "It should be detached from the recurrence")
self.assertFalse(self.event.recurrence_id, "It should be detached from the recurrence")
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEqual(len(events), 3, "It should have 3 events in the recurrence")
self.assertEventDates(events, [
(datetime(2019, 10, 22, 8, 0), datetime(2019, 10, 24, 18, 0)),
(datetime(2019, 10, 29, 8, 0), datetime(2019, 10, 31, 18, 0)),
(datetime(2019, 11, 5, 8, 0), datetime(2019, 11, 7, 18, 0)),
])
def test_weekly_interval_2(self):
self.event._apply_recurrence_values({
'interval': 2,
'rrule_type': 'weekly',
'tue': True,
'count': 2,
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEventDates(events, [
(datetime(2019, 10, 22, 8, 0), datetime(2019, 10, 24, 18, 0)),
(datetime(2019, 11, 5, 8, 0), datetime(2019, 11, 7, 18, 0)),
])
def test_weekly_interval_2_week_start_sunday(self):
lang = self.env['res.lang']._lang_get(self.env.user.lang)
lang.week_start = '7' # Sunday
self.event._apply_recurrence_values({
'interval': 2,
'rrule_type': 'weekly',
'tue': True,
'count': 2,
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEventDates(events, [
(datetime(2019, 10, 22, 8, 0), datetime(2019, 10, 24, 18, 0)),
(datetime(2019, 11, 5, 8, 0), datetime(2019, 11, 7, 18, 0)),
])
lang.week_start = '1' # Monday
def test_weekly_until(self):
self.event._apply_recurrence_values({
'rrule_type': 'weekly',
'tue': True,
'interval': 2,
'end_type': 'end_date',
'until': datetime(2019, 11, 15),
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEqual(len(events), 2, "It should have 2 events in the recurrence")
self.assertEventDates(events, [
(datetime(2019, 10, 22, 8, 0), datetime(2019, 10, 24, 18, 0)),
(datetime(2019, 11, 5, 8, 0), datetime(2019, 11, 7, 18, 0)),
])
def test_monthly_count_by_date(self):
self.event._apply_recurrence_values({
'rrule_type': 'monthly',
'interval': 2,
'month_by': 'date',
'day': 27,
'end_type': 'count',
'count': 3,
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEqual(len(events), 3, "It should have 3 events in the recurrence")
self.assertEventDates(events, [
(datetime(2019, 10, 27, 8, 0), datetime(2019, 10, 29, 18, 0)),
(datetime(2019, 12, 27, 8, 0), datetime(2019, 12, 29, 18, 0)),
(datetime(2020, 2 | , 27, 8, 0), datetime(2020, 2, 29, 18, 0)),
])
def test_monthly_count_by_date_31(self):
self.event._apply_recurrence_values({
'rrule_type': 'monthly',
'interval': 1,
'month_by': 'date',
'day': 31,
'end_type': 'count',
'count': 3,
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ | ids
self.assertEqual(len(events), 3, "It should have 3 events in the recurrence")
self.assertEventDates(events, [
(datetime(2019, 10, 31, 8, 0), datetime(2019, 11, 2, 18, 0)),
# Missing 31th in November
(datetime(2019, 12, 31, 8, 0), datetime(2020, 1, 2, 18, 0)),
(datetime(2020, 1, 31, 8, 0), datetime(2020, 2, 2, 18, 0)),
])
def test_monthly_until_by_day(self):
""" Every 2 months, on the third Tuesday, until 27th March 2020 """
self.event.start = datetime(2019, 10, 1, 8, 0)
self.event.stop = datetime(2019, 10, 3, 18, 0)
self.event._apply_recurrence_values({
'rrule_type': 'monthly',
'interval': 2,
'month_by': 'day',
'byday': '3',
'weekday': 'TUE',
'end_type': 'end_date',
'until': date(2020, 3, 27),
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEqual(len(events), 3, "It should have 3 events in the recurrence")
self.assertEventDates(events, [
(datetime(2019, 10, 15, 8, 0), datetime(2019, 10, 17, 18, 0)),
(datetime(2019, 12, 17, 8, 0), datetime(2019, 12, 19, 18, 0)),
(datetime(2020, 2, 18, 8, 0), datetime(2020, 2, 20, 18, 0)),
])
def test_monthly_until_by_day_last(self):
""" Every 2 months, on the last Wednesday, until 15th January 2020 """
self.event._apply_recurrence_values({
'interval': 2,
'rrule_type': 'monthly',
'month_by': 'day',
'weekday': 'WED',
'byday': '-1',
'end_type': 'end_date',
'until': date(2020, 1, 15),
'event_tz': 'UTC',
})
recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
events = recurrence.calendar_event_ids
self.assertEqual(len(events), 2, "It should have 3 events in the recurrence")
self.assertEventDates(events, [
(datetime(2019, 10, 30, 8, 0), datetime(2019, 11, 1, 18, 0)),
(datetime(2019, 12, 25, 8, 0), datetime(2019, 12, 27, 18, 0)),
])
def test_yearly_count(self):
self.event._apply_recurrence_values({
'interval': 2,
'rrule_type': 'yearly',
'count': 2,
'event_tz': 'UTC', |
doismellburning/django | tests/delete/tests.py | Python | bsd-3-clause | 15,685 | 0.000638 | from __future__ import unicode_literals
from math import ceil
from django.db import models, IntegrityError, connection
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.test import TestCase, skipUnlessDBFeature, skipIfDBFeature
from django.utils.six.moves import range
from .models import (R, RChild, S, T, A, M, MR, MRNull,
create_a, get_default_r, User, Avatar, HiddenUser, HiddenUserProfile,
M2MTo, M2MFrom, Parent, Child, Base)
class OnDeleteTests(TestCase):
def setUp(self):
self.DEFAULT = get_default_r()
def test_auto(self):
a = create_a('auto')
a.auto.delete()
self.assertFalse(A.objects.filter(name='auto').exists())
def test_auto_nullable(self):
a = create_a('auto_nullable')
a.auto_nullable.delete()
self.assertFalse(A.objects.filter(name='auto_nullable').exists())
def test_setvalue(self):
a = create_a('setvalue')
a.setvalue.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setvalue)
def test_setnull(self):
a = create_a('setnull')
a.setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.setnull)
def test_setdefault(self):
a = create_a('setdefault')
a.setdefault.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setdefault)
def test_setdefault_none(self):
a = create_a('setdefault_none')
a.setdefault_none.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.setdefault_none)
def test_cascade(self):
a = create_a('cascade')
a.cascade.delete()
self.assertFalse(A.objects.filter(name='cascade').exists())
def test_cascade_nullable(self):
a = create_a('cascade_nullable')
a.cascade_nullable.delete()
self.assertFalse(A.objects.filter(name='cascade_nullable').exists())
def test_protect(self):
a = create_a('protect')
self.assertRaises(IntegrityError, a.protect.delete)
def test_do_nothing(self):
# Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
# so we connect to pre_delete and set the fk to a known value.
replacement_r = R.objects.create()
def check_do_nothing(sender, **kwargs):
obj = kwargs['instance']
obj.donothing_set.update(donothing=replacement_r)
models.signals.pre_delete.connect(check_do_nothing)
a = create_a('do_nothing')
a.donothing.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(replacement_r, a.donothing)
models.signals.pre_delete.disconnect(check_do_nothing)
def test_do_nothing_qscount(self):
"""
Test that a models.DO_NOTHING relation doesn't trigger a query.
"""
b = Base.objects.create()
with self.assertNumQueries(1):
# RelToBase should not be queried.
b.delete()
self.assertEqual(Base.objects.count(), 0)
def test_inheritance_cascade_up(self):
child = RChild.objects.create()
child.delete()
self.assertFalse(R.objects.filter(pk=child.pk).exists())
def test_inheritance_cascade_down(self):
child = RChild.objects.create()
parent = child.r_ptr
parent.delete()
self.assertFalse(RChild.objects.filter(pk=child.pk).exists())
def test_cascade_from_child(self):
a = create_a('child')
a.child.delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(R.objects.filter(pk=a.child_id).exists())
def test_cascade_from_parent(self):
a = create_a('child')
R.objects.get(pk=a.child_id).delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())
def test_setnull_from_child(self):
a = create_a('child_setnull')
a.child_setnull.delete()
self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.child_setnull)
def test_setnull_from_parent(self):
a = create_a('child_setnull')
R.objects.get(pk=a.child_setnull_id).delete()
self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.child_setnull)
def test_o2o_setnull(self):
a = create_a('o2o_setnull')
a.o2o_setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.o2o_setnull)
class DeletionTests(TestCase):
def test_m2m(self):
m = M.objects.create()
r = R.objects.create()
MR.objects.create(m=m, r=r)
r.delete()
self.assertFalse(MR.objects.exists())
r = R.objects.create()
MR.objects.create(m=m, r=r)
m.delete()
self.assertFalse(MR.objects.exists())
m = M.objects.create()
r = R.objects.create()
m.m2m.add(r)
r.delete()
through = M._meta.get_field('m2m').rel.through
self.assertFalse(through.objects.exists())
r = R.objects.create()
m.m2m.add(r)
m.delete()
self.assertFalse(through.objects.exists())
m = M.objects.create()
r = R.objects.create()
MRNull.objects.create(m=m, r=r)
r.delete()
self.assertFalse(not MRNull.objects.exists())
self.assertFalse(m.m2m_through_null.exists())
def test_bulk(self):
s = S.objects.create(r=R.objects.create())
for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
T.objects.create(s=s)
# 1 (select related `T` instances)
# + 1 (select related `U` instances)
# + 2 (delete `T` instances in batches)
# + 1 (delete `s`)
self.assertNumQueries(5, s.delete)
self.assertFalse(S.objects.exists())
def test_instance_update(self):
deleted = []
related_setnull_sets = []
def pre_delete(sender, **kwargs):
obj = kwargs['instance']
deleted.append(obj)
if isinstance(obj, R):
related_setnull_sets.append(list(a.pk for a in obj.setnull_set.all()))
models.signals.pre_delete.connect(pre_delete)
a = create_a('update_setnull')
a.setnull.delete()
a = create_a('update_cascade')
a.cascade.delete()
for obj in deleted:
self.assertEqual(None, obj.pk)
for pk_list in related_setnull_sets:
for a in A.objects.filter(id__in=pk_list):
self.assertEqual(None, a.setnull)
models.signals.pre_delete.disconnect(pre_delete)
def test_deletion_order(self):
pre_delete_order = []
post_delete_order = []
def log_post_delete(sender, **kwargs):
pre_delete_order.append((sender, kwargs['instance'].pk))
def log_pre_delete(sender, **kwargs):
post_delete_order.append((sender, kwargs['instance'].pk))
models.signals.post_delete.connect(log_post_delete)
models.signals.pre_delete.connect(log_pre_delete)
r = R.objects.create(pk=1)
s1 = S.objects.create(pk=1, r=r)
s2 = S.objects.create(pk=2, r=r)
T.objects.create(pk=1, s=s1)
| T.objects.create(pk=2, s=s2)
r.delete()
self.assertEqual(
pre_delete_order, [(T, 2), (T, 1), (S, 2), (S, 1), (R, 1)]
)
self.assertEqual(
post_delete_order, [(T, 1), (T, 2), (S, 1), (S, 2), (R, 1)]
)
models.signals.post_delete.disconnect(log_post_delete)
models.signals.pre_delete.disconnect(log_pre_delete)
def test_relational_post_delete_signals_happen_before_parent_object(self):
deletions = []
def log_post_delete(instance, **kwargs):
self.assertTrue(R.objects.filter(pk=instance.r_id))
self.assertIs(type(instance), S)
deletions.append(instance.id)
r = R.objects.create(pk=1)
S.objects.create | |
bwhmather/json-config-parser | jsonconfigparser/tests/__init__.py | Python | bsd-3-clause | 6,075 | 0 | import unittest
import tempfile
from jsonconfigparser import JSONConfigParser, NoSectionError, ParseError
class JSONConfigTestCase(unittest.TestCase):
def test_init(self):
JSONConfigParser()
def test_read_string(self):
cf = JSONConfigParser()
cf.read_string((
'[section]\n'
'# comment comment\n'
'foo = "bar"\n'
'\n'
'[section2]\n'
'bar = "baz"\n'
))
self.assertEqual(cf.get('section', 'foo'), 'bar')
def test_read_file(self):
string = '[section]\n' + \
'foo = "bar"'
fp = tempfile.NamedTemporaryFile('w+')
fp.write(string)
fp.seek(0)
cf = JSONConfigParser()
cf.read_file(fp)
self.assertEqual(cf.get('section', 'foo'), 'bar')
def test_get(self):
cf = JSONConfigParser()
cf.add_section('section')
cf.set('section', 'section', 'set-in-section')
self.assertEqual(cf.get('section', 'section'), 'set-in-section')
def test_get_from_defaults(self):
cf = JSONConfigParser()
cf.set(cf.default_section, 'option', 'set-in-defaults')
try:
cf.get('section', 'option')
except NoSectionError:
pass
else: # pragma: no cover
self.fail("Only fall back to defaults if section exists")
cf.add_section('section')
self.assertEqual(cf.get('section', 'option'), 'set-in-defaults',
msg="get should fall back to defaults if value not \
set in section")
cf.set('section', 'option', 'set-normally')
self.assertEqual(cf.get('section', 'option'), 'set-normally',
msg="get shouldn't fall back if option is set \
normally")
def test_get_from_vars(self):
cf = JSONConfigParser()
cf.add_section('section')
cf.set('section', 'option', 'set-in-section')
self.assertEqual(cf.get('section', 'option',
vars={'option': 'set-in-vars'}),
'set-in-vars',
msg="vars should take priority over options in \
section")
self.assertEqual(cf.get('section', 'option', vars={}),
'set-in-section',
msg="get should fall back to section if option not \
in vars")
def test_get_from_fallback(self):
cf = JSONConfigParser()
cf.add_section('section')
# returns from fallback if section exists
self.assertEqual(cf.get('section', 'unset', 'fallback'), 'fallback')
try:
cf.get('nosection', 'unset', 'fallback')
except NoSectionError:
pass
else: # pragma: no cover
self.fail()
def test_has_option(self):
cf = JSONConfigParser()
# option in nonexistant section does not exist
self.assertFalse(cf.has_option('nonexistant', 'unset'))
cf.add_section('section')
self.assertFalse(cf.has_option('section', 'unset'),
msg="has_option should return False if section \
exists but option is unset")
| cf.set('section', 'set', 'set-normally')
self.assertTr | ue(cf.has_option('section', 'set'),
msg="has option should return True if option is set \
normally")
cf.set(cf.default_section, 'default', 'set-in-defaults')
self.assertTrue(cf.has_option('section', 'default'),
msg="has_option should return True if option set in \
defaults")
def test_remove_option(self):
cf = JSONConfigParser()
cf.add_section('section')
cf.set('section', 'normal', 'set-normally')
cf.set(cf.default_section, 'default', 'set-in-defaults')
# can remove normal options
self.assertTrue(cf.remove_option('section', 'normal'))
self.assertFalse(cf.has_option('section', 'normal'))
# can't remove defaults accidentally (maybe there should be shadowing)
self.assertFalse(cf.remove_option('section', 'default'))
self.assertEqual(cf.get('section', 'default'), 'set-in-defaults')
def test_invalid_section(self):
cf = JSONConfigParser()
try:
cf.read_string((
'[valid]\n'
'irrelevant = "meh"\n'
'[]'
))
except ParseError as e:
self.assertEqual(e.lineno, 3)
# check that nothing was added
self.assertEqual(sum(1 for _ in cf.sections()), 0)
else: # pragma: no cover
self.fail()
try:
cf.read_string((
'[nooooooooooooooooooo'
))
except ParseError as e:
self.assertEqual(e.lineno, 1)
# check that nothing was added
self.assertEqual(sum(1 for _ in cf.sections()), 0)
else: # pragma: no cover
self.fail()
def test_invalid_values(self):
cf = JSONConfigParser()
try:
cf.read_string((
'[section]\n'
'unmatched = [1,2,3}'
))
except ParseError as e:
self.assertEqual(e.lineno, 2)
# check that nothing was added
self.assertEqual(sum(1 for _ in cf.sections()), 0)
else: # pragma: no cover
self.fail()
try:
cf.read_string((
'[section]\n'
'unterminated = "something\n'
))
except ParseError as e:
self.assertEqual(e.lineno, 2)
# check that nothing was added
self.assertEqual(sum(1 for _ in cf.sections()), 0)
else: # pragma: no cover
self.fail()
suite = unittest.TestLoader().loadTestsFromTestCase(JSONConfigTestCase)
|
pedro2555/vatsim-status-proxy | setup.py | Python | gpl-2.0 | 995 | 0.021106 |
"""
VATSIM Status Proxy.
Copyright (C) 2017 - 2019 Pedro Rodrigues <prodrigues1990@gmail.com>
VATSIM Status Proxy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 2 of the License.
VATSIM Status Proxy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for mo | re details.
You should have received a copy of the GNU General Pu | blic License
along with VATSIM Status Proxy. If not, see <http://www.gnu.org/licenses/>.
"""
from setuptools import setup, find_packages
setup(
name='VATSIM Status Proxy',
version='1.0',
description = 'VATSIM Status Proxy',
author = 'Pedro Rodrigues',
author_email = 'prodrigues1990@gmail.com',
packages = find_packages(),
install_requires = [],
test_suite = 'tests',
) |
theo-l/django | django/core/serializers/xml_serializer.py | Python | bsd-3-clause | 16,785 | 0.00143 | """
XML serializer.
"""
from xml.dom import pulldom
from xml.sax import handler
from xml.sax.expatreader import ExpatParser as _ExpatParser
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils.xmlutils import (
SimplerXMLGenerator, UnserializableContentError,
)
class Serializer(base.Serializer):
"""Serialize a QuerySet to XML."""
def indent(self, level):
if self.options.get('indent') is not None:
self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent') * level)
def start_serialization(self):
"""
Start serialization -- open the XML document and the root element.
"""
self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
self.xml.startDocument()
self.xml.startElement("django-objects", {"version": "1.0"})
def end_serialization(self):
"""
End serialization -- end the document.
"""
self.indent(0)
self.xml.endElement("django-objects")
self.xml.endDocument()
def start_object(self, obj):
"""
Called as each object is handled.
"""
if not hasattr(obj, "_meta"):
raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
self.indent(1)
attrs = {'model': str(obj._meta)}
if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
obj_pk = obj.pk
if obj_pk is not None:
| attrs['pk'] = str(obj_pk)
self.xml.startElement("object", attrs)
def end_object(self, obj):
"""
Called after handling all fields for an object.
"""
self.indent(1)
self.xml.endElement("object")
def handle_field(self, obj, field):
"""
Handle each field on an o | bject (except for ForeignKeys and
ManyToManyFields).
"""
self.indent(2)
self.xml.startElement('field', {
'name': field.name,
'type': field.get_internal_type(),
})
# Get a "string version" of the object's data.
if getattr(obj, field.name) is not None:
try:
self.xml.characters(field.value_to_string(obj))
except UnserializableContentError:
raise ValueError("%s.%s (pk:%s) contains unserializable characters" % (
obj.__class__.__name__, field.name, obj.pk))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_fk_field(self, obj, field):
"""
Handle a ForeignKey (they need to be treated slightly
differently from regular fields).
"""
self._start_relational_field(field)
related_att = getattr(obj, field.get_attname())
if related_att is not None:
if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
related = getattr(obj, field.name)
# If related object has a natural key, use it
related = related.natural_key()
# Iterable natural keys are rolled out as subelements
for key_value in related:
self.xml.startElement("natural", {})
self.xml.characters(str(key_value))
self.xml.endElement("natural")
else:
self.xml.characters(str(related_att))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_m2m_field(self, obj, field):
"""
Handle a ManyToManyField. Related objects are only serialized as
references to the object's PK (i.e. the related *data* is not dumped,
just the relation).
"""
if field.remote_field.through._meta.auto_created:
self._start_relational_field(field)
if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
# If the objects in the m2m have a natural key, use it
def handle_m2m(value):
natural = value.natural_key()
# Iterable natural keys are rolled out as subelements
self.xml.startElement("object", {})
for key_value in natural:
self.xml.startElement("natural", {})
self.xml.characters(str(key_value))
self.xml.endElement("natural")
self.xml.endElement("object")
else:
def handle_m2m(value):
self.xml.addQuickElement("object", attrs={
'pk': str(value.pk)
})
m2m_iter = getattr(obj, '_prefetched_objects_cache', {}).get(
field.name,
getattr(obj, field.name).iterator(),
)
for relobj in m2m_iter:
handle_m2m(relobj)
self.xml.endElement("field")
def _start_relational_field(self, field):
"""Output the <field> element for relational fields."""
self.indent(2)
self.xml.startElement('field', {
'name': field.name,
'rel': field.remote_field.__class__.__name__,
'to': str(field.remote_field.model._meta),
})
class Deserializer(base.Deserializer):
"""Deserialize XML."""
def __init__(self, stream_or_string, *, using=DEFAULT_DB_ALIAS, ignorenonexistent=False, **options):
super().__init__(stream_or_string, **options)
self.handle_forward_references = options.pop('handle_forward_references', False)
self.event_stream = pulldom.parse(self.stream, self._make_parser())
self.db = using
self.ignore = ignorenonexistent
def _make_parser(self):
"""Create a hardened XML parser (no custom/external entities)."""
return DefusedExpatParser()
def __next__(self):
for event, node in self.event_stream:
if event == "START_ELEMENT" and node.nodeName == "object":
self.event_stream.expandNode(node)
return self._handle_object(node)
raise StopIteration
def _handle_object(self, node):
"""Convert an <object> node to a DeserializedObject."""
# Look up the model using the model loading mechanism. If this fails,
# bail.
Model = self._get_model_from_node(node, "model")
# Start building a data dictionary from the object.
data = {}
if node.hasAttribute('pk'):
data[Model._meta.pk.attname] = Model._meta.pk.to_python(
node.getAttribute('pk'))
# Also start building a dict of m2m data (this is saved as
# {m2m_accessor_attribute : [list_of_related_objects]})
m2m_data = {}
deferred_fields = {}
field_names = {f.name for f in Model._meta.get_fields()}
# Deserialize each field.
for field_node in node.getElementsByTagName("field"):
# If the field is missing the name attribute, bail (are you
# sensing a pattern here?)
field_name = field_node.getAttribute("name")
if not field_name:
raise base.DeserializationError("<field> node is missing the 'name' attribute")
# Get the field from the Model. This will raise a
# FieldDoesNotExist if, well, the field doesn't exist, which will
# be propagated correctly unless ignorenonexistent=True is used.
if self.ignore and field_name not in field_names:
continue
field = Model._meta.get_field(field_name)
# As is usually the case, relation fields get the special treatment.
if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
value = self._handle_m2m_field_node(field_node, field)
|
aymeric-spiga/planetoplot | bin/asciiplot.py | Python | gpl-2.0 | 1,508 | 0.031167 | #! /usr/bin/env python
import ppplot
import numpy as np
from optparse import OptionParser ### TBR by argparse
# inputs and options
parser = OptionParser()
parser.usage = \
'''
asciiplot.py [options] text file(s)
-- default is col2 for field and col1 for coord
-- this can be set with -x and -y options
(or use --swap if applicable)
-- one-column files are also supported
'''
parser.add_option('-x','--colx',action='store',dest='colx',type="int",default=1,help='column for x axis')
parser.add_option('-y','--coly',action='store',dest='coly',type="int",default=2,help='column for y axis')
parser.add_option('-s','--skip',action='store',dest='skiprows',type="int",default=0,help='skip first rows in file(s)')
parser.add_option('-u','--unique',action='store_true',dest='unique',default=False,help='case with one column only')
parser = ppplot.opt(parser) # common options for plots
parser = ppplot.opt1d(parser) # common options for plots
(opt,args) = parser.parse_args()
# plot object + options
pl = pppl | ot.plot1d()
# for all input files
count = 0
for fff in args:
# transfer options to plot object
pl.transopt(opt,num=count)
# load data
var = np.transpose(np.loadtxt(fff,skiprows=opt.skiprows))
# get coord
if len(var.shape) == 1:
pl.f = var
pl.x = None # important for chained plots
elif opt.unique:
pl.f = var[opt.coly-1]
pl.x = None
else:
pl.f = var[opt.coly-1]
pl.x = var[opt.colx-1]
# make plot
pl.make()
count = count + 1
# show plo | t
ppplot.show()
|
octavioturra/aritial | google_appengine/google/appengine/api/datastore.py | Python | apache-2.0 | 85,194 | 0.006526 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The Python datastore API used by app developers.
Defines Entity, Query, and Iterator classes, as well as methods for all of the
datastore's calls. Also defines conversions between the Python classes and
their PB counterparts.
The datastore errors are defined in the datastore_errors module. That module is
only required to avoid circular imports. datastore imports datastore_types,
which needs BadValueError, so it can't be defined in datastore.
"""
import heapq
import itertools
import logging
import os
import re
import string
import sys
import traceback
from xml.sax import saxutils
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import capabilities
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_index
from google.appengine.datastore import datastore_pb
from google.appengine.runtime import apiproxy_errors
from google.appengine.datastore import entity_pb
try:
__import__('google.appengine.api.labs.taskqueue.taskqueue_service_pb')
taskqueue_service_pb = sys.modules.get(
'google.appengine.api.labs.taskqueue.taskqueue_service_pb')
except ImportError:
from google.appengine.api.taskqueue import taskqueue_service_pb
MAX_ALLOWABLE_QUERIES = 30
MAXIMUM_RESULTS = 1000
DEFAULT_TRANSACTION_RETRIES = 3
READ_CAPABILITY = capabilities.CapabilitySet('datastore_v3')
WRITE_CAPABILITY = capabilities.CapabilitySet(
'datastore_v3',
capabilities=['write'])
_MAX_INDEXED_PROPERTIES = 5000
_MAX_ID_BATCH_SIZE = 1000 * 1000 * 1000
Key = datastore_types.Key
typename = datastore_types.typename
_txes = {}
_ALLOWED_API_KWARGS = frozenset(['rpc'])
_ALLOWED_FAILOVER_READ_METHODS = set(
('Get', 'RunQuery', 'RunCompiledQuery', 'Count', 'Next'))
ARBITRARY_FAILOVER_MS = -1
STRONG_CONSISTENCY = 0
EVENTUAL_CONSISTENCY = 1
_MAX_INT_32 = 2**31-1
def NormalizeAndTypeCheck(arg, types):
"""Normalizes and type checks the given argument.
Args:
arg: an instance or iterable of the given type(s)
types: allowed type or tuple of types
Returns:
A (list, bool) tuple. The list is a normalized, shallow copy of the
argument. The boolean is True if the argument was a sequence, False
if it was a single object.
Raises:
AssertionError: types includes list or tuple.
BadArgumentError: arg is not an instance or sequence of one of the given
types.
"""
if not isinstance(types, (list, tuple)):
types = (types,)
assert list not in types and tuple not in types
if isinstance(arg, types):
return [arg], False
else:
if isinstance(arg, basestring):
raise datastore_errors.BadArgumentError(
'Expected an instance or iterable of %s; received %s (a %s).' %
(types, arg, typename(arg)))
try:
arg_list = list(arg)
except TypeError:
raise datastore_errors.BadArgumentError(
'Expected an instance or iterable of %s; received %s (a %s).' %
(types, arg, typename(arg)))
for val in arg_list:
if not isinstance(val, types):
raise datastore_errors.BadArgumentError(
'Expected one of %s; received %s (a %s).' %
(types, val, typename(val)))
return arg_list, True
def NormalizeAndTypeCheckKeys(keys):
"""Normalizes and type checks that the given argument is a valid key or keys.
A wrapper around NormalizeAndTypeCheck() that accepts strings, Keys, and
Entities, and normalizes to Keys.
Args:
keys: a Key or sequence of Keys
Returns:
A (list of Keys, bool) tuple. See NormalizeAndTypeCheck.
Raises:
BadArgumentError: arg is not an instance or sequence of one of the given
types.
"""
keys, multiple = NormalizeAndTypeCheck(keys, (basestring, Entity, Key))
keys = [_GetCompleteKeyOrError(key) for key in keys]
return (keys, multiple)
def GetRpcFromKwargs(kwargs):
if not kwargs:
return None
args_diff = set(kwargs) - _ALLOWED_API_KWARGS
if args_diff:
raise TypeError('Invalid arguments: %s' % ', '.join(args_diff))
return kwargs.get('rpc')
def _MakeSyncCall(service, call, request, response, rpc=None):
"""The APIProxy entry point for a synchronous API call.
Args:
service: string representing which service to call
call: string representing which function to call
request: protocol buffer for the request
response: protocol buffer for the response
rpc: datastore.DatastoreRPC to use for this request.
Returns:
Response protocol buffer. Caller should always use returned value
which may or may not be same as passed in 'response'.
Raises:
apiproxy_errors.Error or a subclass.
"""
if not rpc:
rpc = CreateRPC(service)
rpc.make_call(call, request, response | )
rpc.wait()
rpc.check_success()
return response
def CreateRPC(service='datastore_v3', deadline=None, callback=None,
read_policy=STRONG_CONSISTENCY):
"""Create an rpc for use in configuring datastore calls.
Args:
deadline: float, deadline for calls in seconds.
callback: callable, a callback triggered when this rpc completes,
accepts one argu | ment: the returned rpc.
read_policy: flag, set to EVENTUAL_CONSISTENCY to enable eventually
consistent reads
Returns:
A datastore.DatastoreRPC instance.
"""
return DatastoreRPC(service, deadline, callback, read_policy)
class DatastoreRPC(apiproxy_stub_map.UserRPC):
"""Specialized RPC for the datastore.
Wraps the default RPC class and sets appropriate values for use by the
datastore.
This class or a sublcass of it is intended to be instatiated by
developers interested in setting specific request parameters, such as
deadline, on API calls. It will be used to make the actual call.
"""
def __init__(self, service='datastore_v3', deadline=None, callback=None,
read_policy=STRONG_CONSISTENCY):
super(DatastoreRPC, self).__init__(service, deadline, callback)
self.read_policy = read_policy
def make_call(self, call, request, response):
if self.read_policy == EVENTUAL_CONSISTENCY:
if call not in _ALLOWED_FAILOVER_READ_METHODS:
raise datastore_errors.BadRequestError(
'read_policy is only supported on read operations.')
if call != 'Next':
request.set_failover_ms(ARBITRARY_FAILOVER_MS)
super(DatastoreRPC, self).make_call(call, request, response)
def clone(self):
"""Make a shallow copy of this instance.
This is usually used when an RPC has been specified with some configuration
options and is being used as a template for multiple RPCs outside of a
developer's easy control.
"""
assert self.state == apiproxy_rpc.RPC.IDLE
return self.__class__(
self.service, self.deadline, self.callback, self.read_policy)
def Put(entities, **kwargs):
"""Store one or more entities in the datastore.
The entities may be new or previously existing. For new entities, Put() will
fill in the app id and key assigned by the datastore.
If the argument is a single Entity, a single Key will be returned. If the
argument is a list of Entity, a list of Keys will be returned.
Args:
entities: Entity or list of Entities
rpc: datastore.RPC to use for this request.
Returns:
Key or list of Keys
Raises:
TransactionFailedError, if the Put could not be committed.
"""
rpc = GetRpcFromKwargs(kwargs)
entities, multiple = NormalizeAndTypeCheck(ent |
gniezen/n3pygments | swlexers/__init__.py | Python | bsd-2-clause | 13,819 | 0.011578 | # -*- coding: utf-8 -*-
"""
pygments.lexers.sw
~~~~~~~~~~~~~~~~~~~~~
Lexers for semantic web languages.
:copyright: 2007 by Philip Cooper <philip.cooper@openvest.com>.
:license: BSD, see LICENSE for more details.
Modified and extended by Gerrit Niezen. (LICENSE file described above is missing, wasn't distributed with original file)
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Error, Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal
from pygments.util import shebang_matches
__all__ = ['Notation3Lexer','SparqlLexer']
# The N3 lexer should be close to the not really correct grammar at
# http://www.w3.org/2000/10/swap/grammar/n3-ietf.txt
# Comments indicate to which grammar rule the various regular
# expressions correspond.
_explicit_uri = r'<[^>]*>'
_qname = r'((\w[-\w]*)?:)?\w[-\w]*|(\w[-\w]*)?:' #(([:letter:][-\w]*)?:)?[:letter:][.\w]*
_symbol = '(' + _qname + '|' + _explicit_uri +')'
_quickvariable = r'\?\w+'
def expression(symbolAction, nextState):
    # Build the shared token patterns for an N3 "expression".  Grammar:
    #   expression ::= pathitem pathtail
    #   pathitem   ::= "(" pathlist ")"
    #                | "[" propertylist "]"
    #                | "{" formulacontent "}"
    #                | boolean
    #                | literal
    #                | numericliteral
    #                | quickvariable
    #                | symbol
    # symbolAction: the token type to emit for a bare symbol in this context.
    # nextState: state(s) to push once the expression is consumed; 'pathtail'
    # is always appended so path suffixes are lexed afterwards.
    if not isinstance(nextState,tuple):
        nextState = (nextState,)
    nextState = nextState + ('pathtail',)
    return [
        # pathlist
        (r'\(', Punctuation, nextState + ('list',)),
        # propertylist
        (r'\[', Punctuation, nextState + ('propertyList',)),
        # formulacontent
        (r'\{', Punctuation, nextState + ('root',)),
        # boolean
        (r'@false|@true', Keyword.Constant, nextState),
        # literal: triple-quoted or plain double-quoted string
        (r'("""[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*""")|("[^"\\]*(?:\\.[^"\\]*)*")', String, nextState + ('dtlang',)),
        # numericliteral ::= double | integer | rational
        (r'[-+]?[0-9]+(\.[0-9]+)?([eE][-+]?[0-9]+)', Number.Float, nextState),
        (r'[-+]?[0-9]+', Number.Integer, nextState),
        (r'[-+]?[0-9]+/[0-9]+', Number, nextState),
        # quickvariable, e.g. ?x
        (_quickvariable, Name.Variable, nextState),
        # symbol (qname or explicit URI)
        (_symbol, symbolAction, nextState),
    ]
class Notation3Lexer(RegexLexer):
    """
    Lexer for the N3 / Turtle / NT semantic-web formats.
    """
    name = 'N3'
    aliases = ['n3', 'turtle']
    filenames = ['*.n3', '*.ttl', '*.NT']
    mimetypes = ['text/rdf+n3','application/x-turtle','application/n3']
    tokens = {
        'whitespaces': [
            (r'(#.*)', Comment),
            (r'\s+', Text),
        ],
        'pathtailExpression': expression(Name.Function, '#pop'),
        'pathtail': [
            # No whitespaces allowed in front!
            (r'(^|!|\.)(?!\s)', Operator, 'pathtailExpression'),
            (r'', Text, '#pop'),
        ],
        # statement:
        'root': [
            include('whitespaces'),
            # declaration ::= base | prefix | keywords
            (r'(@(?:prefix|base)\s*)([^\!\"\#\$\&\'\(\)\*\,\+\/\;\<\=\>\?\@\[\\\]\^\`\{\|\}\~]*:\s+)?(<[^>]*>\s*\.)', bygroups(Keyword,Name.Variable,Name.Namespace)),
            (r'(@keywords)(\s*\w+\s*,)*(\s*\w+)', bygroups(Keyword,Text,Text)),
            # existential | universal
            (r'@forSome|@forAll', Name.Class, 'symbol_csl'),
            # Terminating a formula
            (r'\}', Punctuation, '#pop'),
        ] + expression(Name.Class, 'propertyList'),
        'propertyList': [
            # predicate ::= "<="
            #             | "="
            #             | "=>"
            #             | "@a"
            #             | "@has" expression
            #             | "@is" expression "@of"
            #             | expression
            include('whitespaces'),
            (r';', Punctuation),
            (r'(<=|=>|=|@?a(?=\s))', Operator, 'objectList'),
            (r'\.', Punctuation, '#pop'),
            (r'\]', Punctuation, '#pop'),
            (r'(?=\})', Text, '#pop'),
        ] + expression(Name.Function, 'objectList'),
        'objectList': [
            include('whitespaces'),
            (r',', Punctuation),
            (r'(?=;)', Text, '#pop'),
            (r'(?=\.)', Text, '#pop'),
            (r'(?=\])', Text, '#pop'),
            (r'(?=\})', Text, '#pop'),
        ] + expression(Name.Attribute, ()),
        'list': [
            include('objectList'),
            (r'\)', Punctuation, '#pop'),
        ],
        'symbol_csl': [
            include('whitespaces'),
            (r',', Punctuation),
            (_symbol, Name.Variable),
            (r'.', Punctuation, '#pop'),
        ],
        'dtlang': [
            # dtlang ::= "@" langcode | "^^" symbol | void
            (r'@[a-z]+(-[a-z0-9]+)*', Name.Attribute, '#pop'),
            (r'\^\^'+_symbol, Name.Attribute, '#pop'),
            (r'', Text, '#pop'),
        ],
    }
class SparqlLexer(RegexLexer):
"""
Lexer for SPARQL Not Complete
"""
name = 'SPARQL'
aliases = ['sparql']
filenames = ['*.sparql']
mimetypes = ['text/x-sql']
flags = re.IGNORECASE
tokens = {
'comments': [
(r'(\s*#.*)', Comment)
],
'root': [
include('comments'),
(r'(\s*(?:PREFIX|BASE)\s+)([\w-]*:[\w-]*)?(\s*<[^> ]*>\s*)',bygroups(Keyword,Name.Variable,Name.Namespace)),
(r'(\s*#.*)', Comment),
(r'(\s*)(SELECT\s*(?:DISTINCT|REDUCED)?)(\s*)',bygroups(Text, Keyword,Text), 'selectVars'),
(r'(\s*)((?:ASK|CONSTRUCT|DESCRIBE)\s*(?:DISTINCT|REDUCED)?\s*)((?:\?[a-zA-Z0-9_-]+\s*)+|\*)(\s*)',
bygroups(Text, Keyword,Name.Variable,Text)),
(r'(\s*)((?:LOAD|CLEAR|DROP|CREATE)\s*(?:SILENT)?\s*)(\s*(?:GRAPH)?\s*)(\s*<[^> ]*>\s*)(;)(\s*)',
bygroups(Text, Keyword, Keyword, Name.Attribute, Text, Text)),
(r'(\s*)((?:ADD|MOVE|COPY)\s*(?:SILENT)?\s*)(\s*(?:GRAPH)?\s*)(\s*<[^> ]*>\s*)((?:TO)\s*)(\s*(?:GRAPH)?\s*)(\s*<[^> ]*>\s*)?(;)(\s*)',
bygroups(Text, Keyword, Keyword, Name.Attribute, Keyword, Keyword, Name.Attribute, Text, Text)),
(r'(\s*)((?:INSERT|DELETE)\s*(?:DATA)?)\s*',bygroups(Text, Keyword),'quaddata'),
(r'(\s*)(CONSTRUCT)?\s*({)',bygroups(Text, Keyword,Punctuation),'graph'),
(r'(\s*)(FROM\s*(?:NAMED)?)(\s*.*)', bygroups(Text, Keyword,Text)),
(r'(\s*)(WHERE\s?)?\s*({)',bygroups(Text, Keyword, Punctuation),'groupgraph'),
(r'(\s*)(LIMIT|OFFSET)(\s*[+-]?[0-9]+)',bygroups(Text, Keyword,Literal.String)),
(r'(ORDER BY (?:ASC|DESC)\s*)(\()\s*',bygroups(Text, Keyword,Punctuation),'bindgraph'),
(r'(\s*)(})', bygroups(Text, Punctuation)),
],
'selectVars':[
(r'(\s*)(\*)(\s*)',bygroups(Text,Keyword,Text), '#pop'),
(r'(?=\s*(FROM|WHERE|GROUP|HAVING|ORDER|LIMIT|OFFSET))', Text, '#pop'),
(r'(\s*)(\()(\s*)', bygroups(Text, Punctuation, Text), 'bindgraph'),
include('variable'),
(r'\n', Text),
(r'', Text, '#pop'),
],
'quaddata':[
(r'(\s*)({)(\s*)(GRAPH)(\s*<[^> ]*>\s*)', bygroups(Text, Punctuation, Text, Keyword, Name.Attribute), 'quads'),
(r'(\s*)({)(\s*)',bygroups(Text,Punctuation,Text), 'graph'),
(r'', Text, '#pop'),
],
'quads':[
(r'(\s*)({)(\s*)(GRAPH)(\s*<[^> ]*>\s*)', bygroups(Text, Punctuation, Text, Keyword, Name.Attribute), '#push'),
(r'(\s*)({)(\s*)', bygroups(Text,Punctuation,Text), 'graph'),
(r'(\s*)(})(\s*)', bygroups(Text,Punctuation,Text), '#pop'),
],
'groupgraph':[
(r'(\s*)(UNION)(\s*)({)(\s*)', bygroups(Text, Keyword, Text, Punctuation, Text), '#push'),
(r'(\s*)({)(\s*)',bygroups(Text, Punctuation, Text), '#push'),
include('graph'),
include('root'),
(r'', Text, '#pop'),
],
'graph':[
(r'(\s*)(<[^>]*\>)', bygroups(Text, Name.Class), ('triple','predObj')),
(r'(\s*[a- |
superzerg/TCD1304AP_teensy2pp | read_pixels.py | Python | gpl-3.0 | 2,341 | 0.032465 | #!/usr/bin/python3 -i
import serial # if you have not already done so
from time import sleep
import matplotlib.pyplot as plt
import re
import datetime
import numpy
import pickle
class DataExtruder:
    """Reads pixel frames from a serial-attached TCD1304AP CCD and live-plots
    the most recent 3648-pixel frame with matplotlib."""
    def __init__(self, port='/dev/ttyACM0', baudrate=115200):
        # One capture line looks like: "data=<hex pixels> (<n> errors...".
        self.pattern_pixels = re.compile(
            r'data=(?P<pixels>[\w ]*) \((?P<nerror>\d*) errors')
        self.port = port
        self.baudrate = baudrate
        self.ser = None  # opened lazily in acquire()
        self.data = {
            'pixels': [],
            'nerror': []
        }
        self.figure = plt.figure(figsize=[20, 8])
        self.figure.show()
        self.figure_axe = self.figure.gca()
    def acquire(self, plot=True):
        """Read frames from the serial port until Ctrl+C, optionally plotting
        each frame as it arrives."""
        if self.ser is None:
            self.ser = serial.Serial(self.port, self.baudrate)
        else:
            print('serial connection alredy opened')
        print('starting acquisition, press Ctrl+C to stop.')
        try:
            while True:
                data_serial = self.ser.readline().decode('utf-8')
                m = self.pattern_pixels.match(data_serial)
                if m:
                    pixels_num = []
                    pixels_ascii = m.group('pixels')
                    i = 0
                    npixel = 0
                    # Pixels are two hex chars each; "  " marks a read error
                    # and is stored as -1.
                    while i + 1 < len(pixels_ascii):
                        if pixels_ascii[i] == ' ':
                            if pixels_ascii[i + 1] == ' ':
                                pixels_num.append(-1)
                                i = i + 2
                            else:
                                print('ERROR reading pixel')
                                break
                        else:
                            # Sensor values are inverted: 255 is dark.
                            pixel = 255 - int(pixels_ascii[i:i + 2], 16)
                            pixels_num.append(pixel)
                            i = i + 2
                            npixel = npixel + 1
                    self.data['pixels'].append(pixels_num)
                    self.data['nerror'].append(int(m.group('nerror')))
                    if plot:
                        self.plot_pixels()
                sleep(0.05)
        except KeyboardInterrupt:
            pass
        self.ser.close()
        self.ser = None
    def plot_pixels(self):
        """Redraw the plot with the most recent complete (3648-pixel) frame."""
        plt.cla()
        self.figure_axe.set_position([0.05, 0.1, 0.94, 0.8])
        if len(self.data['pixels']) == 0:
            return
        last_reading = self.data['pixels'][len(self.data['pixels']) - 1]
        if len(last_reading) != 3648:
            # Incomplete frame; skip drawing rather than plot garbage.
            return
        x = range(1, 3649)
        self.plt_pixels, = plt.plot(x, last_reading, 'b-')
        self.figure_axe.set_ylim([-1, 255])
        self.figure_axe.set_xlim([1, 3648])
        self.figure_axe.set_ylabel('pixel value')
        self.figure_axe.set_xlabel('pixel')
        plt.draw()
if __name__ == '__main__':
    # Open the default Teensy serial port and stream frames until Ctrl+C.
    extruder = DataExtruder(port='/dev/ttyACM0', baudrate=115200)
    extruder.acquire()
|
cpacia/python-libbitcoinclient | obelisk/__init__.py | Python | agpl-3.0 | 423 | 0.009456 | from binary import *
from bitcoin import *
from client import *
from models import *
from transaction import *
from zmqbase import MAX_UINT32
from twisted.internet import reactor
def select_network(network):
    """Point the package-wide chain configuration at testnet or mainnet.

    Any network name containing "test" (case-insensitive) selects the
    testnet chain; every other name selects mainnet.
    """
    # Imported lazily; presumably avoids a circular import at module load
    # time -- TODO confirm.
    import config
    if "test" in network.lower():
        config.chain = config.testnet_chain
    else:
        config.chain = config.mainnet_chain
def start():
    # Run the twisted reactor; blocks the caller until stop() is invoked.
    reactor.run()
def stop():
    """Stop the running twisted reactor, unblocking start()."""
    reactor.stop()
|
kbrannan/PyHSPF | src/pyhspf/core/hspfplots.py | Python | bsd-3-clause | 89,979 | 0.033619 | # HSPF Model Plot Routines
#
# David J. Lampert, PhD, PE
#
# Last updated: 10/16/2013
#
# Purpose: Lots of routines here to generate images for development of an HSPF
# model. Descriptions below.
#
from matplotlib import pyplot, gridspec, path, patches, ticker, dates
from calendar import isleap
from scipy import stats, log10
from itertools import chain
import numpy as np, os, datetime, math
def average(l):
    """Arithmetic mean of *l*; None when the sequence is empty."""
    return sum(l) / len(l) if len(l) > 0 else None
def stderr(l):
    """Half-width of the 95% confidence interval (1.96 standard errors)
    for *l*; None when there are too few samples (len <= 2)."""
    n = len(l)
    if n <= 2:
        return None
    return 1.96 * np.std(l) / np.sqrt(n)
def dayofyear(dates, values):
    """Returns the day-of-water-year averages for the timeseries.

    Builds a water year (Oct 1 through Sep 30, anchored to a leap year so
    Feb 29 has a slot) and averages all observations sharing each calendar
    (month, day), ignoring None values.

    Returns: (wateryear, watervalues) -- parallel lists of dates and means.
    """
    year = dates[0].year
    while not isleap(year): year += 1
    delta = datetime.timedelta(days = 1)
    # If the record ends before Jan 1 of the chosen leap year, use a 365-day
    # water year one year earlier; otherwise the full 366-day leap water year.
    # NOTE(review): comparing datetime.datetime to the elements of *dates*
    # assumes they are datetimes, not dates -- confirm against callers.
    if datetime.datetime(year, 1, 1) > dates[-1]:
        wateryear = [datetime.date(year - 2, 10, 1) + i * delta
                     for i in range(365)]
    else:
        wateryear = [datetime.date(year - 1, 10, 1) + i * delta
                     for i in range(366)]
    watervalues = [average([v for t, v in zip(dates, values)
                            if t.month == day.month and t.day == day.day and
                            v is not None])
                   for day in wateryear]
    return wateryear, watervalues
def monthofyear(dates, values):
    """Returns the month-of-water-year averages for the timeseries.

    With more than 12 values, every 12th value is averaged to get one mean
    per calendar month; otherwise the values are used as-is.  The result is
    rotated so it starts at October (the start of the water year).
    """
    if len(values) > 12:
        watervalues = [average([values[j] for j in range(i, len(values), 12)])
                       for i in range(12)]
    else:
        watervalues = values
    months = [d.month for d in dates]
    # Rotate so the first October in the record comes first.
    i = months.index(10)
    watervalues = (watervalues[i:] + watervalues[:i])
    return watervalues
def dayofyear_range(dates, values):
    """Return the 95% confidence half-width (via stderr) of the observations
    for each day of a 366-day water year starting October 1."""
    leapyear = dates[0].year
    while not isleap(leapyear):
        leapyear += 1
    start = datetime.date(leapyear - 1, 10, 1)
    wateryear = [start + datetime.timedelta(days=n) for n in range(366)]
    watervalues = []
    for day in wateryear:
        matching = [v for t, v in zip(dates, values)
                    if (t.month, t.day) == (day.month, day.day) and
                    v is not None]
        watervalues.append(stderr(matching))
    return watervalues
def make_patch(points, facecolor, edgecolor = 'Black', width = 1, alpha = None,
               hatch = None, label = None):
    """Build a closed matplotlib PathPatch from a sequence of (x, y) points."""
    # Close the polygon by repeating the first point at the end.
    first = (points[0][0], points[0][1])
    vertices = [(p[0], p[1]) for p in points] + [first]
    # MOVETO for the first vertex, LINETO for every subsequent one.
    codes = [path.Path.MOVETO] + [path.Path.LINETO] * len(points)
    return patches.PathPatch(path.Path(vertices, codes), facecolor = facecolor,
                             edgecolor = edgecolor, lw = width, hatch = hatch,
                             alpha = alpha, label = label)
def plot_hydrograph(HUC8, precipitation, simulated_flow, simulated_evap,
potential_evap, observed_flow = None, tstep = 'daily',
flow_color = 'red', prec_color = 'blue',
pet_color = 'orange', evap_color = 'green',
titlesize = 16, axsize = 14, ticksize = 12,
units = 'Metric',
output = None, show = True, verbose = False):
"""Makes a plot of precipitation and evapotranspiration."""
if verbose: print('making a plot of the hydrology simulation')
fig = pyplot.figure(figsize = (12,9))
ax1 = pyplot.subplot2grid((3,1),(0,0), rowspan = 2)
if tstep == 'daily':
title = '%s HSPF Hydrology Simulation, Daily Values\n' % HUC8
flow_lw = 0.3
prec_lw = 0.5
pet_lw = 1.
evap_lw = 0.3
flow_marker = 4
flow_marker_lw = 0.3
elif tstep == 'monthly':
title = '%s HSPF Hydrology Simulation, Monthly Values\n' % HUC8
flow_lw = 0.6
prec_lw = 0.6
pet_lw = 2.
evap_lw = 0.6
flow_marker = 8
flow_marker_lw = 0.6
else:
print('unknown time step specified')
return
ax1.set_title(title, fontsize = titlesize)
ax1.plot_date(x = simulated_flow[0], y = simulated_flow[1], lw = flow_lw,
label = 'simulated flow', fmt = '-', color = flow_color)
if tstep == 'monthly':
ax1.fill_between(simulated_flow[0], 0, simulated_flow[1],
facecolor = flow_color, alpha = 0.3)
if observed_flow is not None:
ax1.plot_date(x = observed_flow[0], y = observed_flow[1],
label = 'observed flow', marker = 's',
markeredgewidth = flow_marker_lw,
markeredgecolor = flow_color,
markerfacecolor = 'None', markersize = flow_marker)
if units == 'Metric': l = 'Flow (m\u00B3/s)'
elif units == 'English': l = 'Flow (ft\u00B3/s)'
ax1.set_ylabel(l, color = flow_color, size = axsize)
# set the y limits to half (to see both precip and flow)
xmin, xmax, ymin, ymax = ax1.axis()
ax1.set_ylim([0, 2 * ymax])
# add the precipitation graph
ax2 = ax1.twinx()
ax2.plot_date(x = precipitation[0], y = precipitation[1], lw = prec_lw,
label = 'precipitation', fmt = '-', color = prec_color)
# fill in the precipitation if it's monthly
#if tstep == 'monthly':
ax2.fill_between(precipitation[0], 0, precipitation[1],
facecolor = prec_color, alpha = 0.3)
if units == 'Metric': l = 'Precipitation (mm)'
elif units == 'English': l = 'Precipitation (in)'
ax2.set_ylabel(l, color = prec_color, fontsize = axsize)
ax2.invert_yaxis()
# set the y limits to half (to see both precip and flow)
xmin, xmax, ymin, ymax = ax2.axis()
ax2.set_ylim([2 * ymin, 0])
for t in (ax1.yaxis.get_ticklabels() +
ax2.yaxis.get_ticklabels()):
t.set_fontsize(ticksize)
# customize the tick marks and gridlines
ax1.tick_params(axis = 'x', gridOn = 'on')
ax1.tick_params(axis = 'y', size = ticksize, colors = flow_color,
gridOn = 'on')
ax2.tick_params(axis = 'y', colors = prec_color)
for t in (ax1.yaxis.get_ticklines() + ax2.yaxis.get_ticklines() +
ax1.xaxis.get_ticklabels()):
t.set_visible(False)
# add the evaporation data
times, evaps = simulated_evap
ax3 = pyplot.subplot2grid((3,1),(2,0), sharex = ax1)
ax3.plot_date(x = times, y = evaps, label = 'simulated evapotranspiration',
fmt = '-', lw = evap_lw, color = evap_color)
ax3.fill_between(times, 0, evaps,
facecolor = evap_color, alpha = 0.5)
# add the potential evapotranspiration data
times, pets = potential_evap
ax3.plot_date(x = times, y = pets, label = 'potential evapotranspiration',
fmt = '-', lw = pet_lw, color = pet_color)
ax3.set_xlabel('Time', size = axsize)
if units == 'Metric': l = 'Evaporation (mm)'
elif units == 'English': l = 'Evaporation (in)'
ax3.set_ylabel(l, size = axsize, color = evap_color)
xmin, xmax, ymin, ymax = ax3.axis()
ax3.set_ylim([0, ymax])
# set tick size and add gridlines
ax3.tick_params(axis = 'both', gridOn = 'on')
ax3.tick_params(axis = 'y', size = ticksize, colors = evap_color)
for t in (ax3.xaxis.get_ticklabels() + ax3.yaxis.get_ticklabels()):
t.set_fontsize(ticksize)
for t in ax3.yaxis.get_ticklines(): t.set_visible(False)
# add a legend
hs, ls = zip(*chain(zip(*ax1.get_legend_handles_labels()),
zip(*ax2.get_legend_handles_labels()),
zip(*ax3.get_legend_handles_labels())))
leg = ax3.legend(hs, ls, loc = 'upper center', ncol = math.ceil(len(hs)/2),
bbox_to_anchor = (0.5, -0.25))
legtext = leg.get_texts()
pyplot.setp(legtext, fontsize = ticksize)
pyplot.tight_layout()
# show it
if output is not None:
pyplot.sav |
ativelkov/murano-api | murano/api/v1/deployments.py | Python | apache-2.0 | 5,151 | 0 | # Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import desc
from webob import exc
from murano.api.v1 import request_statistics
from murano.common import policy
from murano.common import utils
from murano.db import models
from murano.db import session as db_session
from murano.openstack.common.gettextutils import _ # noqa
from murano.openstack.common import log as logging
from murano.openstack.common import wsgi
LOG = logging.getLogger(__name__)
API_NAME = 'Deployments'
class Controller(object):
    """Read-only REST controller for environment deployments: a listing
    endpoint and a per-deployment status-report endpoint."""
    @request_statistics.stats_count(API_NAME, 'Index')
    def index(self, request, environment_id):
        # List the environment's deployments, newest first, each annotated
        # with a derived 'state' by set_dep_state().
        target = {"environment_id": environment_id}
        policy.check("list_deployments", request.context, target)
        unit = db_session.get_session()
        # Raises HTTPNotFound/HTTPUnauthorized for bad ids or wrong tenant.
        verify_and_get_env(unit, environment_id, request)
        query = unit.query(models.Deployment) \
            .filter_by(environment_id=environment_id) \
            .order_by(desc(models.Deployment.created))
        result = query.all()
        deployments = [set_dep_state(deployment, unit).to_dict() for deployment
                       in result]
        return {'deployments': deployments}
    @request_statistics.stats_count(API_NAME, 'Statuses')
    def statuses(self, request, environment_id, deployment_id):
        # List a deployment's status reports in chronological order,
        # optionally filtered by one or more ?service_id= query parameters.
        target = {"environment_id": environment_id,
                  "deployment_id": deployment_id}
        policy.check("statuses_deployments", request.context, target)
        unit = db_session.get_session()
        query = unit.query(models.Status) \
            .filter_by(deployment_id=deployment_id) \
            .order_by(models.Status.created)
        deployment = verify_and_get_deployment(unit, environment_id,
                                               deployment_id)
        if 'service_id' in request.GET:
            service_id_set = set(request.GET.getall('service_id'))
            environment = deployment.description
            entity_ids = []
            for service in environment.get('services', []):
                if service['?']['id'] in service_id_set:
                    id_map = utils.build_entity_map(service)
                    # NOTE(review): list + dict.keys() concatenation is
                    # Python 2 only; keys() is a view on Python 3.
                    entity_ids = entity_ids + id_map.keys()
            if entity_ids:
                query = query.filter(models.Status.entity_id.in_(entity_ids))
            else:
                # Requested services produced no entities: nothing to report.
                return {'reports': []}
        result = query.all()
        return {'reports': [status.to_dict() for status in result]}
def verify_and_get_env(db_session, environment_id, request):
    """Fetch the environment with *environment_id*, ensuring it exists and
    belongs to the requesting tenant.

    Raises HTTPNotFound when no such environment exists and HTTPUnauthorized
    when it belongs to a different tenant.
    """
    env = db_session.query(models.Environment).get(environment_id)
    if not env:
        msg = _('Environment with id {0} not found').format(environment_id)
        LOG.info(msg)
        raise exc.HTTPNotFound
    if request.context.tenant != env.tenant_id:
        LOG.info(_('User is not authorized to access this tenant resources.'))
        raise exc.HTTPUnauthorized
    return env
def _patch_description(description):
description['services'] = description.get('applications', [])
del description['applications']
def verify_and_get_deployment(db_session, environment_id, deployment_id):
    """Fetch the deployment with *deployment_id*, checking that it exists
    (HTTPNotFound) and belongs to *environment_id* (HTTPBadRequest), then
    patch its description for API output.
    """
    dep = db_session.query(models.Deployment).get(deployment_id)
    if not dep:
        LOG.info(_('Deployment with id {0} not found').format(deployment_id))
        raise exc.HTTPNotFound
    if environment_id != dep.environment_id:
        msg = _('Deployment with id {0} not found'
                ' in environment {1}').format(deployment_id, environment_id)
        LOG.info(msg)
        raise exc.HTTPBadRequest
    _patch_description(dep.description)
    return dep
def create_resource():
    """Build the WSGI resource wrapping this module's Controller."""
    return wsgi.Resource(Controller())
def set_dep_state(deployment, unit):
    """Annotate *deployment* with a derived ``state`` string based on its
    error/warning status counts and whether it has finished, then patch its
    description for API output.  Returns the (mutated) deployment.
    """
    num_errors = unit.query(models.Status).filter_by(
        level='error',
        deployment_id=deployment.id).count()
    num_warnings = unit.query(models.Status).filter_by(
        level='warning',
        deployment_id=deployment.id).count()
    # Errors take precedence over warnings in the derived state.
    if deployment.finished:
        if num_errors:
            deployment.state = 'completed_w_errors'
        elif num_warnings:
            deployment.state = 'completed_w_warnings'
        else:
            deployment.state = 'success'
    else:
        if num_errors:
            deployment.state = 'running_w_errors'
        elif num_warnings:
            deployment.state = 'running_w_warnings'
        else:
            deployment.state = 'running'
    _patch_description(deployment.description)
    return deployment
|
danieljwest/mycli | mycli/config.py | Python | bsd-3-clause | 4,404 | 0.001135 | import shutil
from io import BytesIO, TextIOWrapper
import logging
import os
from os.path import expanduser, exists
import struct
from configobj import ConfigObj
from Crypto.Cipher import AES
logger = logging.getLogger(__name__)
def load_config(usr_cfg, def_cfg=None):
    """Merge the default config with the user's config file.

    User settings win over defaults; the result is bound to the user's
    (tilde-expanded) path so that saving writes back there.
    """
    usr_path = expanduser(usr_cfg)
    cfg = ConfigObj()
    for source in (def_cfg, usr_path):
        cfg.merge(ConfigObj(source, interpolation=False))
    cfg.filename = usr_path
    return cfg
def write_default_config(source, destination, overwrite=False):
    """Copy *source* to *destination* (tilde-expanded), skipping the copy
    when the target already exists unless *overwrite* is true."""
    target = expanduser(destination)
    if overwrite or not exists(target):
        shutil.copyfile(source, target)
def get_mylogin_cnf_path():
    """Return the path to the .mylogin.cnf file or None if doesn't exist."""
    # On Windows the file lives under %APPDATA%\MySQL; elsewhere in $HOME.
    app_data = os.getenv('APPDATA')
    base_dir = (os.path.expanduser('~') if app_data is None
                else os.path.join(app_data, 'MySQL'))
    login_path = os.path.join(os.path.abspath(base_dir), '.mylogin.cnf')
    if not exists(login_path):
        return None
    logger.debug("Found login path file at '{0}'".format(login_path))
    return login_path
def open_mylogin_cnf(name):
    """Open a readable, decrypted version of .mylogin.cnf.

    :param str name: pathname of the login path file to open
    :return: a text stream over the decrypted contents, or None on failure
    """
    try:
        with open(name, 'rb') as cnf_file:
            decrypted = read_and_decrypt_mylogin_cnf(cnf_file)
    except (OSError, IOError):
        logger.error('Unable to open login path file.')
        return None
    if isinstance(decrypted, BytesIO):
        return TextIOWrapper(decrypted)
    logger.error('Unable to read login path file.')
    return None
def read_and_decrypt_mylogin_cnf(f):
    """Read and decrypt the contents of .mylogin.cnf.

    This decryption algorithm mimics the code in MySQL's
    mysql_config_editor.cc.
    The login key is 20-bytes of random non-printable ASCII.
    It is written to the actual login path file. It is used
    to generate the real key used in the AES cipher.

    :param f: an I/O object opened in binary mode
    :return: the decrypted login path file
    :rtype: io.BytesIO or None
    """
    # Number of bytes used to store the length of ciphertext.
    MAX_CIPHER_STORE_LEN = 4
    LOGIN_KEY_LEN = 20
    # Move past the unused buffer.
    buf = f.read(4)
    if not buf or len(buf) != 4:
        logger.error('Login path file is blank or incomplete.')
        return None
    # Read the login key.
    key = f.read(LOGIN_KEY_LEN)
    # Generate the real key by XOR-folding the login key into 16 bytes.
    rkey = [0] * 16
    for i in range(LOGIN_KEY_LEN):
        try:
            rkey[i % 16] ^= ord(key[i:i+1])
        except TypeError:
            # ord() was unable to get the value of the byte.
            logger.error('Unable to generate login path AES key.')
            return None
    rkey = struct.pack('16B', *rkey)
    # Create a cipher object using the key.
    aes_cipher = AES.new(rkey, AES.MODE_ECB)
    # Create a bytes buffer to hold the plaintext.
    plaintext = BytesIO()
    while True:
        # Read the length of the ciphertext.
        len_buf = f.read(MAX_CIPHER_STORE_LEN)
        if len(len_buf) < MAX_CIPHER_STORE_LEN:
            break
        cipher_len, = struct.unpack("<i", len_buf)
        # Read cipher_len bytes from the file and decrypt.
        cipher = f.read(cipher_len)
        pplain = aes_cipher.decrypt(cipher)
        try:
            # Determine pad length from the final byte.
            pad_len = ord(pplain[-1:])
        except TypeError:
            # ord() was unable to get the value of the byte.
            logger.warning('Unable to remove pad.')
            continue
        if pad_len > len(pplain) or len(set(pplain[-pad_len:])) != 1:
            # Pad length should be less than or equal to the length of the
            # plaintext. The pad should have a single unique byte.
            logger.warning('Invalid pad found in login path file.')
            continue
        # Get rid of pad.
        plain = pplain[:-pad_len]
        plaintext.write(plain)
    if plaintext.tell() == 0:
        logger.error('No data successfully decrypted from login path file.')
        return None
    plaintext.seek(0)
    return plaintext
|
CellulaProject/icc.cellula | src/icc/data/recoll/filters/rclxls.py | Python | lgpl-3.0 | 2,469 | 0.003645 | #!/usr/bin/env python2
# Extractor for Excel files.
# Mso-dumper is not compatible with Python3. We use sys.executable to
# start the actual extractor, so we need to use python2 too.
import os
import re
import sys
import xml.sax

import rclexecm
import rclexec1
import xlsxmltocsv
class XLSProcessData:
    """Accumulates helper-command output for one XLS file and wraps it in a
    minimal HTML document.

    When *ishtml* is true the input is already HTML and is passed through
    verbatim; otherwise the collected xls-dump XML is converted to text via
    xlsxmltocsv and wrapped in an HTML <pre> block.
    """
    def __init__(self, em, ishtml = False):
        self.em = em
        self.out = ""
        self.gotdata = 0
        self.xmldata = ""
        self.ishtml = ishtml
    def takeLine(self, line):
        # Feed one line of the helper command's output.
        if self.ishtml:
            self.out += line + "\n"
            return
        if not self.gotdata:
            # Emit the HTML header once, before the first data line.
            self.out += '''<html><head>''' + \
                        '''<meta http-equiv="Content-Type" ''' + \
                        '''content="text/html;charset=UTF-8">''' + \
                        '''</head><body><pre>'''
            self.gotdata = True
        self.xmldata += line
    def wrapData(self):
        # Return the complete HTML document for the accumulated input.
        if self.ishtml:
            return self.out
        handler = xlsxmltocsv.XlsXmlHandler()
        data = xml.sax.parseString(self.xmldata, handler)
        self.out += self.em.htmlescape(handler.output)
        return self.out + '''</pre></body></html>'''
class XLSFilter:
    """Chooses the command used to extract one XLS document, for use with
    rclexec1.Executor.  Runs xls-dump.py at most once per document."""
    def __init__(self, em):
        self.em = em
        # Guard so getCmd() only hands out a command on the first call.
        self.ntry = 0
    def reset(self):
        # Called between documents: allow getCmd() to run again.
        self.ntry = 0
        pass
    def getCmd(self, fn):
        # Return (command_list, output_processor[, options]) for file *fn*,
        # or ([], None) when there is nothing (more) to do.
        if self.ntry:
            return ([], None)
        self.ntry = 1
        # Some HTML files masquerade as XLS
        try:
            # NOTE(review): read(512) returns bytes, and find('html') with a
            # str needle only works on Python 2 (this is a python2 script).
            data = open(fn, 'rb').read(512)
            if data.find('html') != -1 or data.find('HTML') != -1:
                return ("cat", XLSProcessData(self.em, True))
        except Exception as err:
            self.em.rclog("Error reading %s:%s" % (fn, str(err)))
            pass
        cmd = rclexecm.which("xls-dump.py")
        if cmd:
            # xls-dump.py often exits 1 with valid data. Ignore exit value
            return ([sys.executable, cmd, "--dump-mode=canonical-xml", \
                    "--utf-8", "--catch"],
                    XLSProcessData(self.em), rclexec1.Executor.opt_ignxval)
        else:
            return ([], None)
if __name__ == '__main__':
    # Bail out early with the recoll error protocol when the helper that
    # does the actual parsing is not installed.
    if not rclexecm.which("xls-dump.py"):
        # The original message named ppt-dump.py -- a copy/paste from the
        # PPT filter; report the helper this filter actually requires.
        print("RECFILTERROR HELPERNOTFOUND xls-dump.py")
        sys.exit(1)
    proto = rclexecm.RclExecM()
    filter = XLSFilter(proto)
    extract = rclexec1.Executor(proto, filter)
    rclexecm.main(proto, extract)
|
SabatierBoris/CecileWebSite | pyramidapp/forms/__init__.py | Python | gpl-2.0 | 53 | 0 | # vim: set | fileencoding=utf-8 :
"""
Forms module
""" | |
ProjectSWGCore/NGECore2 | scripts/mobiles/generic/static/tatooine/jano.py | Python | lgpl-3.0 | 1,147 | 0.026155 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
	"""Register the static Tatooine NPC 'jano' (tutorial conversable mob)
	with the spawn service."""
	mobileTemplate = MobileTemplate()
	mobileTemplate.setCreatureName('tatooine_opening_jano')
	mobileTemplate.setLevel(1)
	mobileTemplate.setDifficulty(Difficulty.NORMAL)
	mobileTemplate.setSocialGroup("township")
	# Quest NPC: cannot be attacked, can be talked to.
	mobileTemplate.setOptionsBitmask(Options.INVULNERABLE | Options.CONVERSABLE)
	templates = Vector()
	templates.add('object/mobile/shared_dressed_tatooine_opening_jano.iff')
	mobileTemplate.setTemplates(templates)
	weaponTemplates = Vector()
	weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
	weaponTemplates.add(weapontemplate)
	mobileTemplate.setWeaponTemplateVector(weaponTemplates)
	attacks = Vector()
	mobileTemplate.setDefaultAttack('creatureMeleeAttack')
	mobileTemplate.setAttacks(attacks)
	core.spawnService.addMobileTemplate('jano', mobileTemplate)
	return
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/connectivity_issue.py | Python | mit | 2,121 | 0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectivityIssue(Model):
    """Information about an issue encountered in the process of checking for
    connectivity.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar origin: The origin of the issue. Possible values include: 'Local',
     'Inbound', 'Outbound'
    :vartype origin: str or ~azure.mgmt.network.v2017_11_01.models.Origin
    :ivar severity: The severity of the issue. Possible values include:
     'Error', 'Warning'
    :vartype severity: str or ~azure.mgmt.network.v2017_11_01.models.Severity
    :ivar type: The type of issue. Possible values include: 'Unknown',
     'AgentStopped', 'GuestFirewall', 'DnsResolution', 'SocketBind',
     'NetworkSecurityRule', 'UserDefinedRoute', 'PortThrottled', 'Platform'
    :vartype type: str or ~azure.mgmt.network.v2017_11_01.models.IssueType
    :ivar context: Provides additional context on the issue.
    :vartype context: list[dict[str, str]]
    """
    # Every field is server-populated, so mark them all read-only: the
    # serializer then omits them from outgoing requests.
    _validation = {
        'origin': {'readonly': True},
        'severity': {'readonly': True},
        'type': {'readonly': True},
        'context': {'readonly': True},
    }
    # Maps Python attribute names to wire (JSON) keys and msrest type codes.
    _attribute_map = {
        'origin': {'key': 'origin', 'type': 'str'},
        'severity': {'key': 'severity', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'context': {'key': 'context', 'type': '[{str}]'},
    }
    def __init__(self, **kwargs):
        super(ConnectivityIssue, self).__init__(**kwargs)
        self.origin = None
        self.severity = None
        self.type = None
        self.context = None
|
def somaBin(bin1, bin2):
    """Add two 8-bit binary numbers given as lists of bits (MSB first).

    Returns a new 8-bit list; a carry out of the most significant bit is
    dropped (addition modulo 256).  The original version never cleared the
    carry when a column sum was 0 or 1, so a single carry corrupted every
    higher bit; it also wrote into a module-level ``somado`` list.
    """
    somado = [0] * 8  # result bits (local; no longer a shared global)
    resto = 0         # carry into the current column
    for e in range(7, -1, -1):
        somado[e] = bin1[e] + bin2[e] + resto
        if somado[e] > 1:
            # Column sum 2 or 3: keep the low bit, carry the high bit.
            resto = 1
            somado[e] -= 2
        else:
            # Column sum 0 or 1: the carry was consumed -- clear it.
            resto = 0
    return somado
# Interactive driver: read two 8-bit binary numbers bit by bit (MSB first)
# and print their sum.
bin1 = []
bin2 = []
somado = [0,0,0,0,0,0,0,0]
for x in range(0,8):
    bin1.append(int(input('Binário 1: ')))
for d in range(0,8):
    bin2.append(int(input('Binário 2: ')))
print(somaBin(bin1,bin2))
|
kevin-coder/tensorflow-fork | tensorflow/python/data/experimental/ops/random_ops.py | Python | apache-2.0 | 2,234 | 0.003133 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Datasets for random number generators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import random_seed
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.RandomDataset", v1=[])
class RandomDatasetV2(dataset_ops.DatasetSource):
  """A `Dataset` of pseudorandom values."""
  def __init__(self, seed=None):
    """Creates a `Dataset` of scalar int64 pseudorandom values.

    Args:
      seed: (Optional.) An integer seed; resolved into the (seed, seed2)
        pair the kernel expects via `random_seed.get_seed`.
    """
    self._seed, self._seed2 = random_seed.get_seed(seed)
    variant_tensor = gen_experimental_dataset_ops.experimental_random_dataset(
        seed=self._seed, seed2=self._seed2, **dataset_ops.flat_structure(self))
    super(RandomDatasetV2, self).__init__(variant_tensor)
  @property
  def _element_structure(self):
    # Each element is a scalar int64.
    return structure.TensorStructure(dtypes.int64, [])
@tf_export(v1=["data.experimental.RandomDataset"])
class RandomDatasetV1(dataset_ops.DatasetV1Adapter):
  """A `Dataset` of pseudorandom values."""

  @functools.wraps(RandomDatasetV2.__init__)
  def __init__(self, seed=None):
    # Thin V1 wrapper: build the V2 dataset and adapt it to the V1 API.
    wrapped = RandomDatasetV2(seed)
    super(RandomDatasetV1, self).__init__(wrapped)


# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# this alias in place.
RandomDataset = RandomDatasetV1
|
obi-two/Rebelion | data/scripts/templates/object/building/poi/shared_endor_ewok_medium4.py | Python | mit | 449 | 0.046771 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Building template for the medium Endor Ewok POI."""
    building = Building()

    building.template = "object/building/poi/shared_endor_ewok_medium4.iff"
    building.attribute_template_id = -1
    building.stfName("poi_n","base_poi_building")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return building
JoseALermaIII/python-tutorials | pythontutorials/books/AutomateTheBoringStuff/Ch05/P2_characterCount.py | Python | mit | 496 | 0.006048 | """Character count
This program | counts how often each character appears in a string.
"""
def main():
    """Tally how often each character occurs in a fixed message and print it."""
    message = 'It was a bright cold day in April, and the clocks were striking thirteen.'
    count = {}
    for character in message:
        # dict.get folds the setdefault step into the update while keeping
        # keys in first-seen order, so the printed dict is unchanged.
        count[character] = count.get(character, 0) + 1
    print(count)


if __name__ == '__main__':
    main()
|
aYukiSekiguchi/ACCESS-Chromium | native_client_sdk/src/build_tools/nacl_sdk_scons/nacl_utils_test.py | Python | bsd-3-clause | 4,315 | 0.004403 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for nacl_utils.py."""
import fileinput
import mox
import nacl_utils
import os
import sys
import unittest
def TestMock(file_path, open_func):
    """Open file_path via open_func and immediately close the returned handle."""
    handle = open_func(file_path)
    handle.close()
class TestNaClUtils(unittest.TestCase):
  """Class for test cases to cover globally declared helper functions."""

  def setUp(self):
    self.script_dir = os.path.abspath(os.path.dirname(__file__))
    self.mock_factory = mox.Mox()
    self.InitializeResourceMocks()

  def InitializeResourceMocks(self):
    """Can be called multiple times if multiple functions need to be tested."""
    self.fileinput_mock = self.mock_factory.CreateMock(fileinput)
    self.os_mock = self.mock_factory.CreateMock(os)
    self.sys_mock = self.mock_factory.CreateMock(sys)

  def testToolchainPath(self):
    # A valid SDK root yields <root>/toolchain/<variant>; unknown arch or
    # variant values must raise ValueError.
    output = nacl_utils.ToolchainPath('nacl_sdk_root')
    head, tail = os.path.split(output)
    base, toolchain = os.path.split(head)
    self.assertEqual('nacl_sdk_root', base)
    self.assertEqual('toolchain', toolchain)
    self.assertRaises(ValueError,
                      nacl_utils.ToolchainPath,
                      'nacl_sdk_root',
                      arch='nosucharch')
    self.assertRaises(ValueError,
                      nacl_utils.ToolchainPath,
                      'nacl_sdk_root',
                      variant='nosuchvariant')

  def testGetJSONFromNexeSpec(self):
    # None and {} both serialize to an empty "program" manifest.
    valid_empty_json = '{\n "program": {\n }\n}\n'
    null_json = nacl_utils.GetJSONFromNexeSpec(None)
    self.assertEqual(null_json, valid_empty_json)
    empty_json = nacl_utils.GetJSONFromNexeSpec({})
    self.assertEqual(empty_json, valid_empty_json)
    nexes = {'x86-32': 'nacl_x86_32.nexe',
             'x86-64': 'nacl_x86_64.nexe',
             'arm': 'nacl_ARM.nexe'}
    json = nacl_utils.GetJSONFromNexeSpec(nexes)
    # Assert that the resulting JSON has all the right parts: the "nexes"
    # dict, followed by one entry for each architecture.  Also make sure that
    # the last entry doesn't have a trailing ','
    json_lines = json.splitlines()
    self.assertEqual(len(json_lines), 7)
    self.assertEqual(json_lines[0], '{')
    self.assertEqual(json_lines[1], ' "program": {')
    self.assertTrue(json_lines[2].endswith(','))
    self.assertTrue(json_lines[3].endswith(','))
    self.assertFalse(json_lines[4].endswith(','))
    self.assertEqual(json_lines[5], ' }')
    self.assertEqual(json_lines[6], '}')
    # Assert that the key-value pair lines have the right form.  The order
    # of the keys doesn't matter.  Note that the key values are enclosed in
    # "" (e.g. "x86-32") - this is intentional.
    valid_arch_keys = ['"x86-32"', '"x86-64"', '"arm"']
    for line in json_lines[2:4]:
      key_value = line.split(':')
      self.assertEqual(len(key_value), 3)
      self.assertTrue(key_value[0].lstrip().rstrip() in valid_arch_keys)

  def testGenerateNmf(self):
    # Assert that failure cases properly fail.
    self.assertRaises(ValueError, nacl_utils.GenerateNmf, None, None, None)
    self.assertRaises(ValueError, nacl_utils.GenerateNmf, [], [], {})

  def testGetArchFromSpec(self):
    # Missing spec or missing keys fall back to the module defaults.
    default_arch, default_subarch = nacl_utils.GetArchFromSpec(None)
    self.assertEqual(default_arch, nacl_utils.DEFAULT_ARCH)
    self.assertEqual(default_subarch, nacl_utils.DEFAULT_SUBARCH)
    default_arch, subarch = nacl_utils.GetArchFromSpec({'subarch': '64'})
    self.assertEqual(default_arch, nacl_utils.DEFAULT_ARCH)
    self.assertEqual(subarch, '64')
    arch, default_subarch = nacl_utils.GetArchFromSpec({'arch': 'x86'})
    self.assertEqual(arch, 'x86')
    self.assertEqual(default_subarch, nacl_utils.DEFAULT_SUBARCH)
    arch, subarch = nacl_utils.GetArchFromSpec({'arch': 'x86', 'subarch': '64'})
    self.assertEqual(arch, 'x86')
    self.assertEqual(subarch, '64')
def RunTests():
  """Run the nacl_utils test suite; return 0 on success, 1 on failure."""
  suite = unittest.TestLoader().loadTestsFromTestCase(TestNaClUtils)
  outcome = unittest.TextTestRunner(verbosity=2).run(suite)
  return 0 if outcome.wasSuccessful() else 1


if __name__ == '__main__':
  sys.exit(RunTests())
|
Llamatech/sis-fibo | model/vos/operacion.py | Python | gpl-2.0 | 2,405 | 0.012895 | #-*- coding:iso-8859-1 -*-
"""
Clase que modela la información de una cuenta en el sistema
"""
# NUMERO.valor.punto_atencion,cajero,cuenta,fecha
class Operacion(object):
    """Operation record: number, type, client, amount, service point,
    teller, account and date."""

    def __init__(self, numero, tipo_operacion, cliente, valor, punto_atencion, cajero, cuenta, fecha):
        self.numero = numero
        self.tipo_operacion = tipo_operacion
        self.cliente = cliente
        self.valor = valor
        self.punto_atencion = punto_atencion
        self.cajero = cajero
        self.cuenta = cuenta
        self.fecha = fecha

    def __repr__(self):
        # BUG FIX: values are now listed in the same order as the labels in
        # the template below.  The original listed tipo_operacion before
        # cliente, so each was printed under the other's label.
        args = [self.numero, self.cliente, self.tipo_operacion, self.valor,
                self.punto_atencion, self.cajero, self.cuenta, self.fecha]
        args = map(str, args)
        return "numero: %s; cliente:%s; tipo_operacion:%s; valor: %s; punto_atencion:%s; cajero:%s; cuenta: %s; fecha: %s" % tuple(args)

    def __str__(self):
        return self.__repr__()
class OperacionR(object):
    """Read-model of an operation joined with client, office and employee
    data; dict_repr() flattens it for serialization."""

    # Key order of the dict produced by dict_repr (kept identical to the
    # original hand-built dictionary).
    _KEYS = ('numero', 'fecha', 'tipo', 'id_cliente', 'nombre', 'apellido',
             'cuenta', 'prestamo', 'valor', 'punto_atencion', 'tipo_pa',
             'id_oficina', 'nombre_oficina', 'cajero', 'nombre_emp',
             'apellido_emp')

    def __init__(self, numero, tipo_op, tipo, id_cliente, nombre, apellido,
                 valor, punto_atencion, tipo_pa, id_oficina, nombre_oficina,
                 cajero, nombre_emp, apellido_emp, cuenta, prestamo, fecha):
        self.numero = numero
        self.tipo_op = tipo_op
        self.tipo = tipo
        self.id_cliente = id_cliente
        self.nombre = nombre
        self.apellido = apellido
        self.valor = valor
        self.punto_atencion = punto_atencion
        self.tipo_pa = tipo_pa
        self.id_oficina = id_oficina
        self.nombre_oficina = nombre_oficina
        self.cajero = cajero
        self.nombre_emp = nombre_emp
        self.apellido_emp = apellido_emp
        self.cuenta = cuenta
        self.prestamo = prestamo
        # The date is stored already formatted as dd/mm/yyyy.
        self.fecha = fecha.strftime('%d/%m/%Y')

    def dict_repr(self):
        """Return the operation as a plain dict keyed by attribute name."""
        return {key: getattr(self, key) for key in self._KEYS}
|
zcwaxsshjd/TimeGrinder | RunGrinder.py | Python | apache-2.0 | 1,020 | 0.009804 | __author__ = 'minosniu'
import sys
import os
import uuid
import shutil
path_root = 'D:\\S2_L_forward_ | reaching'
expt = 'FES_reaching'
date = '20150929'
analyst = 'zcwaxs'
addr = 'mongodb://localhost:27017/'
patient = 'CXM'
side = 'left'
movement = 'forwar | d_reaching'
if __name__ == '__main__':
    path = path_root
    # Plain (non-hidden) files only.
    files = [f for f in os.listdir(path)
             if os.path.isfile(os.path.join(path, f)) and not f.startswith('.')]
    for f in files:
        fullname = os.path.abspath(os.path.join(path, f))
        print('Processing "%s"' % (f))
        # Stage each input file under its own unique working directory.
        u = str(uuid.uuid1())
        workdir = './DataForS2/' + u
        os.mkdir(workdir)
        shutil.copyfile(fullname, workdir + '/' + f)
        # Build the command once so the echoed line always matches what is
        # executed (the original duplicated the format string in print and
        # os.system, which risks the two drifting apart).
        command = ('python Grinder.py "%s" "%s" "%s" "%s" "%s" "%s" "%s" "%s" "%s"'
                   % (workdir + '/' + f, expt, date, analyst, addr,
                      patient, side, movement, u))
        print(command)
        os.system(command)
|
noahbenson/neuropythy | neuropythy/datasets/__init__.py | Python | agpl-3.0 | 779 | 0.008986 | ####################################################################################################
# neuropythy/datase | ts/__init__.py
# Datasets for neuropythy.
# by Noah C. Benson
# mainly just to force these to load when datasets is loaded:
from .benson_winawer_20 | 18 import (BensonWinawer2018Dataset)
from .hcp import (HCPDataset, HCPRetinotopyDataset, HCPMetaDataset)
from .visual_performance_fields import (VisualPerformanceFieldsDataset)
from .hcp_lines import (HCPLinesDataset)
# TODO: https://openneuro.org/crn/datasets/ds001499/snapshots/1.1.0/download -- add the BOLD5000
# : dataset to neuropythy (see bold5000.org)
# import this last so that we get the most updated version of data
from .core import (data, Dataset)
|
InfluxGraph/influxgraph | influxgraph/classes/lock.py | Python | apache-2.0 | 756 | 0 | import fcntl
import logging
logger = logging.getLogger('influxgraph.lock')
class FileLock(object):
    """Advisory exclusive file lock based on fcntl.flock, usable either via
    acquire()/release() or as a context manager."""

    __slots__ = ('handle', 'filename')

    def __init__(self, filename):
        self.filename = filename
        try:
            self.handle = open(self.filename, 'w')
        except (IOError, OSError):
            logger.critical("Could not create/open lock file %s",
                            self.filename,)
            raise

    def acquire(self):
        # Blocks until the exclusive lock is granted.
        fcntl.flock(self.handle, fcntl.LOCK_EX)

    def release(self):
        fcntl.flock(self.handle, fcntl.LOCK_UN)

    def __enter__(self):
        self.acquire()
        # BUG FIX: return self so "with FileLock(p) as lock:" binds the lock
        # object instead of None.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()

    def __del__(self):
        # BUG FIX: if open() failed in __init__, 'handle' was never set and
        # the original raised AttributeError during garbage collection.
        handle = getattr(self, 'handle', None)
        if handle is not None:
            handle.close()
|
baryon5/mercury | codecompetitions/migrations/0007_auto_20140805_2253.py | Python | gpl-2.0 | 1,067 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add expected_output, input_data, read_from_file and time_limit
    fields to the Problem model (auto-generated schema migration)."""

    dependencies = [
        ('codecompetitions', '0006_auto_20140805_2234'),
    ]

    operations = [
        # Required field: existing rows are backfilled with '' once;
        # preserve_default=False drops that default afterwards.
        migrations.AddField(
            model_name='problem',
            name='expected_output',
            field=models.TextField(default=''),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='problem',
            name='input_data',
            field=models.TextField(blank=True, null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='problem',
            name='read_from_file',
            field=models.CharField(blank=True, null=True, max_length=80),
            preserve_default=True,
        ),
        # NOTE(review): time_limit units (presumably seconds) are not stated
        # here -- confirm against the judge runner.
        migrations.AddField(
            model_name='problem',
            name='time_limit',
            field=models.PositiveIntegerField(default=5),
            preserve_default=True,
        ),
    ]
|
ronaldbradford/os-demo | coverage/coverageindex.py | Python | apache-2.0 | 8,840 | 0.000113 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import division
import sys
import os
import urllib2
import httplib # For the httplib.BadStatusLine Exception
import json
import logging
import pickle
import time
from coveragelink import CoverageLink
LINKS_JSON_FILE = 'links.json'
ZUUL_STATUS_FILE = 'status.json'
DEFAULT_ZUUL_STATUS_URL = 'http://zuul.openstack.org/' + ZUUL_STATUS_FILE
DEFAULT_OUTPUT_LOGS = 'http://logs.openstack.org'
PURGE_SECONDS = 60 * 5 # 5 minutes
class CoverageIndex(object):
    @staticmethod
    def read_from_url(zuul_status_url=DEFAULT_ZUUL_STATUS_URL):
        """Get the provided Zuul status file via provided url"""
        try:
            res = urllib2.urlopen(zuul_status_url)
            json_contents = res.read()
            # Cache the raw status under /tmp for debugging/reprocessing.
            with open(os.path.join(os.sep, 'tmp', ZUUL_STATUS_FILE), 'w') as f:
                f.write(json_contents)
        except (urllib2.HTTPError, httplib.BadStatusLine):
            raise Exception('Unable to read Zuul status at ' + zuul_status_url)
        try:
            return json.loads(json_contents)
        except ValueError:
            # ValueError is what json.loads raises on malformed input.
            raise Exception('Unable to parse JSON Zuul status at ' +
                            zuul_status_url)
    @staticmethod
    def read_from_file(filename=ZUUL_STATUS_FILE):
        """Read the Zuul status from the provided filename"""
        # Two failure modes are reported separately: unreadable file vs
        # invalid JSON content.
        try:
            with open(filename, 'r') as f:
                json_contents = f.read()
        except IOError:
            raise Exception('Unable to read Zuul status from ' + filename)
        try:
            return json.loads(json_contents)
        except ValueError:
            raise Exception('Unable to parse JSON Zuul status from ' +
                            filename)
    def parse_status(self, data):
        """Parse the provided Zuul Status for post/check pipelines
        and look for coverage jobs
        """
        coverage_links = []
        for pipeline in data['pipelines']:
            # Coverage links are only collected from the post and check
            # pipelines; all others are ignored.
            if pipeline['name'] in ['post', 'check']:
                links = self.process_pipeline(pipeline['name'],
                                              pipeline['change_queues'])
                coverage_links += links
        return coverage_links
    def process_pipeline(self, type, queues):
        """For the given pipeline queues identify coverage jobs
        and generate the url for the project and pipeline type

        NOTE(review): the parameter name 'type' shadows the builtin.
        """
        pipeline_post = 'post'
        pipeline_check = 'check'
        coverage_suffix = '-coverage'
        report_dir = 'cover'
        links = []
        for queue in queues:
            if queue['heads'] and len(queue['heads']) > 0:
                for head in queue['heads'][0]:
                    # head['id'] is "<change>,<patchset>"; keep the change id.
                    id = head['id'].split(',', 2)[0]
                    for job in head['jobs']:
                        job_name = job['name']
                        # Project name is the job name minus '-coverage'.
                        project = job_name[:len(job_name) -
                                           len(coverage_suffix)]
                        uri = []
                        # For 'post' pipeline coverage jobs
                        if job_name.endswith(coverage_suffix) and job['uuid']:
                            uuid_prefix = job['uuid'][:7]
                            if type == pipeline_post:
                                # e.g. http://logs.openstack.org/b8/b88aa ...
                                # /post/ironic-coverage/53a1364/cover/
                                uri = [id[:2], id, type, job_name,
                                       uuid_prefix, report_dir]
                            elif type == pipeline_check:
                                # e.g. http://logs.openstack.org/27/219727/1
                                # /check/rally-coverage/3550a36/cover/
                                patchset = head['id'].split(',', 2)[1]
                                uri = [id[-2:], id, patchset, type, job_name,
                                       uuid_prefix, report_dir]
                            if uri:
                                url = '/'.join(['http://logs.openstack.org'] + uri)
                                logging.debug(url)
                                link = CoverageLink(project, url, type)
                                links.append(link)
        logging.info('Captured {} links for {} '.format(len(links), type))
        return links
    def validate_links(self, new_links):
        """Process the list of coverage urls to confirm they
        exist and have a total line
        """
        for entry in new_links:
            if entry:
                try:
                    entry.validate()
                except Exception as e:
                    logging.warn(str(e))
                    # Entries that fail validation are only dropped once
                    # they are older than PURGE_SECONDS.
                    if int(time.time()) - entry.created > PURGE_SECONDS:
                        logging.debug("Purging old link " + entry.url)
                        # NOTE(review): removing from the list being iterated
                        # skips the following element -- confirm intended.
                        new_links.remove(entry)
                        continue
                logging.info('URL verified ' + entry.url)
        return
    def read_existing_links(self, filename=LINKS_JSON_FILE):
        """Read the existing links file to append new validated
        coverage links
        """
        try:
            # Links are persisted with pickle in '<filename>.obj' (see
            # publish_links).  Only files written by this tool should be
            # loaded: pickle is unsafe on untrusted input.
            with open(filename + '.obj', 'rb') as fo:
                links = pickle.load(fo)
            logging.info('Loaded {} existing links'.format(len(links)))
        except IOError:
            # Missing/unreadable state file: start from an empty list.
            return []
        return links
    def trim_duplicates(self, links):
        """Look for older duplicate project entries and
        remove them.
        """
        new_links = []
        projects = []
        # Iterate in reverse so, per project, the later-appended entry is
        # the one that is kept.
        for entry in reversed(links):
            if entry.project not in projects:
                projects.append(entry.project)
                new_links.append(entry)
            else:
                logging.warn('Removal of ' + entry.url)
        logging.info('Removed {} duplicate project links'.format(
            len(links) - len(new_links)))
        return new_links
    def publish_links(self, links, filename=LINKS_JSON_FILE):
        """Write the current valid links to the specified file"""
        # Save the unique list of links either valid or invalid
        # for future reprocessing
        links = self.trim_duplicates(links)
        try:
            logging.info('Saving %d links for reuse' % (len(links)))
            with open(filename + '.obj', 'wb') as fo:
                pickle.dump(links, fo)
        except IOError as e:
            logging.error('I/O error({}): {}'.format(e.errno, e.strerror))

        # Publish the valid links to a JSON file
        valid_links = []
        for entry in links:
            if entry and entry.isValid():
                valid_links.append(entry)
        # Deduplicate again now that invalid entries have been filtered out.
        valid_links = self.trim_duplicates(valid_links)
        json_links = []
        for entry in valid_links:
            json_links.append(entry.json())
        try:
            with open(filename, 'w') as f:
                json.dump(json_links, f)
        except IOError as e:
            logging.error('I/O error({}): {}'.format(e.errno, e.strerror))
        return
def __init__(self, filename=None):
logging.info('Processing started')
# Determine if to process url or provided file
if filename:
data = self.read_from_file(sys.argv[1])
else:
try:
data = self.read_from_url()
# if there is an error reading url or parsing url, try again
except Exception:
logging.warning(
'First attempt to read from url failed, retrying')
time.sleep(2)
data = self.read_from |
eadgarchen/tensorflow | tensorflow/python/debug/cli/analyzer_cli.py | Python | apache-2.0 | 58,062 | 0.004254 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CLI Backend for the Analyzer Part of the Debugger.
The analyzer performs post hoc analysis of dumped intermediate tensors and
graph structure information from debugged Session.run() calls.
The other part of the debugger is the stepper (c.f. stepper_cli.py).
"""
from __future__ import absolute_import
from __future__ import division |
from __future__ import print_function
import argparse
import copy
import re
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
fro | m tensorflow.python.debug.cli import evaluator
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import source_utils
RL = debugger_cli_common.RichLine
# String constants for the depth-dependent hanging indent at the beginning
# of each line.
HANG_UNFINISHED = "| " # Used for unfinished recursion depths.
HANG_FINISHED = " "
HANG_SUFFIX = "|- "
# String constant for displaying depth and op type.
DEPTH_TEMPLATE = "(%d) "
OP_TYPE_TEMPLATE = "[%s] "
# String constants for control inputs/outputs, etc.
CTRL_LABEL = "(Ctrl) "
ELLIPSIS = "..."
SORT_TENSORS_BY_TIMESTAMP = "timestamp"
SORT_TENSORS_BY_DUMP_SIZE = "dump_size"
SORT_TENSORS_BY_OP_TYPE = "op_type"
SORT_TENSORS_BY_TENSOR_NAME = "tensor_name"
def _add_main_menu(output,
                   node_name=None,
                   enable_list_tensors=True,
                   enable_node_info=True,
                   enable_print_tensor=True,
                   enable_list_inputs=True,
                   enable_list_outputs=True):
  """Generate main menu for the screen output from a command.

  Args:
    output: (debugger_cli_common.RichTextLines) the output object to modify.
    node_name: (str or None) name of the node involved (if any). If None,
      the menu items node_info, list_inputs and list_outputs will be
      automatically disabled, overriding the values of arguments
      enable_node_info, enable_list_inputs and enable_list_outputs.
    enable_list_tensors: (bool) whether the list_tensor menu item will be
      enabled.
    enable_node_info: (bool) whether the node_info item will be enabled.
    enable_print_tensor: (bool) whether the print_tensor item will be enabled.
    enable_list_inputs: (bool) whether the item list_inputs will be enabled.
    enable_list_outputs: (bool) whether the item list_outputs will be enabled.
  """
  # Append order below determines the on-screen order of the menu items.
  menu = debugger_cli_common.Menu()

  menu.append(
      debugger_cli_common.MenuItem(
          "list_tensors", "list_tensors", enabled=enable_list_tensors))

  if node_name:
    menu.append(
        debugger_cli_common.MenuItem(
            "node_info",
            "node_info -a -d -t %s" % node_name,
            enabled=enable_node_info))
    menu.append(
        debugger_cli_common.MenuItem(
            "print_tensor",
            "print_tensor %s" % node_name,
            enabled=enable_print_tensor))
    menu.append(
        debugger_cli_common.MenuItem(
            "list_inputs",
            "list_inputs -c -r %s" % node_name,
            enabled=enable_list_inputs))
    menu.append(
        debugger_cli_common.MenuItem(
            "list_outputs",
            "list_outputs -c -r %s" % node_name,
            enabled=enable_list_outputs))
  else:
    # No node context: keep the node-specific items visible but disabled.
    menu.append(
        debugger_cli_common.MenuItem(
            "node_info", None, enabled=False))
    menu.append(
        debugger_cli_common.MenuItem("print_tensor", None, enabled=False))
    menu.append(
        debugger_cli_common.MenuItem("list_inputs", None, enabled=False))
    menu.append(
        debugger_cli_common.MenuItem("list_outputs", None, enabled=False))

  menu.append(
      debugger_cli_common.MenuItem("run_info", "run_info"))
  menu.append(
      debugger_cli_common.MenuItem("help", "help"))

  # Attach the menu so the CLI renderer displays it with this output.
  output.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
class DebugAnalyzer(object):
"""Analyzer for debug data from dump directories."""
_TIMESTAMP_COLUMN_HEAD = "t (ms)"
_DUMP_SIZE_COLUMN_HEAD = "Size (B)"
_OP_TYPE_COLUMN_HEAD = "Op type"
_TENSOR_NAME_COLUMN_HEAD = "Tensor name"
# Op types to be omitted when generating descriptions of graph structure.
_GRAPH_STRUCT_OP_TYPE_BLACKLIST = (
"_Send", "_Recv", "_HostSend", "_HostRecv", "_Retval")
  def __init__(self, debug_dump, config):
    """DebugAnalyzer constructor.

    Args:
      debug_dump: A DebugDumpDir object.
      config: A `cli_config.CLIConfig` object that carries user-facing
        configurations.
    """

    self._debug_dump = debug_dump
    self._evaluator = evaluator.ExpressionEvaluator(self._debug_dump)

    # Initialize tensor filters state.
    self._tensor_filters = {}

    self._build_argument_parsers(config)
    # Rebuild the argument parsers whenever the graph_recursion_depth
    # configuration value changes.
    config.set_callback("graph_recursion_depth",
                        self._build_argument_parsers)

    # TODO(cais): Implement list_nodes.
def _build_argument_parsers(self, config):
"""Build argument parsers for DebugAnalayzer.
Args:
config: A `cli_config.CLIConfig` object.
Returns:
A dict mapping command handler name to `ArgumentParser` instance.
"""
# Argument parsers for command handlers.
self._arg_parsers = {}
# Parser for list_tensors.
ap = argparse.ArgumentParser(
description="List dumped intermediate tensors.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-f",
"--tensor_filter",
dest="tensor_filter",
type=str,
default="",
help="List only Tensors passing the filter of the specified name")
ap.add_argument(
"-n",
"--node_name_filter",
dest="node_name_filter",
type=str,
default="",
help="filter node name by regex.")
ap.add_argument(
"-t",
"--op_type_filter",
dest="op_type_filter",
type=str,
default="",
help="filter op type by regex.")
ap.add_argument(
"-s",
"--sort_by",
dest="sort_by",
type=str,
default=SORT_TENSORS_BY_TIMESTAMP,
help=("the field to sort the data by: (%s | %s | %s | %s)" %
(SORT_TENSORS_BY_TIMESTAMP, SORT_TENSORS_BY_DUMP_SIZE,
SORT_TENSORS_BY_OP_TYPE, SORT_TENSORS_BY_TENSOR_NAME)))
ap.add_argument(
"-r",
"--reverse",
dest="reverse",
action="store_true",
help="sort the data in reverse (descending) order")
self._arg_parsers["list_tensors"] = ap
# Parser for node_info.
ap = argparse.ArgumentParser(
description="Show information about a node.", usage=argparse.SUPPRESS)
ap.add_argument(
"node_name",
type=str,
help="Name of the node or an associated tensor, e.g., "
"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
ap.add_argument(
"-a",
"--attributes",
dest="attributes",
action="store_true",
help="Also list attributes of the node.")
ap.add_argument(
"-d",
"--dumps",
dest="dumps",
action="store_true",
help="Also list dumps available from the node.")
ap.add_argument(
"-t",
"--traceback",
dest="traceback",
action="store_true",
help="Also include the traceback of the node's |
jimmycallin/master-thesis | architectures/nn_discourse_parser/nets/util.py | Python | mit | 11,972 | 0.005429 | from collections import Counter
import sys
import numpy as np
import scipy as sp
from lexical_structure import WordEmbeddingDict
import dense_feature_functions as df
def _get_word2vec_ff(embedding_path, projection):
    """Return the embedding featurizer callable for the requested pooling."""
    word2vec = df.EmbeddingFeaturizer(embedding_path)
    # Dispatch table replaces the original if/elif chain.
    dispatch = {
        'mean_pool': word2vec.mean_args,
        'sum_pool': word2vec.additive_args,
        'max_pool': word2vec.max_args,
        'top': word2vec.top_args,
    }
    if projection not in dispatch:
        raise ValueError('projection must be one of {mean_pool, sum_pool, max_pool, top}. Got %s ' % projection)
    return dispatch[projection]
def _get_zh_word2vec_ff(num_units, vec_type, projection, cdtb):
    """Return a Chinese (gigaword) embedding featurizer for the requested pooling."""
    prefix = 'zh_gigaword3'
    # cdtb restricts the embedding file to the CDTB vocabulary variant.
    suffix = '-cdtb_vocab.txt' if cdtb else '.txt'
    file_name = '/data/word_embeddings/%s-%s%s%s' \
        % (prefix, vec_type, num_units, suffix)
    word2vec = df.EmbeddingFeaturizer(file_name)
    dispatch = {
        'mean_pool': word2vec.mean_args,
        'sum_pool': word2vec.additive_args,
        'max_pool': word2vec.max_args,
        'top': word2vec.top_args,
    }
    if projection not in dispatch:
        raise ValueError('projection must be one of {mean_pool, sum_pool, max_pool, top}. Got %s ' % projection)
    return dispatch[projection]
def _sparse_featurize_relation_list(relation_list, ff_list, alphabet=None):
    """Apply every feature function to every relation; return (CSR matrix, alphabet).

    When alphabet is None a new feature->column mapping is grown (training);
    otherwise features absent from the given alphabet are silently dropped.
    """
    if alphabet is None:
        alphabet = {}
        grow_alphabet = True
    else:
        grow_alphabet = False
    feature_vectors = []
    print 'Applying feature functions...'
    for relation in relation_list:
        feature_vector_indices = []
        for ff in ff_list:
            feature_vector = ff(relation)
            for f in feature_vector:
                if grow_alphabet and f not in alphabet:
                    alphabet[f] = len(alphabet)
                if f in alphabet:
                    feature_vector_indices.append(alphabet[f])
        feature_vectors.append(feature_vector_indices)
    print 'Creating feature sparse matrix...'
    # lil_matrix supports cheap incremental row assignment; convert to CSR
    # at the end for efficient downstream arithmetic.
    feature_matrix = sp.sparse.lil_matrix((len(relation_list), len(alphabet)))
    for i, fv in enumerate(feature_vectors):
        feature_matrix[i, fv] = 1
    return feature_matrix.tocsr(), alphabet
def sparse_featurize(relation_list_list, ff_list):
    """Featurize each relation list; the alphabet grown on the first list
    (training split) is reused for all subsequent lists."""
    print 'Featurizing...'
    data_list = []
    alphabet = None
    for relation_list in relation_list_list:
        data, alphabet = _sparse_featurize_relation_list(relation_list, ff_list, alphabet)
        data_list.append(data)
    return (data_list, alphabet)
def convert_seconds_to_hours(num_seconds):
    """Break a duration in seconds into an (hours, minutes, seconds) tuple."""
    hours = num_seconds // 3600
    remainder = num_seconds % 3600
    return (hours, remainder // 60, remainder % 60)
def compute_mi(feature_matrix, label_vector):
    """Compute mutual information of each feature

    Assumes (confirm with callers): feature_matrix is a dense
    (num_rows x num_features) 0/1 array and label_vector holds integers
    in [0, num_labels).  Returns one MI score per feature column.
    """
    num_labels = np.max(label_vector) + 1
    num_features = feature_matrix.shape[1]
    num_rows = feature_matrix.shape[0]
    total = num_rows + num_labels
    c_y = np.zeros(num_labels)
    for l in label_vector:
        c_y[l] += 1.0
    # Add-one smoothing keeps all counts positive for the logs below.
    c_y += 1.0
    c_x_y = np.zeros((num_features, num_labels))
    c_x = np.zeros(num_features)
    for i in range(num_rows):
        c_x_y[:, label_vector[i]] += feature_matrix[i, :]
        c_x += feature_matrix[i, :]
    c_x_y += 1.0
    c_x += 1.0
    c_x_c_y = np.outer(c_x, c_y)
    c_not_x_c_y = np.outer((total - c_x), c_y)
    c_not_x_y = c_y - c_x_y
    # Sum the feature-present and feature-absent contributions per label,
    # then marginalize over labels.
    inner = c_x_y / total * np.log(c_x_y * total / c_x_c_y) + \
            c_not_x_y / total * np.log(c_not_x_y * total / c_not_x_c_y)
    mi_x = inner.sum(1)
    return mi_x
def prune_feature_matrices(feature_matrices, mi, num_features):
    """Keep, in every matrix, only the num_features columns with highest MI."""
    top_indices = mi.argsort()[-num_features:]
    return [matrix[:, top_indices] for matrix in feature_matrices]
class BrownDictionary(object):
    """Maps words to Brown cluster indices and builds cluster bag-of-features
    matrices for discourse relations."""

    def __init__(self):
        self.word_to_brown_mapping = {}  # word -> dense cluster index
        self.num_clusters = 0
        brown_cluster_file_name = 'brown-rcv1.clean.tokenized-CoNLL03.txt-c3200-freq1.txt'
        #brown_cluster_file_name = 'brown-rcv1.clean.tokenized-CoNLL03.txt-c320-freq1.txt'
        #brown_cluster_file_name = 'brown-rcv1.clean.tokenized-CoNLL03.txt-c100-freq1.txt'
        self._load_brown_clusters('resources/%s' % brown_cluster_file_name)

    def _load_brown_clusters(self, path):
        """Load the tab-separated cluster file; each distinct cluster string
        gets a dense index in order of first appearance.

        NOTE(review): on open() failure only a message is printed, after
        which the read below raises NameError -- confirm intended.
        """
        try:
            lexicon_file = open(path)
        except:
            print 'fail to load brown cluster data'
        cluster_set = set()
        for line in lexicon_file:
            cluster_assn, word, _ = line.split('\t')
            if cluster_assn not in cluster_set:
                cluster_set.add(cluster_assn)
            self.word_to_brown_mapping[word] = len(cluster_set) - 1
        self.num_clusters = len(cluster_set)

    def _get_brown_cluster_bag(self, tokens):
        """Return the set of cluster indices for tokens; OOV tokens skipped."""
        bag = set()
        for token in tokens:
            if token in self.word_to_brown_mapping:
                cluster_assn = self.word_to_brown_mapping[token]
                if cluster_assn not in bag:
                    bag.add(cluster_assn)
        return bag

    def get_brown_sparse_matrices_relations(self, relations):
        """Build one-hot cluster indicator matrices for arg1 and arg2."""
        X1 = sp.sparse.csr_matrix((len(relations), self.num_clusters),dtype=float)
        X2 = sp.sparse.csr_matrix((len(relations), self.num_clusters),dtype=float)
        for i, relation in enumerate(relations):
            bag1 = self._get_brown_cluster_bag(relation.arg_tokens(1))
            for cluster in bag1:
                X1[i, cluster] = 1.0
            bag2 = self._get_brown_cluster_bag(relation.arg_tokens(2))
            for cluster in bag2:
                X2[i, cluster] = 1.0
        return (X1, X2)

    def get_brown_matrices_data(self, relation_list_list, use_sparse):
        """Extract sparse

        For each directory, returns
            (X1, X2, Y)
        X1 and X2 are sparse matrices from arg1 and arg2 respectively.
        Y is an integer vector of type int32
        """
        data = []
        alphabet = None
        # load the data
        for relation_list in relation_list_list:
            # turn them into a data matrix
            print 'Making matrices'
            X1, X2 = self.get_brown_sparse_matrices_relations(relation_list)
            if not use_sparse:
                X1 = X1.toarray()
                X2 = X2.toarray()
            # The label alphabet grown on the first list is reused for the rest.
            Y, alphabet = level2_labels(relation_list, alphabet)
            data.append((X1, X2, Y))
        return (data, alphabet)
def label_vectorize(relation_list_list, lf):
    """Map each relation's label to an int index fixed by lf.valid_labels()."""
    alphabet = {label: index
                for index, label in enumerate(lf.valid_labels())}
    label_vectors = [
        np.array([alphabet[lf.label(rel)] for rel in relation_list], np.int64)
        for relation_list in relation_list_list
    ]
    return label_vectors, alphabet
def compute_baseline_acc(label_vector):
    """Accuracy of the majority-class baseline, rounded to 4 decimals."""
    top_count = Counter(label_vector).most_common(1)[0][1]
    # float() keeps the division exact under Python 2 as well.
    return round(float(top_count) / len(label_vector), 4)
def convert_level2_labels(relations):
    """Truncate each relation's first sense to its top two levels.

    Relations whose sense has no second level are excluded from the result.
    """
    retained = []
    for relation in relations:
        sense_parts = relation.senses[0].split('.')
        if len(sense_parts) < 2:
            continue  # no level-2 sense available; drop this relation
        relation.relation_dict['Sense'] = ['.'.join(sense_parts[:2])]
        retained.append(relation)
    return retained
def level2_labels(relations, alphabet=None):
if alphabet is None:
alphabet = {}
label_set = set()
for relation in relations:
label_set.add(relation.senses[0])
print label_set
sorted_label = sorted(list(label_set))
for i, label in enumerate(sorted_label):
alphabet[label] = i
label_vector = []
for relation in relations:
if relation.senses[0] not in alphabet:
alphabet[relation.senses[0]] = len(alphabet)
label_vector.append(alphabet[relation.senses[0]])
return np.array(labe |
geggo/pyface | pyface/multi_toolbar_window.py | Python | bsd-3-clause | 5,751 | 0.000522 | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" A top-level application window that supports multiple toolbars. """
from __future__ import absolute_import
# Major package imports.
import wx
# Enthought library imports
from pyface.action.api import ToolBarManager
from traits.api import Trait, TraitDict, TraitEnum, TraitList
# Local imports
from .application_window import ApplicationWindow
class MultiToolbarWindow(ApplicationWindow):
    """ A top-level application window that supports multiple toolbars.

    The multi-toolbar window has support for a menu bar, status bar, and
    multiple toolbars (all of which are optional).
    """

    # The toolbars in the order they were added to the window.
    _tool_bar_managers = Trait([], TraitList(Trait(ToolBarManager)))

    # Map of toolbar to screen location.
    _tool_bar_locations = Trait({},
                                TraitDict(Trait(ToolBarManager),
                                          TraitEnum('top', 'bottom',
                                                    'left', 'right')))

    ###########################################################################
    # Protected 'Window' interface.
    ###########################################################################

    def _create_contents(self, parent):
        """ Create the window contents, then the trim (icon/menu/toolbars). """
        panel = super(MultiToolbarWindow, self)._create_contents(parent)
        self._create_trim_widgets(parent)
        return panel

    def _create_trim_widgets(self, parent):
        """ Create the optional decorations around the main content. """
        # The frame's icon.
        self._set_window_icon()

        # Add the (optional) menu bar.
        self._create_menu_bar(parent)

        # Add the (optional) status bar.
        self._create_status_bar(parent)

        # Add the (optional) tool bars.
        self.sizer = self._create_tool_bars(parent)
        return

    def _create_tool_bars(self, parent):
        """ Create the tool bars for this window. """
        if len(self._tool_bar_managers) > 0:
            # Create a top level sizer to handle the main layout and attach
            # it to the parent frame.
            self.main_sizer = sizer = wx.BoxSizer(wx.VERTICAL)
            parent.SetSizer(sizer)
            parent.SetAutoLayout(True)

            # Each toolbar nests a child sizer; content goes in the last one.
            for tool_bar_manager in self._tool_bar_managers:
                location = self._tool_bar_locations[tool_bar_manager]
                sizer = self._create_tool_bar(parent, sizer, tool_bar_manager,
                                              location)
            return sizer
        return None

    def _create_tool_bar(self, parent, sizer, tool_bar_manager, location):
        """ Create and add the toolbar to the parent window at the specified
        location.

        Returns the sizer where the remaining content should be added.  For
        'top' and 'left' toolbars, we can return the same sizer that contains
        the toolbar, because subsequent additions will be added below or to
        the right of those toolbars.  For 'right' and 'bottom' toolbars, we
        create a spacer to hold the content.
        """
        tool_bar = tool_bar_manager.create_tool_bar(parent)

        if location == 'top':
            child_sizer = wx.BoxSizer(wx.VERTICAL)
            child_sizer.Add(tool_bar, 0, wx.ALL | wx.ALIGN_LEFT | wx.EXPAND)
            sizer.Add(child_sizer, 1, wx.ALL | wx.EXPAND)

        if location == 'bottom':
            toolbar_sizer = wx.BoxSizer(wx.VERTICAL)
            # Add the placeholder for the content before adding the toolbar.
            child_sizer = self._create_content_spacer(toolbar_sizer)
            # Add the tool bar.
            toolbar_sizer.Add(tool_bar, 0, wx.ALL | wx.ALIGN_TOP | wx.EXPAND)
            sizer.Add(toolbar_sizer, 1, wx.ALL | wx.EXPAND)

        if location == 'left':
            child_sizer = wx.BoxSizer(wx.HORIZONTAL)
            child_sizer.Add(tool_bar, 0, wx.ALL | wx.ALIGN_TOP | wx.EXPAND)
            sizer.Add(child_sizer, 1, wx.ALL | wx.EXPAND)

        if location == 'right':
            toolbar_sizer = wx.BoxSizer(wx.HORIZONTAL)
            # Add the placeholder for the content before adding the toolbar.
            child_sizer = self._create_content_spacer(toolbar_sizer)
            # Add the tool bar.
            toolbar_sizer.Add(tool_bar, 0, wx.ALL | wx.ALIGN_TOP | wx.EXPAND)
            sizer.Add(toolbar_sizer, 1, wx.ALL | wx.EXPAND)

        return child_sizer

    def _create_content_spacer(self, sizer):
        """ Add and return a spacer sizer that will later hold the content. """
        spacer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(spacer, 1, wx.ALL | wx.EXPAND)
        return spacer

    ###########################################################################
    # Public MultiToolbarWindow interface
    ###########################################################################

    def add_tool_bar(self, tool_bar_manager, location='top'):
        """ Add a toolbar in the specified location.

        Valid locations are 'top', 'bottom', 'left', and 'right'
        """
        self._tool_bar_managers.append(tool_bar_manager)
        self._tool_bar_locations[tool_bar_manager] = location
#### EOF ######################################################################
|
zerovip/link-link | game11_11/game11_11/urls.py | Python | apache-2.0 | 824 | 0.004854 | """game11_11 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
urlpatterns = [
    # Admin route left disabled; `admin` is still imported above.
    # url(r'^admin/', admin.site.urls),
    url(r'^link_link/', include('link_link.urls')),  # game routes live in the link_link app
]
|
andrewyoung1991/supriya | supriya/tools/ugentools/LPZ2.py | Python | mit | 2,895 | 0.002763 | # -*- encoding: utf-8 -*-
from supriya.tools.ugentools.Filter import Filter
class LPZ2(Filter):
    r'''A two zero fixed lowpass filter.

    ::

        >>> source = ugentools.In.ar(bus=0)
        >>> lpz_2 = ugentools.LPZ2.ar(
        ...     source=source,
        ...     )
        >>> lpz_2
        LPZ2.ar()

    '''

    ### CLASS VARIABLES ###

    __documentation_section__ = 'Filter UGens'

    __slots__ = ()

    _ordered_input_names = (
        'source',
        )

    _valid_calculation_rates = None

    ### INITIALIZER ###

    def __init__(
        self,
        calculation_rate=None,
        source=None,
        ):
        Filter.__init__(
            self,
            calculation_rate=calculation_rate,
            source=source,
            )

    ### PUBLIC METHODS ###

    @classmethod
    def ar(
        cls,
        source=None,
        ):
        r'''Constructs an audio-rate LPZ2.

        ::

            >>> source = ugentools.In.ar(bus=0)
            >>> lpz_2 = ugentools.LPZ2.ar(
            ...     source=source,
            ...     )
            >>> lpz_2
            LPZ2.ar()

        Returns ugen graph.
        '''
        # Imported lazily to avoid a circular import at module load time.
        from supriya.tools import synthdeftools
        calculation_rate = synthdeftools.CalculationRate.AUDIO
        ugen = cls._new_expanded(
            calculation_rate=calculation_rate,
            source=source,
            )
        return ugen

    # def coeffs(): ...

    @classmethod
    def kr(
        cls,
        source=None,
        ):
        r'''Constructs a control-rate LPZ2.

        ::

            >>> source = ugentools.In.ar(bus=0)
            >>> lpz_2 = ugentools.LPZ2.kr(
            ...     source=source,
            ...     )
            >>> lpz_2
            LPZ2.kr()

        Returns ugen graph.
        '''
        from supriya.tools import synthdeftools
        calculation_rate = synthdeftools.CalculationRate.CONTROL
        ugen = cls._new_expanded(
            calculation_rate=calculation_rate,
            source=source,
            )
        return ugen

    # def magResponse(): ...

    # def magResponse2(): ...

    # def magResponse5(): ...

    # def magResponseN(): ...

    # def scopeResponse(): ...

    ### PUBLIC PROPERTIES ###

    @property
    def source(self):
        r'''Gets `source` input of LPZ2.

        ::

            >>> source = ugentools.In.ar(bus=0)
            >>> lpz_2 = ugentools.LPZ2.ar(
            ...     source=source,
            ...     )
            >>> lpz_2.source
            OutputProxy(
                source=In(
                    bus=0.0,
                    calculation_rate=CalculationRate.AUDIO,
                    channel_count=1
                    ),
                output_index=0
                )

        Returns ugen input.
        '''
        index = self._ordered_input_names.index('source')
        return self._inputs[index]
cloudify-cosmo/version-tool | version_control/tests/resources/cloudify-test2/input/cloudify-module/setup.py | Python | apache-2.0 | 1,244 | 0 | #########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from setuptools import setup
# Package metadata for the Cloudify agent installer plugin.
setup(
    name='cloudify-agent-installer-plugin',
    version='3.1a4',
    author='elip',
    author_email='elip@gigaspaces.com',
    packages=['worker_installer'],
    license='LICENSE',
    description='Plugin for installing a Cloudify agent on a machine',
    # NOTE(review): requirement pins mix 3.1a4/3.1b4/1.2a4 pre-releases --
    # confirm they are intentionally out of sync with this package version.
    install_requires=[
        'cloudify-plugins-common==3.1b4',
        'cloudify-script-plugin==1.2a4',
        'cloudify-openstack-provider==3.1b4',
        'cloudify-dsl-parser>=3.1b4',
        'fabric==1.8.3',
        'jinja2==2.7.2'
    ],
    tests_require=[
        "nose",
        "python-vagrant"
    ]
)
|
WalissonRodrigo/SIMPLEMOOC | simplemooc/accounts/urls.py | Python | gpl-3.0 | 686 | 0.010204 | from django.conf.urls import include, url
# Import only the views actually routed below (replaces a wildcard import,
# which risked silently shadowing names).  django.contrib.auth's login/logout
# are imported afterwards, preserving the original name-resolution order.
from simplemooc.accounts.views import (
    dashboard,
    edit,
    edit_password,
    password_reset,
    password_reset_confirm,
    register,
)
from django.contrib.auth.views import logout, login

urlpatterns = [
    url(r'^$', dashboard, name='dashboard'),
    url(r'^entrar/$', login, {'template_name': 'accounts/login.html'}, name='login'),
    url(r'^sair/$', logout, {'next_page': 'core:home'}, name='logout'),
    url(r'^nova-senha/$', password_reset, name='password_reset'),
    url(r'^confirmar-nova-senha/(?P<key>\w+)/$', password_reset_confirm, name='password_reset_confirm'),
    url(r'^cadastre-se/$', register, name='register'),
    url(r'^editar/$', edit, name='edit'),
    url(r'^editar-senha/$', edit_password, name='edit_password'),
]
lsjostro/pulp_win | test/upload_msi.py | Python | gpl-2.0 | 6,243 | 0.006728 | #!/usr/bin/python
import os
import requests
from requests.exceptions import HTTPError
import hashlib
import json
from optparse import OptionParser
from glob import glob
from msilib import *
CHUNK_SIZE = 1048576 # 1 Mb chunk size
class MsiUploader():
    """Upload MSI packages to a Pulp server through its v2 REST API."""

    def __init__(self, url, user, password):
        self.base_url = url
        self.basic_auth = (user, password)

    def get_repo(self, repo_id):
        """Return True if repository `repo_id` exists on the server."""
        repo_path = "/pulp/api/v2/repositories/%s/" % repo_id
        r = requests.get(self.base_url + repo_path, auth=self.basic_auth, verify=False)
        if r.status_code == 404:
            return False
        return True

    def create_repo(self, repo_id):
        """Create a 'win-repo' repository with its importer and distributor."""
        repo_base = "/pulp/api/v2/repositories/"
        importer_path = repo_base + "%s/importers/" % repo_id
        distributor_path = repo_base + "%s/distributors/" % repo_id
        repo_metadata = {
            "display_name": "MSI repo: %s" % repo_id,
            "id": repo_id,
            "notes": { "_repo-type" : "win-repo" }
        }
        r = requests.post(self.base_url + repo_base, auth=self.basic_auth, verify=False, data=json.dumps(repo_metadata))
        r.raise_for_status()
        importer_data = {
            "importer_type_id": "win_importer",
            "importer_config": {}
        }
        distributor_data = {
            "distributor_type_id": "win_distributor",
            "distributor_id": "win_distributor",
            "distributor_config": { "http" : True, "https" : False, "relative_url" : repo_id },
            "auto_publish": True
        }
        r = requests.post(self.base_url + importer_path, auth=self.basic_auth, verify=False, data=json.dumps(importer_data))
        r.raise_for_status()
        r = requests.post(self.base_url + distributor_path, auth=self.basic_auth, verify=False, data=json.dumps(distributor_data))
        r.raise_for_status()

    def upload_file(self, filename, repo_id):
        """Upload `filename` in 1 MiB chunks, then import it into `repo_id`.

        The server-side upload request is always deleted afterwards, even
        when the upload or import fails.
        """
        repo_path = "/pulp/api/v2/repositories/%s/" % repo_id
        upload_req_path = "/pulp/api/v2/content/uploads/"
        import_path = repo_path + "actions/import_upload/"
        name = self._get_msi_property(filename, "ProductName")
        version = self._get_msi_property(filename, "ProductVersion")
        r = requests.post(self.base_url + upload_req_path, auth=self.basic_auth, verify=False)
        r.raise_for_status()
        upload_id = r.json()['upload_id']
        try:
            file_size = os.path.getsize(filename)
            offset = 0
            m = hashlib.new('md5')
            f = open(filename, 'rb')
            while True:
                # Load the chunk to upload
                f.seek(offset)
                data = f.read(CHUNK_SIZE)
                if not data:
                    break
                # Fold the chunk into the md5 digest as we go.
                m.update(data)
                # PUT the chunk at its byte offset.
                upload_path = "/pulp/api/v2/content/uploads/%s/%s/" % (upload_id, offset)
                r = requests.put(self.base_url + upload_path, auth=self.basic_auth, verify=False, data=data)
                r.raise_for_status()
                offset = min(offset + CHUNK_SIZE, file_size)
            f.close()
            md5sum = m.hexdigest()
            unit_metadata = {
                "upload_id": upload_id,
                "unit_type_id": "msi",
                "unit_key": { "name": name, "checksum": md5sum, "version": version, "checksumtype": "md5" },
                "unit_metadata": { "filename": os.path.basename(filename) }
            }
            # Post metadata for unit
            r = requests.post(self.base_url + import_path, auth=self.basic_auth, verify=False, data=json.dumps(unit_metadata))
            r.raise_for_status()
            print("%s (%s): OK!" % (filename, md5sum))
        except (HTTPError, IOError):
            raise
        finally:
            # Always discard the server-side upload request.
            delete_path = "/pulp/api/v2/content/uploads/%s/" % upload_id
            r = requests.delete(self.base_url + delete_path, auth=self.basic_auth, verify=False)
            r.raise_for_status()
        return True

    def publish_repo(self, repo_id):
        """Trigger a publish of `repo_id` via its win_distributor."""
        repo_path = "/pulp/api/v2/repositories/%s/" % repo_id
        publish_path = repo_path + "actions/publish/"
        distributor_data = {
            "id": "win_distributor",
            "override_config": {}
        }
        r = requests.post(self.base_url + publish_path, auth=self.basic_auth, verify=False, data=json.dumps(distributor_data))
        r.raise_for_status()
        return True

    def _get_msi_property(self, path, prop):
        """Read a single Property-table value from the MSI database at `path`."""
        db = OpenDatabase(path, MSIDBOPEN_READONLY)
        view = db.OpenView("SELECT Value FROM Property WHERE Property='%s'" % prop)
        view.Execute(None)
        result = view.Fetch()
        return result.GetString(1)
def parse_options():
    """Parse command-line options; abort with a usage error if any is missing."""
    parser = OptionParser()
    option_specs = [
        ('-f', '--files', 'files',
         "MSI file[s] to upload (wild cards are supported, e.g *.msi)"),
        ('-u', '--username', 'username', "Username"),
        ('-p', '--password', 'password', "Password"),
        ('-b', '--base-url', 'base_url', "Base URL to Pulp server"),
        ('-r', '--repo-id', 'repo_id', "Repo ID"),
    ]
    for short_flag, long_flag, dest, help_text in option_specs:
        parser.add_option(short_flag, long_flag, type="string",
                          dest=dest, help=help_text)
    options, _ = parser.parse_args()
    required = (options.files, options.repo_id, options.base_url,
                options.username, options.password)
    if not all(required):
        parser.error("use --help for help ")
    return options
def main():
    """Entry point: ensure the repo exists, upload every matched file, publish."""
    options = parse_options()
    # Expand wildcards locally (e.g. *.msi); fail fast if nothing matches.
    files = glob(options.files)
    if not files:
        raise OSError("File not found")
    m = MsiUploader(options.base_url, options.username, options.password)
    ## Check if repo exists else create it.
    if not m.get_repo(options.repo_id):
        m.create_repo(options.repo_id)
    ## Upload files
    for f in files:
        m.upload_file(f, options.repo_id)
    ## Publish unit
    m.publish_repo(options.repo_id)

if __name__ == '__main__':
    main()
|
CINPLA/expipe-dev | exana/exana/_version.py | Python | gpl-3.0 | 18,442 | 0 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.17 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These assignments are rewritten in-place by `git archive` (via the
    # export-subst attribute).  setup.py/versioneer.py greps for the variable
    # names, so each one must stay defined on a line of its own.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are filled in when 'setup.py versioneer' creates
    # _version.py.
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "",
        "versionfile_source": "exana/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Raised when one version-discovery strategy does not apply.

    Callers catch it and fall through to the next strategy
    (keywords, VCS query, parent-directory name).
    """
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Record f as HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Try each candidate executable in `commands` until one spawns; return
    (stdout, returncode) for it.  Returns (None, None) when no candidate
    could be started, and (None, returncode) on a non-zero exit.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # This candidate doesn't exist; try the next one.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        leaf = os.path.basename(root)
        if leaf.startswith(parentdir_prefix):
            return {"version": leaf[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Returns a dict with any of the keys 'refnames', 'full' and 'date' that
    could be parsed; an unreadable file yields an empty dict.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # `with` guarantees the handle is closed even if parsing raises
        # (the original closed it manually and could leak on error).
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty |
ahtn/keyplus | host-software/keyplus/keycodes/lang_map/English0.py | Python | mit | 2,332 | 0.000433 | # Copyright 2018 jem@seethis.link
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
from hid_keycodes import *
lang = 'English'
country = 'Canada'

# Maps HID scancodes to the characters produced at each modifier level.
# Empty strings mean "no character at that level".
scancode_map = {
    KC_0: ('0', ')', ']', '', '', ''),
    KC_1: ('1', '!', '', '', '', ''),
    KC_2: ('2', '@', '', '', '', ''),
    KC_3: ('3', '#', '', '', '', ''),
    KC_4: ('4', '$', '', '', '', ''),
    KC_5: ('5', '%', '', '', '', ''),
    KC_6: ('6', '?', '', '', '', ''),
    KC_7: ('7', '&', '{', '', '', ''),
    KC_8: ('8', '*', '}', '', '', ''),
    KC_9: ('9', '(', '[', '', '', ''),
    KC_A: ('a', 'A', '', '', '', ''),
    KC_B: ('b', 'B', '', '', '', ''),
    KC_C: ('c', 'C', '', '', '', ''),
    KC_D: ('d', 'D', '', '', '', ''),
    KC_E: ('e', 'E', '', '', '', ''),
    KC_F: ('f', 'F', '', '', '', ''),
    KC_G: ('g', 'G', '', '', '', ''),
    KC_H: ('h', 'H', '', '', '', ''),
    KC_I: ('i', 'I', '', '', '', ''),
    KC_J: ('j', 'J', '', '', '', ''),
    KC_K: ('k', 'K', '', '', '', ''),
    KC_L: ('l', 'L', '', '', '', ''),
    KC_M: ('m', 'M', '', '', '', ''),
    KC_N: ('n', 'N', '', '', '', ''),
    KC_O: ('o', 'O', '', '', '', ''),
    KC_P: ('p', 'P', '', '', '', ''),
    KC_Q: ('q', 'Q', '', '', '', ''),
    KC_R: ('r', 'R', '', '', '', ''),
    KC_S: ('s', 'S', '', '', '', ''),
    KC_T: ('t', 'T', '', '', '', ''),
    KC_U: ('u', 'U', '', '', '', ''),
    KC_V: ('v', 'V', '', '', '', ''),
    KC_W: ('w', 'W', '', '', '', ''),
    KC_X: ('x', 'X', '»', '', '', ''),
    KC_Y: ('y', 'Y', '', '', '', ''),
    KC_Z: ('z', 'Z', '«', '', '', ''),
    KC_APOSTROPHE: ('è', 'È', '', '', '', ''),
    KC_COMMA: (',', "'", '<', '', '', ''),
    KC_EQUAL: ('=', '+', '¬', '', '', ''),
    # NOTE(review): KC_FORWARD_SLASH appears three times below; in a dict
    # literal only the LAST entry survives.  One duplicate even carries
    # Cyrillic legends ('ё'), suggesting rows from another layout leaked in.
    # Kept verbatim to preserve current behavior -- TODO confirm the
    # intended scancodes for these rows.
    KC_FORWARD_SLASH: ('-', '_', '-', '-', '_', ''),
    KC_FORWARD_SLASH: ('/', '?', '', 'ё', 'Ё', ''),
    KC_FORWARD_SLASH: ('é', 'É', '', '', '', ''),
    KC_GRAVE: ('/', '\\', '|', '', '', ''),
    KC_ISO_BACK_SLASH: ('ù', 'Ù', '', '', '', ''),
    KC_ISO_HASH: ('à', 'À', '', '', '', ''),
    KC_LEFT_BRACKET: ('̂', '̈', '̀', '', '', ''),
    KC_MINUS: ('-', '_', '', '', '', ''),
    # NOTE(review): KC_PERIOD is also duplicated -- the last entry wins.
    KC_PERIOD: ('.', '"', '>', '', '', ''),
    KC_PERIOD: ('.', '>', '', 'ю', 'Ю', ''),
    KC_RIGHT_BRACKET: ('ç', 'Ç', '~', '', '', ''),
    KC_SEMICOLON: (';', ':', '°', '', '', ''),
    KC_SPACEBAR: (' ', ' ', '', '', '', ''),
    }
10se1ucgo/cassiopeia | cassiopeia/datastores/merakianalyticscdn.py | Python | mit | 1,352 | 0.002219 | from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import DataSource, PipelineContext, NotFoundError
from ..dto.patch import PatchListDto
from .common import HTTPClient, HTTPError
# Prefer ujson for speed; fall back to the stdlib module and give it the
# same `decode` alias that ujson exposes so callers can use either.
try:
    import ujson as json
except ImportError:
    import json
    json.decode = json.loads
T = TypeVar("T")
class MerakiAnalyticsCDN(DataSource):
    """Data source backed by the Meraki Analytics CDN."""

    def __init__(self, http_client: HTTPClient = None) -> None:
        # Fall back to a private client when the pipeline doesn't share one.
        if http_client is None:
            self._client = HTTPClient()
        else:
            self._client = http_client

    @DataSource.dispatch
    def get(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> T:
        pass

    @DataSource.dispatch
    def get_many(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
        pass

    ##############
    # Patch List #
    ##############

    @get.register(PatchListDto)
    def get_patch_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> PatchListDto:
        """Fetch and deserialize the patch list; HTTP failures surface as NotFoundError."""
        url = "https://cdn.merakianalytics.com/riot/lol/resources/patches.json"
        try:
            body = self._client.get(url)[0]
            body = json.decode(body)
        except HTTPError as e:
            raise NotFoundError(str(e)) from e
        return PatchListDto(**body)
SJIT-Hackerspace/SJIT-CodingPortal | hackerspace/migrations/0010_auto_20160906_1442.py | Python | apache-2.0 | 2,386 | 0.001676 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-09-06 09:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the hackerspace app.

    Drops the four per-option columns on ProgrammingQuestion, adds
    Output/TestCases fields, adds Answer fields to Quiz and Verbal, and
    refreshes several choices lists.
    """

    dependencies = [
        ('hackerspace', '0009_verbal_subcategory'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='programmingquestion',
            name='op1',
        ),
        migrations.RemoveField(
            model_name='programmingquestion',
            name='op2',
        ),
        migrations.RemoveField(
            model_name='programmingquestion',
            name='op3',
        ),
        migrations.RemoveField(
            model_name='programmingquestion',
            name='op4',
        ),
        migrations.AddField(
            model_name='programmingquestion',
            name='Output',
            field=models.CharField(default='1', max_length=200),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='programmingquestion',
            name='TestCases',
            field=models.CharField(default='2', max_length=200, verbose_name='Test Cases'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='quiz',
            name='Answer',
            field=models.CharField(default='3', max_length=200),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='verbal',
            name='Answer',
            field=models.CharField(default='3', max_length=200),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='programmingquestion',
            name='subCategory',
            field=models.CharField(choices=[(1, 'Strings'), (2, 'Dynamic Programming'), (3, 'Arrays'), (4, 'Data Structures')], max_length=200),
        ),
        migrations.AlterField(
            model_name='test',
            name='ProgrammingTagName',
            field=models.CharField(choices=[(1, 'Strings'), (2, 'Dynamic Programming'), (3, 'Arrays'), (4, 'Data Structures')], max_length=200, verbose_name='Programming Tags'),
        ),
        migrations.AlterField(
            model_name='verbal',
            name='subCategory',
            field=models.CharField(choices=[(1, 'Comprehension'), (2, 'Error Identification')], max_length=200),
        ),
    ]
Programmica/python-gtk3-tutorial | _examples/overlay.py | Python | cc0-1.0 | 798 | 0.002506 | #!/usr/bin/env python
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

# Top-level window hosting a Gtk.Overlay demo.
window = Gtk.Window()
window.set_default_size(200, 200)
window.connect("destroy", Gtk.main_quit)

# The overlay lets extra widgets float above its main child.
overlay = Gtk.Overlay()
window.add(overlay)

textview = Gtk.TextView()
textview.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
textbuffer = textview.get_buffer()
textbuffer.set_text("Welcome to the PyGObject Tutorial\n\nThis guide aims to provide an introduction to using Python and GTK+.\n\nIt includes many sample code files and exercises for building your knowledge of the language.", -1)
overlay.add(textview)

# A centered button drawn on top of the text view.
button = Gtk.Button(label="Overlayed Button")
button.set_valign(Gtk.Align.CENTER)
button.set_halign(Gtk.Align.CENTER)
overlay.add_overlay(button)

overlay.show_all()
window.show_all()
Gtk.main()
|
andela/troupon | troupon/cart/views.py | Python | mit | 6,304 | 0.000159 | import os
from carton.cart import Cart
from django.contrib import messages
from django.shortcuts import render, redirect
from django.template.response import TemplateResponse
from django.views.generic import View
from authentication.views import LoginRequiredMixin
from deals.models import Deal
from .models import UserShippingDetails
class CheckoutView(LoginRequiredMixin, View):
    """
    Creates a checkout page.

    Attributes:
        template_name: name of the template that renders the view
        stripe_secret_api_key: the secret API key for our stripe account
        stripe_publishable_api_key: the publishable API key
    """
    template_name = 'cart/checkout.html'
    # Read once at import time -- the env vars must be set before Django
    # loads this module.
    stripe_secret_api_key = os.getenv('STRIPE_SECRET_API_KEY')
    stripe_publishable_api_key = os.getenv('STRIPE_PUBLISHABLE_API_KEY')

    def get(self, request, **kwargs):
        """
        Create checkout page.

        Gets shopping information from cart and sends it to the payment app
        in form of a dict. It then renders the checkout template which can then
        be used to pay.

        Args:
            request: The incoming get request object
            **kwargs: Any keyword arguments passed to the function

        Returns:
            A template rendered with the payment details context
        """
        cart = Cart(request.session)
        amount = cart.total
        # Stripe amounts are in the smallest currency unit (cents).
        # Multiply BEFORE truncating to int: the previous int(amount) * 100
        # dropped the fractional part, charging e.g. 19.99 as 1900 cents.
        amount_in_cents = int(amount * 100)
        title = "Total payment expected"
        description = "Troupon shopping"
        payment_details = {
            "title": title,
            "key": self.stripe_publishable_api_key,
            "amount": amount_in_cents,
            "description": description,
            "currency": "usd",
        }
        request.session['payment_details'] = payment_details
        context = {
            "amount": amount,
            "title": title,
            "description": description,
            "payment_details": payment_details,
        }
        return render(request, self.template_name, context)
class AddToCartView(LoginRequiredMixin, View):
    """
    Add items to cart.

    When a logged in person clicks on Add to cart on a deal, this view
    adds the item to the cart.

    Attributes:
        LoginRequiredMixin: Ensures the user is logged in
        View: Normal django view
    """

    def post(self, request, **kwargs):
        """
        Add item to cart.

        Args:
            request: The incoming post request object
            **kwargs: Any keyword arguments passed to the function

        Returns:
            A redirect to the deals homepage
        """
        dealid = request.POST.get('dealid')
        # NOTE(review): an unknown id raises Deal.DoesNotExist (HTTP 500) --
        # confirm whether get_object_or_404 was intended here.
        deal = Deal.objects.get(id=dealid)
        cart = Cart(request.session)
        # carton stores the deal and its unit price in the session cart.
        cart.add(deal, price=deal.price)
        return redirect('/')
class AddShippingDetails(LoginRequiredMixin, View):
    """
    Add shipping details of user.

    When a logged in user clicks on proceed to checkout this view
    gets the shipping details of the user

    Attributes:
        LoginRequiredMixin: Ensures the user is logged in
        View: Normal django view
    """

    def get(self, request):
        """Render the shipping-details form with the current cart."""
        cart = Cart(request.session)
        context = {'cart': cart}
        return TemplateResponse(request, 'cart/shipping.html', context)

    def post(self, request, **kwargs):
        """
        Add shipping details.

        Args:
            request: The incoming post request object
            **kwargs: Any keyword arguments passed to the function

        Returns:
            The rendered checkout page (a TemplateResponse, not a redirect).
        """
        user = request.user
        # NOTE(review): fields are saved unvalidated and a new row is created
        # on every POST -- confirm whether update-or-create was intended.
        street = request.POST.get('street')
        state = request.POST.get('state')
        postal = request.POST.get('postal')
        telephone = request.POST.get('telephone')
        shipping = UserShippingDetails(user=user, street=street, postal=postal, state=state, telephone=telephone)
        shipping.save()
        cart = Cart(request.session)
        context = {'cart': cart}
        return TemplateResponse(request, 'cart/checkout.html', context)
class ViewCartView(LoginRequiredMixin, View):
    """
    Allow user to view all the items in the cart.

    A logged in user with items in the cart can see a
    summary of them and their prices.

    Attributes:
        LoginRequiredMixin: Ensures the user is logged in
        View: Normal django view
    """

    def get(self, request, **kwargs):
        """
        Show cart items.

        Args:
            request: The incoming get request object
            **kwargs: Any keyword arguments passed to the function

        Returns:
            A template rendered with all the cart items.
        """
        # The session-backed cart exposes items and totals to the template.
        cart = Cart(request.session)
        context = {'cart': cart}
        return TemplateResponse(request, 'cart/cart.html', context)
class ClearCartView(LoginRequiredMixin, View):
    """
    Clear items in cart.

    When triggered, removes every item in the cart session
    and leaves it empty.

    Attributes:
        LoginRequiredMixin: Ensures the user is logged in
        View: Normal django view
    """

    def get(self, request, **kwargs):
        """
        Get cart from session and remove everything from it.

        Args:
            request: The incoming get request object
            **kwargs: Any keyword arguments passed to the function

        Returns:
            A redirect to the deals homepage
        """
        cart = Cart(request.session)
        cart.clear()
        return redirect('/')
class RemoveItemView(LoginRequiredMixin, View):
    """
    Remove item from cart.

    When triggered, removes a particular item from the cart session
    based on its id.

    Attributes:
        LoginRequiredMixin: Ensures the user is logged in
        View: Normal django view
    """

    def post(self, request, **kwargs):
        """
        Remove item from cart.

        Args:
            request: The incoming post request object
            **kwargs: Any keyword arguments passed to the function

        Returns:
            A redirect to the deals homepage
        """
        dealid = request.POST.get('dealid')
        # NOTE(review): like AddToCartView, an unknown id raises DoesNotExist.
        deal = Deal.objects.get(id=dealid)
        cart = Cart(request.session)
        cart.remove(deal)
        return redirect('/')
ArteliaTelemac/PostTelemac | PostTelemac/meshlayerparsers/libs_telemac/parsers/parserFortran.py | Python | gpl-3.0 | 59,062 | 0.002438 | """@author Sebastien E. Bourban and Noemie Durand
"""
"""@note ... this work is based on a collaborative effort between
.________. ,--.
| | . ( (
|,-. / HR Wallingford EDF - LNHE / \_ \_/ .--.
/ \ / Howbery Park, 6, quai Watier \ ) /_ )
,. `' Wallingford, Oxfordshire 78401 Cedex `-'_ __ `--
/ \ / OX10 8BA, United Kingdom Chatou, France __/ \ \ `.
/ `-'| www.hrwallingford.com innovation.edf.com | ) ) )
!________! `--' `--
"""
"""@history 28/04/2011 -- Sebastien E. Bourban
Now supports SYSTELCFG as a directory (old Perl version, to which
systel.cfg is added) or as a file.
"""
"""@history 30/04/2011 -- Sebastien E. Bourban
Upgrade made to config parsing to include the option to reset the
version and the root from the command line option:
-v <version>, reset the version read in the config file with this
-r <root>, reset the root path read in the config file with this
"""
"""@history 04/12/2012 -- Juliette Parisi and Sebastien E. Bourban
Simplifying call to parseConfigFile, which now takes two arguments
options.configFile, and options.configName and return one or more
valid configurations in an array. Testing for validity is now done
within config.py
"""
"""@history 10/01/2013 -- Yoann Audouin
ScanSources goes through subdirectories as well now ignore
hidden directories
Adding scan of .F and .F90 files as well
"""
"""@history 06/02/2013 -- Sebastien E. Bourban
Adding the functionality of displaying changes (html/diff) made
to a PRINCI file by comparing individual subroutines to their
original version.
Further capability to compare changes made between 2 PRINCI files.
"""
"""@history 01/07/2013 -- Sebastien E. Bourban and Yoann Audoin
Upgrade to the new structure
"""
"""@history 13/07/2013 -- Sebastien E. Bourban
Now deals with DECLARATIONS first before identifying unkonwn externals
"""
"""@history 23/09/2014 -- Sebastien E. Bourban and Yoann Audoin
The content of the log files from GRETEL and PARTEL are now reported
in the error report.
"""
"""@brief
"""
# _____ ___________________________________________________
# | ____/ Imports /__________________________________________________/
#
# ~~> dependencies towards standard python
import re
import sys
from copy import deepcopy
from os import path, walk, remove, environ, sep
# ~~> dependencies towards the root of pytel
sys.path.append(path.join(path.dirname(sys.argv[0]), "..")) # clever you !
from config import OptionParser, parseConfigFile, parseConfig_CompileTELEMAC
# ~~> dependencies towards | other pytel/modules
from utilstelemac.files import (
getTheseFiles,
isNewer,
addToList,
addFileContent,
getFileContent,
putFileContent,
diffTextFiles,
)
from utilstelemac.progressbar import ProgressBar
# Module-level debug switch (off by default).
debug = False
# _____ ______________________________________________
# ____/ Instructions /_____________________________________________/
#
# Fortran statement / instruction keywords -- presumably used by the parser
# to tell language statements apart from user-defined names when scanning
# PRINCI sources (TODO confirm against the call sites further down the file).
listINSTRUCTION = [
    "ALLOCATE",
    "ASSIGN",
    "BACKSPACE",
    "BLOCK DATA",
    "CALL",
    "CASE",
    "CLOSE",
    "COMMON",
    "CYCLE",
    "CONTINUE",
    "DATA",
    "DEALLOCATE",
    "DEFAULT",
    "DO",
    "ELSE",
    "ELSEIF",
    "ENDIF",
    "ENDDO",
    "END",
    "ENDFILE",
    "EQUIVALENCE",
    "EXIT",
    "FORMAT",
    "GO",
    "TO",
    "GOTO",
    "IF",
    "IMPLICIT NONE",
    "INCLUDE",
    "INQUIRE",
    "INTERFACE",
    "MULTIPLE",
    "NAMELIST",
    "NULLIFY",
    "OPEN",
    "PRINT",
    "READ",
    "REWIND",
    "RETURN",
    "SELECT",
    "STOP",
    "SAVE",
    "THEN",
    "USE",
    "WHILE",
    "WHERE",
    "WRITE",
]
# Fortran intrinsic function names -- presumably used to exclude intrinsic
# calls when resolving user-defined externals (TODO confirm at the call
# sites further down the file).
listINTRINSIC = [
    "ABS",
    "ACCESS",
    "ACHAR",
    "ACOS",
    "ACOSH",
    "ADJUSTL",
    "ADJUSTR",
    "AIMAG",
    "AINT",
    "ALARM",
    "ALL",
    "ALLOCATED",
    "AND",
    "ANINT",
    "ANY",
    "ASIN",
    "ASINH",
    "ASSOCIATED",
    "ATAN",
    "ATAN2",
    "ATANH",
    "BESJ0",
    "BESJ1",
    "BESJN",
    "BESY0",
    "BESY1",
    "BESYN",
    "BIT_SIZE",
    "BTEST",
    "CEILING",
    "CHAR",
    "CHDIR",
    "CHMOD",
    "CMPLX",
    "COMMAND_ARGUMENT_COUNT",
    "CONJG",
    "COS",
    "COSH",
    "COUNT",
    "CPU_TIME",
    "CSHIFT",
    "CTIME",
    "DATE_AND_TIME",
    "DBLE",
    "DCMPLX",
    "DFLOAT",
    "DIGITS",
    "DIM",
    "DOT_PRODUCT",
    "DPROD",
    "DREAL",
    "DTIME",
    "DMAX1",
    "DMIN1",
    "DMOD",
    "DSQRT",
    "DSIN",
    "DCOS",
    "DTAN",
    "DABS",
    "DATAN",
    "DATAN2",
    "DEXP",
    "DLOG",
    "DSINH",
    "DCOSH",
    "DTANH",
    "EOSHIFT",
    "EPSILON",
    "ERF",
    "ERFC",
    "ETIME",
    "EXIT",
    "EXP",
    "EXPONENT",
    "FDATE",
    "FGET",
    "FGETC",
    "FLOAT",
    "FLOOR",
    "FLUSH",
    "FNUM",
    "FPUT",
    "FPUTC",
    "FRACTION",
    "FREE",
    "FSEEK",
    "FSTAT",
    "FTELL",
    "GERROR",
    "GETARG",
    "GET_COMMAND",
    "GET_COMMAND_ARGUMENT",
    "GETCWD",
    "GETENV",
    "GET_ENVIRONMENT_VARIABLE",
    "GETGID",
    "GETLOG",
    "GETPID",
    "GETUID",
    "GMTIME",
    "HOSTNM",
    "HUGE",
    "IACHAR",
    "IAND",
    "IARGC",
    "IBCLR",
    "IBITS",
    "IBSET",
    "ICHAR",
    "IDATE",
    "IEOR",
    "IERRNO",
    "INDEX",
    "IDINT",
    "INT",
    "INT2",
    "INT8",
    "IOR",
    "IRAND",
    "ISATTY",
    "ISHFT",
    "ISHFTC",
    "ITIME",
    "KILL",
    "KIND",
    "LBOUND",
    "LEN",
    "LEN_TRIM",
    "LGE",
    "LGT",
    "LINK",
    "LLE",
    "LLT",
    "LNBLNK",
    "LOC",
    "LOG",
    "LOG10",
    "LOGICAL",
    "LONG",
    "LSHIFT",
    "LSTAT",
    "LTIME",
    "MALLOC",
    "MATMUL",
    "MAX",
    "MAX0",
    "MAXEXPONENT",
    "MAXLOC",
    "MAXVAL",
    "MCLOCK",
    "MCLOCK8",
    "MERGE",
    "MIN",
    "MIN0",
    "MINEXPONENT",
    "MINLOC",
    "MINVAL",
    "MOD",
    "MODULO",
    "MOVE_ALLOC",
    "MVBITS",
    "NEAREST",
    "NEW_LINE",
    "NINT",
    "NOT",
    "NULL",
    "OR",
    "PACK",
    "PERROR",
    "PRECISION",
    "PRESENT",
    "PRODUCT",
    "RADIX",
    "RANDOM_NUMBER",
    "RANDOM_SEED",
    "RAND",
    "RANGE",
    "RAN",
    "REAL",
    "RENAME",
    "REPEAT",
    "RESHAPE",
    "RRSPACING",
    "RSHIFT",
    "SCALE",
    "SCAN",
    "SECNDS",
    "SECOND",
    "SELECTED_INT_KIND",
    "SELECTED_REAL_KIND",
    "SET_EXPONENT",
    "SHAPE",
    "SIGN",
    "SIGNAL",
    "SIN",
    "SINH",
    "SIZE",
    "SLEEP",
    "SNGL",
    "SPACING",
    "SPREAD",
    "SQRT",
    "SRAND",
    "STAT",
    "SUM",
    "SYMLNK",
    "SYSTEM",
    "SYSTEM_CLOCK",
    "TAN",
    "TANH",
    "TIME",
    "TIME8",
    "TINY",
    "TRANSFER",
    "TRANSPOSE",
    "TRIM",
    "TTYNAM",
    "UBOUND",
    "UMASK",
    "UNLINK",
    "UNPACK",
    "VERIFY",
    "XOR",
]
# _____ ________________________________
# ____/ Global Regular Expressions /_______________________________/
#
"""
"""
# ?beforethisafter=r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))'+ \
# ? r'\s*(?P<this>(\b(%s)\b))'+ \
# ? r'\s*(?P<after>%s)\s*\Z'
beforethisafter = r"(?P<before>%s(?=\s?(\b(%s)\b)))" + r"\s?(?P<this>(\b(%s)\b))" + r"\s?(?P<after>%s)\Z"
# ?emptyline = re.compile(r'\s*\Z') #,re.I)
emptyline = re.compile(r"\Z") # ,re.I)
f77comment = re.compile(r"[C!#*]") # ,re.I)
f77continu2 = re.compile(r"(\s{5}\S\s*)(?P<line>.*)") # ,re.I)
# ?f90comment = re.compile(r'(?P<line>([^"]*"[^"]*"[^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!]*))!{1}(?P<rest>.*)') #,re.I)
f90comment = re.compile(r'(?P<line>([^"]*"[^"]*"[^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!]*))!{1}(?P<rest>[^\Z]*)') # ,re.I)
# ?f90continu1 = re.compile(r'(?P<line>.*)&\s*\Z') #,re.I)
f90continu1 = re.compile(r"(?P<line>.*)&\Z") # ,re.I)
# ?f90continu2 = re.compile(r'(\s*&\s*)(?P<line>.*)&\s*\Z') #,re.I)
f90continu2 = re.compile(r"(&\s?)(?P<line>.*)&\Z") # ,re.I)
# ?f90continu3 = re.compile(r'(\s*& |
colloquium/spacewalk | spacewalk/certs-tools/client_config_update.py | Python | gpl-2.0 | 6,500 | 0.003385 | #!/usr/bin/python -u
#
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# key=value formatted "config file" mapping script
#
# NOT TO BE USED DIRECTLY
# This is called by a script generated by the rhn-bootstrap utility.
#
# Specifically engineered with the RHN Update Agent configuration files
# in mind though it is relatively generic in nature.
#
# Author: Todd Warner <taw@redhat.com>
#
# $Id$
"""
Client configuration mapping script that writes to an RHN Update Agent-type
config file(s)
I.e., maps a file with RHN Update Agent-like key=value pairs e.g.,
serverURL=https://test-satellite.example.redhat.com/XMLRPC
noSSLServerURL=http://test-satellite.example.redhat.com/XMLRPC
enableProxy=0
sslCACert=/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT
(NOTE - older RHN Satellite's and Proxy's used:
sslCACert=/usr/share/rhn/RHNS-CORP-CA-CERT)
And maps that to the client's configuration files.
-------------
To map new settings to a file that uses the format key=value, where
key[comment]=value is a comment line you do this (e.g., mapping
key=value pairs to /etc/sysconfig/rhn/up2date):
1. edit a file (e.g., 'client-config-overrides.txt'), inputing new key=value pairs
to replace in config file (e.g., /etc/sysconfig/rhn/up2date).
Specifically:
serverURL=https://test-satellite.example.redhat.com/XMLRPC
noSSLServerURL=http://test-satellite.example.redhat.com/XMLRPC
2. ./client_config_update.py /etc/sysconfig/rhn/up2date client-config-overrides.txt
That's all there is to it.
If you are running an older RHN Update Agent, the rhn_register file can be
mapped as well:
./client_config_update.py /etc/sysconfig/rhn/rhn_register client-config-overrides.txt
"""
import os
import sys
import string
import tempfile
DEFAULT_CLIENT_CONFIG_OVERRIDES = 'client-config-overrides.txt'
RHN_REGISTER = "/etc/sysconfig/rhn/rhn_register"
UP2DATE = "/etc/sysconfig/rhn/up2date"
def _parseConfigLine(line):
"""parse a line from a config file. Format can be either "key=value\n"
or "whatever text\n"
return either:
(key, value)
or
None
The '\n' is always stripped from the value.
"""
kv = string.split(line, '=')
if len(kv) < 2:
# not a setting
return None
if len(kv) > 2:
# '=' is part of the value, need to rejoin it.
kv = kv[0], string.join(kv[1:], '=')
if string.find(kv[0], '[comment]') > 0:
# comment; not a setting
return None
# it's a setting, trim the '\n' and return the (key, value) pair.
kv[0] = string.strip(kv[0])
if kv[1][-1] == '\n':
kv[1] = kv[1][:-1]
return tuple(kv)
def readConfigFile(configFile):
    """read in config file, return dictionary of key/value pairs

    Fix: the file handle was previously never closed (leaked); it is now
    closed in a try/finally (kept instead of ``with`` for old-Python
    compatibility, matching the rest of this module).
    """
    fin = open(configFile, 'rb')
    try:
        d = {}
        for line in fin.readlines():
            kv = _parseConfigLine(line)
            if kv:
                d[kv[0]] = kv[1]
        return d
    finally:
        fin.close()
def dumpConfigFile(configFile):
    """Pretty-print the key/value pairs parsed from configFile."""
    from pprint import pprint
    pprint(readConfigFile(configFile))
def mapNewSettings(configFile, dnew):
    """Overlay the settings in ``dnew`` onto ``configFile`` in place.

    Every ``key=value`` line whose key appears in ``dnew`` with a different
    value is rewritten; all other lines pass through untouched.  The file is
    only rewritten (and a "* <file> written" notice printed) when at least
    one value actually changed.

    Fixes over the previous implementation:
      * all three file handles (temp file, input, output) are now closed;
      * ``dnew.has_key(...)`` replaced by ``in`` (works on Python 2 and 3);
      * the Python 2-only ``print`` statement replaced by an equivalent
        ``sys.stdout.write`` (identical output on both versions).
    """
    fo = tempfile.TemporaryFile(prefix='/tmp/client-config-overrides-', mode='r+b')
    try:
        fin = open(configFile, 'rb')
        try:
            changedYN = 0
            # stage the rewritten content in the temp file first
            for line in fin.readlines():
                kv = _parseConfigLine(line)
                if not kv:
                    # not a setting; write the unaltered line
                    fo.write(line)
                elif kv[0] in dnew:
                    if dnew[kv[0]] != kv[1]:
                        fo.write('%s=%s\n' % (kv[0], dnew[kv[0]]))
                        changedYN = 1
                    else:
                        fo.write(line)
                else:
                    # it's a setting but not being mapped
                    fo.write(line)
        finally:
            fin.close()
        if changedYN:
            # copy the staged content back over configFile
            fout = open(configFile, 'wb')
            try:
                fo.seek(0)
                fout.write(fo.read())
            finally:
                fout.close()
            sys.stdout.write('* %s written\n' % configFile)
    finally:
        fo.close()
def parseCommandline():
    """parse/process the commandline

    Commandline is dead simple for easiest portability.

    Returns:
        (configFilename, newMappings) -- absolute paths of the config file
        to alter and the file holding the new key=value settings.

    Raises:
        TypeError: wrong number of arguments
        IOError: either file does not exist

    Fixes: the usage text contained extraction-garbled fragments
    ("pytho | n", "- | -usage"), and the Python 2-only ``print`` statement is
    replaced by ``sys.stdout.write`` so the module parses on Python 3 too.
    """
    # USAGE & HELP!
    if '--usage' in sys.argv or '-h' in sys.argv or '--help' in sys.argv:
        sys.stdout.write("""\
usage: python %s CONFIG_FILENAME NEW_MAPPINGS [options]

arguments:
  CONFIG_FILENAME   config file to alter
  NEW_MAPPINGS      file containing new settings that map onto the
                    config file

options:
  -h, --help        show this help message and exit
  --usage           show brief usage summary

examples:
  python %s %s %s
  python %s %s %s
""" % (sys.argv[0],
       sys.argv[0], RHN_REGISTER, DEFAULT_CLIENT_CONFIG_OVERRIDES,
       sys.argv[0], UP2DATE, DEFAULT_CLIENT_CONFIG_OVERRIDES))
        sys.exit(0)

    if len(sys.argv) != 3:
        msg = "ERROR: exactly two arguments are required, see --help"
        raise TypeError(msg)

    configFilename = os.path.abspath(sys.argv[1])
    newMappings = os.path.abspath(sys.argv[2])

    if not os.path.exists(configFilename):
        msg = ("ERROR: filename to alter (1st argument), does not exist:\n"
               "       %s"
               % configFilename)
        raise IOError(msg)

    if not os.path.exists(newMappings):
        msg = ("ERROR: filename that contains the mappings (2nd argument), "
               "does not exist:\n"
               "       %s" % newMappings)
        raise IOError(msg)

    return configFilename, newMappings
def main():
    """Entry point: parse the commandline, then map the new settings."""
    configFilename, newMappings = parseCommandline()
    mapNewSettings(configFilename, readConfigFile(newMappings))
if __name__ == '__main__':
sys.exit(main() or 0)
|
ccxt/ccxt | examples/py/ftx-close-position-reduceOnly.py | Python | mit | 1,037 | 0.000964 | # -*- coding: utf-8 -*-
import os
import sys
from pprint import pprint
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
print('CCXT Version:', ccxt.__version__)
# Instantiate the exchange with your API credentials.
# (Fix: the source was garbled by extraction -- "ccxt. | ftx" and a stray
# "|" before 'secret' made the script a syntax error.)
exchange = ccxt.ftx({
    'enableRateLimit': True,
    'apiKey': 'YOUR_API_KEY',
    'secret': 'YOUR_SECRET',
})

exchange.load_markets()

# exchange.verbose = True  # uncomment for debugging purposes if necessary

symbol = 'BTC-PERP'  # change for your symbol

positions = exchange.fetch_positions()
positions_by_symbol = exchange.index_by(positions, 'symbol')

if symbol in positions_by_symbol:
    position = positions_by_symbol[symbol]
    # A reduce-only market order in the opposite direction flattens the
    # open position without risking opening one the other way.
    type = 'market'
    side = 'sell' if position['side'] == 'long' else 'buy'
    amount = position['contracts']
    price = None
    params = {
        'reduceOnly': True
    }
    order = exchange.create_order(symbol, type, side, amount, price, params)
    pprint(order)
else:
    print('You do not have an open', symbol, 'position')
|
KoreaCloudObjectStorage/swift3 | swift3/test/functional/test_object.py | Python | apache-2.0 | 27,679 | 0.000036 | # Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from email.utils import formatdate, parsedate
from time import mktime
from multifile import MultiFile
from cStringIO import StringIO
from hashlib import md5
from swift3.test.functional.s3_test_client import Connection
from swift3.test.functional.utils import get_error_code,\
calculate_md5
from swift3.test.functional import Swift3FunctionalTestCase
from swift3.etree import fromstring
DAY = 86400.0 # 60 * 60 * 24 (sec)
class TestSwift3Object(Swift3FunctionalTestCase):
def setUp(self):
super(TestSwift3Object, self).setUp()
self | .bucket = 'bucket'
self.conn.make_request('PUT', self.bucket)
def _assertObjectEtag(self, bucket, obj, etag):
status, headers, _ = | self.conn.make_request('HEAD', bucket, obj)
self.assertEquals(status, 200) # sanity
self.assertCommonResponseHeaders(headers, etag)
    def test_object(self):
        """Full object lifecycle: PUT, server-side copy, GET, HEAD, DELETE."""
        obj = 'object'
        content = 'abc123'
        etag = md5(content).hexdigest()

        # PUT Object -- the response body is empty, hence content-length 0
        status, headers, body = \
            self.conn.make_request('PUT', self.bucket, obj, body=content)
        self.assertEquals(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-length' in headers)  # sanity
        self.assertEquals(headers['content-length'], '0')
        self._assertObjectEtag(self.bucket, obj, etag)

        # PUT Object Copy -- copies src into a second bucket and checks the
        # CopyObjectResult XML carries the same etag as the source
        dst_bucket = 'dst_bucket'
        dst_obj = 'dst_obj'
        self.conn.make_request('PUT', dst_bucket)
        headers = {'x-amz-copy-source': '/%s/%s' % (self.bucket, obj)}
        status, headers, body = \
            self.conn.make_request('PUT', dst_bucket, dst_obj,
                                   headers=headers)
        self.assertEquals(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertEquals(headers['content-length'], str(len(body)))
        elem = fromstring(body, 'CopyObjectResult')
        self.assertTrue(elem.find('LastModified').text is not None)
        # TODO: assert LastModified value
        self.assertTrue(elem.find('ETag').text is not None)
        self.assertEquals(etag, elem.find('ETag').text.strip('"'))
        self._assertObjectEtag(dst_bucket, dst_obj, etag)

        # GET Object -- body-bearing response reports the object length
        status, headers, body = \
            self.conn.make_request('GET', self.bucket, obj)
        self.assertEquals(status, 200)
        self.assertCommonResponseHeaders(headers, etag)
        self.assertTrue(headers['last-modified'] is not None)
        self.assertTrue(headers['content-type'] is not None)
        self.assertEquals(headers['content-length'], str(len(content)))

        # HEAD Object -- same headers as GET, no body
        status, headers, body = \
            self.conn.make_request('HEAD', self.bucket, obj)
        self.assertEquals(status, 200)
        self.assertCommonResponseHeaders(headers, etag)
        self.assertTrue(headers['last-modified'] is not None)
        self.assertTrue('content-type' in headers)
        self.assertEquals(headers['content-length'], str(len(content)))

        # DELETE Object -- 204 No Content on success
        status, headers, body = \
            self.conn.make_request('DELETE', self.bucket, obj)
        self.assertEquals(status, 204)
        self.assertCommonResponseHeaders(headers)
    def test_put_object_error(self):
        """PUT error paths: bad signature and missing bucket."""
        # wrong secret key -> SignatureDoesNotMatch
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('PUT', self.bucket, 'object')
        self.assertEquals(get_error_code(body), 'SignatureDoesNotMatch')
        # PUT into a bucket that was never created -> NoSuchBucket
        status, headers, body = \
            self.conn.make_request('PUT', 'bucket2', 'object')
        self.assertEquals(get_error_code(body), 'NoSuchBucket')
    def test_put_object_copy_error(self):
        """PUT Object Copy error paths: bad auth, bad source, bad target."""
        obj = 'object'
        self.conn.make_request('PUT', self.bucket, obj)
        dst_bucket = 'dst_bucket'
        self.conn.make_request('PUT', dst_bucket)
        dst_obj = 'dst_object'

        # wrong secret key -> SignatureDoesNotMatch
        headers = {'x-amz-copy-source': '/%s/%s' % (self.bucket, obj)}
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('PUT', dst_bucket, dst_obj, headers)
        self.assertEquals(get_error_code(body), 'SignatureDoesNotMatch')

        # /src/nothing -> /dst/dst
        headers = {'X-Amz-Copy-Source': '/%s/%s' % (self.bucket, 'nothing')}
        status, headers, body = \
            self.conn.make_request('PUT', dst_bucket, dst_obj, headers)
        self.assertEquals(get_error_code(body), 'NoSuchKey')

        # /nothing/src -> /dst/dst
        headers = {'X-Amz-Copy-Source': '/%s/%s' % ('nothing', obj)}
        status, headers, body = \
            self.conn.make_request('PUT', dst_bucket, dst_obj, headers)
        # TODO: source bucket is not check.
        # self.assertEquals(get_error_code(body), 'NoSuchBucket')

        # /src/src -> /nothing/dst
        headers = {'X-Amz-Copy-Source': '/%s/%s' % (self.bucket, obj)}
        status, headers, body = \
            self.conn.make_request('PUT', 'nothing', dst_obj, headers)
        self.assertEquals(get_error_code(body), 'NoSuchBucket')
    def test_get_object_error(self):
        """GET error paths: bad signature, missing key, missing bucket."""
        obj = 'object'
        self.conn.make_request('PUT', self.bucket, obj)

        # wrong secret key -> SignatureDoesNotMatch
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('GET', self.bucket, obj)
        self.assertEquals(get_error_code(body), 'SignatureDoesNotMatch')

        # existing bucket, missing key -> NoSuchKey
        status, headers, body = \
            self.conn.make_request('GET', self.bucket, 'invalid')
        self.assertEquals(get_error_code(body), 'NoSuchKey')

        # missing bucket currently also reports NoSuchKey (see TODO)
        status, headers, body = self.conn.make_request('GET', 'invalid', obj)
        # TODO; requires consideration
        # self.assertEquals(get_error_code(body), 'NoSuchBucket')
        self.assertEquals(get_error_code(body), 'NoSuchKey')
    def test_head_object_error(self):
        """HEAD error paths: status codes only (HEAD responses carry no body)."""
        obj = 'object'
        self.conn.make_request('PUT', self.bucket, obj)

        # wrong secret key -> 403
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('HEAD', self.bucket, obj)
        self.assertEquals(status, 403)

        # missing key -> 404
        status, headers, body = \
            self.conn.make_request('HEAD', self.bucket, 'invalid')
        self.assertEquals(status, 404)

        # missing bucket -> 404
        status, headers, body = \
            self.conn.make_request('HEAD', 'invalid', obj)
        self.assertEquals(status, 404)
    def test_delete_object_error(self):
        """DELETE error paths: bad signature, missing key, missing bucket."""
        obj = 'object'
        self.conn.make_request('PUT', self.bucket, obj)

        # wrong secret key -> SignatureDoesNotMatch
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('DELETE', self.bucket, obj)
        self.assertEquals(get_error_code(body), 'SignatureDoesNotMatch')

        # existing bucket, missing key -> NoSuchKey
        status, headers, body = \
            self.conn.make_request('DELETE', self.bucket, 'invalid')
        self.assertEquals(get_error_code(body), 'NoSuchKey')

        # missing bucket -> NoSuchBucket
        status, headers, body = \
            self.conn.make_request('DELETE', 'invalid', obj)
        self.assertEquals(get_error_code(body), 'NoSuchBucket')
def test_put_object_content_encoding(self):
obj = 'object'
etag = md5().hexdigest()
headers = {'Content-Encoding': 'gzip'}
status, headers, body = \
self.conn.make_request('PUT', self.bucket, obj, headers)
self.assertEquals(status, 200)
status, headers, body = \
self.conn.make_request('HEAD', self.bucket, obj)
s |
0xdyu/RouteFlow-Exodus | pox/pox/lib/graph/nom.py | Python | apache-2.0 | 4,353 | 0.012176 | # Copyright 2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
"""
from pox.lib.revent import *
from pox.core import core
from pox.lib.addresses import *
from pox.lib.graph.graph import *
class EntityEvent (Event):
  """Base event class carrying the entity that joined or left."""
  def __init__ (self, entity):
    Event.__init__(self)
    # the Entity this event is about
    self.entity = entity
class EntityJoin (EntityEvent):
  """
  An entity has been added.

  Note that if there is a more specific join event defined for a particular
  entity, (e.g., SwitchJoin), this event will not be fired.

  TODO: or we could always raise EntityJoins along with SwitchJoins, which
  seems more intuitive to me.
  """
  pass
class EntityLeave (EntityEvent):
  """
  An entity has been removed.

  Note that if there is a more specific leave event defined for a particular
  entity, (e.g., SwitchLeave), this event will not be fired.

  TODO: or we could always raise EntityLeaves along with SwitchLeaves, which
  seems more intuitive to me.
  """
  pass
class Update (Event):
  """
  Fired by Topology whenever anything has changed
  """
  def __init__ (self, event):
    Event.__init__(self)
    # the original, more specific event that triggered this Update
    self.event = event
class Entity (Node):
  """
  Base class for all topology entities (intentionally minimal).

  It only serves as a convenient SuperClass type; it's up to subclasses to
  implement specific functionality (e.g. OpenFlow 1.0 switch behavior).
  This keeps protocol-specific details out of this module, but does /not/
  imply that pox.topology defines a generic abstract entity interface.
  """
class Host (Entity):
  """
  A generic Host entity (no behavior beyond the Entity base).
  """
  def __init__(self):
    Entity.__init__(self)
class Switch (Entity):
  """
  Marker base for switches; subclassed by protocol-specific switch classes,
  e.g. pox.openflow.topology.OpenFlowSwitch
  """
"""
class Port (Entity):
def __init__ (self, num, hwAddr, name):
Entity.__init__(self)
self.number = num
self.hwAddr = EthAddr(hwAddr)
self.name = name
"""
class NOM (Graph, EventMixin):
  """
  Network Object Model: a Graph of Entity nodes that raises EntityJoin /
  EntityLeave as entities come and go, plus a generic Update for every
  event raised.

  (Fixes: two extraction-garbled tokens -- ``subty | pes`` and
  ``self.__class__. | __name__`` -- were syntax errors in the source.)
  """

  __eventMixin_events = [
    EntityJoin,
    EntityLeave,
    Update
  ]

  def __init__ (self):
    Graph.__init__(self)
    EventMixin.__init__(self)
    self._eventMixin_addEvents(self.__eventMixin_events)
    # not referenced elsewhere in this class; kept for compatibility
    self._entities = {}
    self.log = core.getLogger(self.__class__.__name__)

  def getEntityByID (self, ID, fail=False):
    """
    Look up a single entity whose DPID or ID equals ``ID``.

    Raises an exception if fail is True and the entity doesn't exist

    See also: The 'entity' property.
    """
    r = self.find(Or(Equal('DPID', ID), Equal(F('ID'), ID)))
    if len(r) == 0:
      if fail:
        raise RuntimeError("No entity with ID " + str(ID))
      else:
        return None
    assert len(r) == 1
    return r[0]

  def removeEntity (self, entity):
    """Remove the entity (if present) and raise EntityLeave."""
    if entity in self:
      self.remove(entity)
      self.log.info(str(entity) + " left")
      self.raiseEvent(EntityLeave, entity)

  def addEntity (self, entity):
    """ Will raise an exception if entity.id already exists """
    if entity in self:
      raise RuntimeError("Entity exists")
    self.add(entity)
    self.log.info(str(entity) + " joined")
    self.raiseEvent(EntityJoin, entity)

  def getEntitiesOfType (self, t=Entity, subtypes=True):
    """Return entities of type ``t``; include subclasses unless
    ``subtypes`` is False."""
    if subtypes is False:
      return self.find(is_a=t)
    else:
      return self.find(type=t)

  def raiseEvent (self, event, *args, **kw):
    """
    Whenever we raise any event, we also raise an Update, so we extend
    the implementation in EventMixin.
    """
    rv = EventMixin.raiseEvent(self, event, *args, **kw)
    if type(event) is not Update:
      EventMixin.raiseEvent(self, Update(event))
    return rv

  def __str__(self):
    return "<%s len:%i>" % (self.__class__.__name__, len(self))
|
shishengjia/OnlineCourses | extra_apps/xadmin/plugins/__init__.py | Python | apache-2.0 | 771 | 0.029831 |
# Names of the built-in plugin modules under xadmin.plugins; each is
# imported (for its registration side effects) by register_builtin_plugins.
# (Fix: the entries 'details' and 'editable' were garbled by extraction as
# "'detai | ls'" and "'editabl | e'", which broke the tuple.)
PLUGINS = (
    'actions',
    'filters',
    'bookmark',
    'export',
    'layout',
    'refresh',
    'details',
    'editable',
    'relate',
    'chart',
    'ajax',
    'relfield',
    'inline',
    'topnav',
    'portal',
    'quickform',
    'wizard',
    'images',
    'auth',
    'multiselect',
    'themes',
    'aggregation',
    'mobile',
    'passwords',
    'sitemenu',
    'language',
    'quickfilter',
    'sortablelist',
    'ueditor'
)
def register_builtin_plugins(site):
    """Import every built-in plugin module not excluded by settings.

    Importing a plugin module registers it with xadmin as a side effect.
    ``site`` is unused here but kept for interface compatibility with
    callers.
    """
    from importlib import import_module
    from django.conf import settings

    exclude_plugins = getattr(settings, 'XADMIN_EXCLUDE_PLUGINS', [])
    # A plain loop instead of a throwaway list comprehension: the imports
    # are executed purely for their side effects.
    for plugin in PLUGINS:
        if plugin not in exclude_plugins:
            import_module('xadmin.plugins.%s' % plugin)
|
cpennington/edx-platform | lms/djangoapps/bulk_email/views.py | Python | agpl-3.0 | 2,070 | 0.000483 | """
Views to support bulk email functionalities like opt-out.
"""
import logging
from six import text_type
from django.contrib.auth.models import User
from django.http import Http404
from bulk_email.models import Optout
from courseware.courses import get_course_by_id
from edxmako.shortcuts import render_to_response
from lms.djangoapps.discussion.notification_prefs.views import (
UsernameCipher,
UsernameDecryptionException,
)
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
log = logging.getLogger(__name__)
def opt_out_email_updates(request, token, course_id):
    """
    A view that lets users opt out of any email updates.

    This is meant to be the target of an opt-out link or button.
    The `token` parameter must decrypt to a valid username.
    The `course_id` is the string course key of any course.

    Raises a 404 if there are any errors parsing the input.

    Fixes: two extraction-garbled tokens (``| try:`` and
    ``raise H | ttp404``) were syntax errors; also, a POST without the
    'unsubscribe' flag used to fall off the end and return None (a server
    error in Django) -- it now re-renders the confirmation page.
    """
    try:
        username = UsernameCipher().decrypt(token)
        user = User.objects.get(username=username)
        course_key = CourseKey.from_string(course_id)
        course = get_course_by_id(course_key, depth=0)
    except UnicodeDecodeError:
        raise Http404("base64url")
    except UsernameDecryptionException as exn:
        raise Http404(text_type(exn))
    except User.DoesNotExist:
        raise Http404("username")
    except InvalidKeyError:
        raise Http404("course")

    # POST value is a string when present; used only for truthiness here
    unsub_check = request.POST.get('unsubscribe', False)
    context = {
        'course': course,
        'unsubscribe': unsub_check
    }

    if request.method == 'GET':
        return render_to_response('bulk_email/confirm_unsubscribe.html', context)

    if request.method == 'POST' and unsub_check:
        Optout.objects.get_or_create(user=user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            user.username,
            user.email,
            course_id,
        )
        return render_to_response('bulk_email/unsubscribe_success.html', context)

    # POST without the 'unsubscribe' flag: show the confirmation page again
    # instead of returning None.
    return render_to_response('bulk_email/confirm_unsubscribe.html', context)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.