code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Development settings: debugging on, local SQLite database.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

DATABASES = {
    "default": {
        # Backend engine; ends with "postgresql_psycopg2", "mysql",
        # "sqlite3" or "oracle".
        "ENGINE": "django.db.backends.sqlite3",
        # Database name, or path to the database file when using sqlite3.
        "NAME": "dev.db",
        # Credentials and network location -- all of these are ignored
        # by the sqlite3 backend.
        "USER": "",
        "PASSWORD": "",
        "HOST": "",  # empty string means localhost
        "PORT": "",  # empty string means the backend's default port
    }
}
| orlenko/bccf | src/bccf/local_settings_sample.py | Python | unlicense | 572 |
import sys
import json
'''
Take a file and unwrap the contents to a dictionary.
Write the key=value terms out.
The program is intended to be called from a bash
program to export the key=value pairs as environment
variables.
'''
def main(argv):
    """Read a JSON file whose first line is a one-element list holding a
    dict, and print each entry as a ``_docker_<key>=<value>`` line.

    The output is meant to be consumed by a bash wrapper that exports the
    pairs as environment variables, so keys are lower-cased and spaces are
    stripped from values to keep each line a valid shell assignment.

    :param argv: command-line arguments; ``argv[0]`` is the input path.
    """
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(argv[0], 'r') as handle:
        data = json.loads(handle.readline().strip())[0]
    for key, value in data.items():
        print("_docker_%s=%s" % (key.lower(), str(value).replace(' ', '')))


if __name__ == "__main__":
    main(sys.argv[1:])
| kylemvz/nbhub | nopleats/readData.py | Python | apache-2.0 | 494 |
# Static configuration for simulated client #4.

# Replica servers this client may contact.
remote_server_ips = ('127.0.0.1', '127.0.0.1')
remote_server_ports = (8005, 8006)

# In a real system a load-balancing server would distribute clients; here
# the balancing policy is simulated by pinning an index.
assigned_server_index = 1

process_id = 4
client_addr = ('127.0.0.1', 7004)

# Request arrival rate (Poisson), simulation length, and the probability
# that a request is a "get score" read.
poisson_lambda = 5
simu_len = 60
get_score_pb = 0.8
| SuperMass/distOS-lab3 | src/integrated/client4/client_config.py | Python | gpl-3.0 | 334 |
#!/usr/bin/python
import os, sys
from emu_parse_output import *
from compute_route_quality import *
sys.path.append ('./emulation')
# Global variables
VALIDITY_GNUPLOT = './rapidnet/route-quality/validity.gnuplot'
STRETCH_GNUPLOT = './rapidnet/route-quality/stretch.gnuplot'
# Post-processing driver for an emulation run: parses per-node output
# logs, computes route validity/stretch for every apptable dump slot,
# writes the data points and renders gnuplot graphs into <dir>.
#
# dir: The directory where to find the output log files from emulation, the dump files and graphs will also be created here
# apptable-period: period of dumping apptables
# duration: duration of the emulation
# orbit|cluster
# blacklist: comma separated list of blacklisted nodes, use node-ids as in nodesmap file, leave empty if none
# Check arguments
if len (sys.argv) < 6:
    print 'Usage: emu_route_quality <dir> <apptable-period> <duration> <node-count> <orbit|cluster> [blacklist]'
    sys.exit (0)
# Read command line arguments
dir = sys.argv[1]
period = int (sys.argv[2])
base = period
duration = int (sys.argv[3])
nodecount = int (sys.argv[4])
# Pick the node map matching the testbed the run was captured on.
if sys.argv[5] == 'orbit':
    from nodesmap_orbit import *
else:
    from nodesmap_clust import *
blacklist = []
if len (sys.argv) > 6:
    listofnodes = sys.argv[6].split(',')
    for node in listofnodes:
        blacklist += [int (node)]
# Parse the output file
parse_all_outputs (dir, nodecount, nodes, blacklist)
# One slot per apptable dump interval, starting one period after base.
times = range (base+period, duration+period, period)
notfound = []
route_quality = {}
# Compute route quality for every time slot
for time in times:
    routes_file = os.path.join (dir, 'routes/route_%3.3d.py' % time)
    if not os.path.exists (routes_file):
        print 'Routes file not found: ', routes_file
        notfound += [time]
        continue
    else:
        # The dump is a Python fragment that defines tLink/tLSU for this slot.
        execfile (routes_file)
    print 'Processing for time: ', time
    route_quality[time] = computeRouteQuality (tLink, tLSU, nodecount)
# Dump validity to files for plotting
validity_filename = os.path.join (dir, 'validity.points')
validity_file = open (validity_filename, 'w')
stretch_filename = os.path.join (dir, 'stretch.points')
stretch_file = open (stretch_filename, 'w')
print 'Writing validity data to ', validity_filename
print 'Writing stretch data to ', stretch_filename
# Slots with a missing routes file are skipped in the output.
for time in times:
    if time not in notfound:
        validity_file.write ('%d %f %d %d %d %d %.1f %d\n' % (time, route_quality[time][0], route_quality[time][2], \
            route_quality[time][3], route_quality[time][4], route_quality[time][5], route_quality[time][6], route_quality[time][7]))
        stretch_file.write ('%d %f\n' % (time, route_quality[time][1]))
validity_file.close ()
stretch_file.close ()
# Plot validity using gnuplot
# The .gnuplot templates contain a %s placeholder for the points file.
gnuplot_script = open (VALIDITY_GNUPLOT, 'r').read ()
open ('temp.gnuplot', 'w').write (gnuplot_script % validity_filename)
validity_imagefile = os.path.join (dir, 'validity.ps')
print 'Plotting validity to file: ', validity_imagefile
os.system ('gnuplot temp.gnuplot > %s' % validity_imagefile)
# Plot stretch using gnuplot
gnuplot_script = open (STRETCH_GNUPLOT, 'r').read ()
open ('temp.gnuplot', 'w').write (gnuplot_script % stretch_filename)
stretch_imagefile = os.path.join (dir, 'stretch.ps')
print 'Plotting stretch to file: ', stretch_imagefile
os.system ('gnuplot temp.gnuplot > %s' % stretch_imagefile)
os.system ('rm temp.gnuplot')
| AliZafar120/NetworkStimulatorSPl3 | rapidnet/route-quality/emu_route_quality.py | Python | gpl-2.0 | 3,159 |
# PLY package
# Author: David Beazley (dave@dabeaz.com)
# -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__all__ = ['lex','yacc']
| Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/ply/__init__.py | Python | mit | 1,797 |
from core.utils import *
from pytg.receiver import Receiver
from pytg.sender import Sender
from pytg.utils import coroutine
import json
# Module-level connections to the local telegram-cli daemon: one receiver
# for incoming updates and one sender for outgoing commands, both on the
# port configured in config.keys.tg_cli_port.
tgreceiver = Receiver(host="localhost", port=config.keys.tg_cli_port)
tgsender = Sender(host="localhost", port=config.keys.tg_cli_port)
# Telegram-CLI bindings
def peer(chat_id):
    """Build the telegram-cli peer string for a numeric chat id.

    Positive ids are users; negative ids whose digits start with '100'
    are channels (the marker is stripped); other negative ids are plain
    group chats.
    """
    text = str(chat_id)
    if chat_id > 0:
        return 'user#id' + text
    if text[1:].startswith('100'):
        return 'channel#id' + text[4:]
    return 'chat#id' + text[1:]
def user_id(username):
    """Resolve a '@username' or plain peer string to a numeric peer id.

    Returns the peer_id on success, False when the lookup result carries
    no peer_id.
    """
    if username.startswith('@'):
        raw = tgsender.raw('resolve_username ' + username[1:])
        info = DictObject(json.loads(raw))
    else:
        info = tgsender.user_info(username)
    return info.peer_id if 'peer_id' in info else False
def get_id(user):
    """Normalise a user reference (int, digit string or @username) to an int id."""
    if isinstance(user, int):
        return user
    return int(user) if user.isdigit() else int(user_id(user))
def escape(string):
    """Backslash-escape control characters and quotes for telegram-cli.

    Returns None unchanged; otherwise the escaped text wrapped in single
    quotes, ready to embed in a raw CLI command.
    """
    if string is None:
        return None
    # Backslash must be escaped first so later replacements are not doubled.
    replacements = (
        ("\\", "\\\\"), ("\n", "\\n"), ("\r", "\\r"), ("\t", "\\t"),
        ("\b", "\\b"), ("\a", "\\a"), ("'", "\\'"),
    )
    for plain, escaped in replacements:
        string = string.replace(plain, escaped)
    return "'" + string + "'"
# Standard methods for bindings
def get_me():
    """Fetch the bot's own profile from telegram-cli and cache it on `bot`."""
    me = tgsender.get_self()
    bot.first_name = me.first_name
    bot.username = me.username
    bot.id = me.peer_id
def kick_chat_member(chat, user):
    """Remove a user from a group or channel.

    Uses channel_kick for supergroups/channels (ids starting with -100)
    and chat_del_user for plain groups. Returns True on success; raises
    PolarisExceptions.NotAdminException when the bot lacks admin rights
    and PolarisExceptions.FailedException on any other failure.
    """
    # NOTE(review): the original source was missing this 'try:' (its
    # 'except' had no matching try, a syntax error); restored here.
    try:
        if str(chat).startswith('-100'):
            result = tgsender.channel_kick(peer(chat), peer(get_id(user)))
        else:
            result = tgsender.chat_del_user(peer(chat), peer(get_id(user)))
    except Exception:
        # telegram-cli failures surface as exceptions whose message
        # carries the error token as the fifth word, quoted.
        error = str(sys.exc_info()[1]).split()[4].rstrip("'")
    else:
        if hasattr(result, 'result') and result.result == 'FAIL':
            error = result.error.split()[-1]
        else:
            return True
    if error == 'CHAT_ADMIN_REQUIRED':
        raise PolarisExceptions.NotAdminException()
    else:
        raise PolarisExceptions.FailedException()
def unban_chat_member(chat, user):
    # Intentionally a no-op: unbanning is not implemented in this wrapper.
    pass
def convert_message(msg):
    """Translate a raw pytg message object into a Polaris Message.

    Builds User/Group objects for sender and receiver, classifies the
    payload (text / media / service event) and recursively converts the
    replied-to message when one is referenced.
    """
    id = msg['id']
    # --- receiver ---------------------------------------------------------
    if msg.receiver.type == 'user':
        receiver = User()
        receiver.id = int(msg.receiver.peer_id)
        # NOTE(review): this unconditional read precedes the presence check
        # below -- it assumes every user payload carries first_name; confirm
        # against pytg's message schema.
        receiver.first_name = msg.receiver.first_name
        if 'first_name' in msg.receiver:
            receiver.first_name = msg.receiver.first_name
        if 'last_name' in msg.receiver:
            receiver.last_name = msg.receiver.last_name
        if 'username' in msg.receiver:
            receiver.username = msg.receiver.username
    else:
        receiver = Group()
        if msg.receiver.type == 'channel':
            # Channels use Telegram's "-100<id>" chat-id convention.
            receiver.id = - int('100' + str(msg.receiver.peer_id))
        else:
            # Plain group chats are simply negated ids.
            receiver.id = - int(msg.receiver.peer_id)
        receiver.title = msg.receiver.title
    # --- sender (mirrors the receiver handling) ---------------------------
    if msg.sender.type == 'user':
        sender = User()
        sender.id = int(msg.sender.peer_id)
        sender.first_name = msg.sender.first_name
        if 'first_name' in msg.sender:
            sender.first_name = msg.sender.first_name
        if 'last_name' in msg.sender:
            sender.last_name = msg.sender.last_name
        if 'username' in msg.sender:
            sender.username = msg.sender.username
    else:
        sender = Group()
        if msg.sender.type == 'channel':
            sender.id = - int('100' + str(msg.sender.peer_id))
        else:
            sender.id = - int(msg.sender.peer_id)
        sender.title = msg.sender.title
    date = msg.date
    # Gets the type of the message
    if 'text' in msg:
        type = 'text'
        content = msg.text
        extra = None
    elif 'media' in msg:
        # Media messages carry the message id as content (so the media can
        # be fetched later); the caption, if any, rides in extra.
        type = msg.media.type
        content = msg.id
        if 'caption' in msg.media:
            extra = msg.media.caption
        else:
            extra = None
    elif msg.event == 'service':
        # Service events: joins/leaves are normalised to join_user /
        # left_user with the affected peer id in extra.
        type = 'service'
        if msg.action.type == 'chat_del_user':
            content = 'left_user'
            extra = msg.action.user.peer_id
        elif msg.action.type == 'chat_add_user':
            content = 'join_user'
            extra = msg.action.user.peer_id
        elif msg.action.type == 'chat_add_user_link':
            # Joining via invite link: the sender is the joining user.
            content = 'join_user'
            extra = msg.sender.peer_id
        else:
            type = None
            content = None
            extra = None
    else:
        type = None
        content = None
        extra = None
    # Generates another message object for the original message if the reply.
    if 'reply_id' in msg:
        reply_msg = tgsender.message_get(msg.reply_id)
        reply = convert_message(reply_msg)
    else:
        reply = None
    return Message(id, sender, receiver, content, type, date, reply, extra)
def send_message(message):
    """Deliver a Polaris Message through telegram-cli.

    Dispatches on message.type. Each branch sends a typing action first,
    then tries the typed pytg helper and falls back to a raw CLI command
    when the helper raises.
    """
    if message.type == 'text':
        tgsender.send_typing(peer(message.receiver.id), 1)
        # telegram-cli cannot render markup, so formatting is stripped.
        if message.markup == 'Markdown':
            message.content = remove_markdown(message.content)
        elif message.markup == 'HTML':
            message.content = remove_html(message.content)
        try:
            tgsender.send_msg(peer(message.receiver.id), message.content, enable_preview=message.extra)
        except:
            # Raw fallback needs CLI-style quoting of the text.
            tgsender.raw('post ' + peer(message.receiver.id) + ' ' + escape(message.content), enable_preview=message.extra)
    elif message.type == 'photo':
        # NOTE(review): the trailing numbers on these send_typing calls look
        # like intended media-specific action codes -- confirm their meaning.
        tgsender.send_typing(peer(message.receiver.id), 1) # 7
        try:
            tgsender.send_photo(peer(message.receiver.id), message.content.name, message.extra)
        except:
            tgsender.raw('post_photo %s %s %s' % (peer(message.receiver.id), message.content.name, escape(message.extra)))
    elif message.type == 'audio':
        tgsender.send_typing(peer(message.receiver.id), 1) # 6
        try:
            tgsender.send_audio(peer(message.receiver.id), message.content.name)
        except:
            tgsender.raw('post_audio %s %s %s' % (peer(message.receiver.id), message.content.name, escape(message.extra)))
    elif message.type == 'document':
        tgsender.send_typing(peer(message.receiver.id), 1) # 8
        try:
            # tgsender.send_document(peer(message.receiver.id), message.content.name, message.extra)
            tgsender.send_document(peer(message.receiver.id), message.content.name, escape(message.extra))
        except:
            tgsender.raw('post_document %s %s %s' % (peer(message.receiver.id), message.content.name, escape(message.extra)))
    elif message.type == 'sticker':
        # Stickers are sent as plain files; no typing action is emitted.
        tgsender.send_file(peer(message.receiver.id), message.content.name)
    elif message.type == 'video':
        tgsender.send_typing(peer(message.receiver.id), 1) # 4
        try:
            tgsender.send_video(peer(message.receiver.id), message.content.name)
        except:
            tgsender.raw('post_video %s %s' % (peer(message.receiver.id), message.content.name))
    elif message.type == 'voice':
        tgsender.send_typing(peer(message.receiver.id), 5)
        try:
            tgsender.send_audio(peer(message.receiver.id), message.content.name)
        except:
            tgsender.raw('post_audio %s %s' % (peer(message.receiver.id), message.content.name))
    elif message.type == 'location':
        tgsender.send_typing(peer(message.receiver.id), 1) # 9
        tgsender.send_location(peer(message.receiver.id), message.content, message.extra)
    else:
        print('UNKNOWN MESSAGE TYPE: ' + message.type)
def inbox_listener():
    """Start the pytg receiver loop and enqueue incoming messages.

    Every non-own message and every service event is converted to a
    Polaris Message, put on the global inbox queue, and the originating
    chat is marked as read (best effort).
    """
    @coroutine
    def listener():
        while (True):
            msg = (yield)
            # Skip our own outgoing messages; keep service events.
            if (msg.event == 'message' and msg.own == False) or msg.event == 'service':
                message = convert_message(msg)
                inbox.put(message)
                try:
                    # Private chats are marked read on the sender peer,
                    # groups on the group peer itself.
                    if message.receiver.id > 0:
                        tgsender.mark_read(peer(message.sender.id))
                    else:
                        tgsender.mark_read(peer(message.receiver.id))
                except:
                    # Failing to mark read is non-fatal.
                    pass
    tgreceiver.start()
    tgreceiver.message(listener())
| zhantyzgz/polaris | core/wrapper/tg.py | Python | gpl-2.0 | 8,398 |
import pytest
from pyethereum import tester, blocks
# Serpent source for a helper contract exposing double(v) -> v*2.
mul2_code = \
'''
def double(v):
return(v*2)
'''
# The helper contract is written to this file so that returnten_code's
# create() call can deploy it by filename.
filename = "mul2_qwertyuioplkjhgfdsa.se"
# Serpent source that deploys the mul2 contract and returns double(5).
returnten_code = \
'''
extern mul2: [double]
x = create("%s")
return(x.double(5))
''' % filename
def test_returnten():
    """Deploy the mul2 helper via returnten_code and check that block
    serialization round-trips to identical bytes."""
    s = tester.state()
    # Write the helper contract where returnten_code's create() expects
    # it; 'with' closes the handle (the original leaked it).
    with open(filename, 'w') as source_file:
        source_file.write(mul2_code)
    c = s.contract(returnten_code)
    s.send(tester.k0, c, 0, [])
    # Deserializing the current block and serializing it again must give
    # back the exact same bytes.
    b2 = blocks.Block.deserialize(s.db, s.block.serialize())
    assert b2.serialize() == s.block.serialize()
| joelcan/tools-eth-contract-dev | pyethereum/tests/test_serialization.py | Python | mit | 521 |
#!/usr/bin/env python
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
CLI interface for cinder management.
"""
from __future__ import print_function
import logging as python_logging
import os
import sys
import time
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import migration
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import timeutils
from cinder import i18n
i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config # noqa
from cinder.common import constants
from cinder import context
from cinder import db
from cinder.db import migration as db_migration
from cinder.db.sqlalchemy import api as db_api
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder import version
from cinder.volume import utils as vutils
CONF = cfg.CONF
# Decorators for actions
def args(*args, **kwargs):
    """Decorator factory that records argparse arguments on a function.

    Each application prepends an ``(args, kwargs)`` tuple to the
    function's ``args`` attribute, so stacked decorators end up listed
    in source order. The function itself is returned unchanged.
    """
    def _decorator(func):
        recorded = func.__dict__.setdefault('args', [])
        recorded.insert(0, (args, kwargs))
        return func
    return _decorator
class ShellCommands(object):
    """Interactive shell commands: bpython -> IPython -> python fallback."""
    def bpython(self):
        """Runs a bpython shell.
        Falls back to Ipython/python shell if unavailable
        """
        self.run('bpython')
    def ipython(self):
        """Runs an Ipython shell.
        Falls back to Python shell if unavailable
        """
        self.run('ipython')
    def python(self):
        """Runs a python shell.
        Falls back to Python shell if unavailable
        """
        self.run('python')
    @args('--shell',
          metavar='<bpython|ipython|python>',
          help='Python shell')
    def run(self, shell=None):
        """Runs a Python interactive interpreter."""
        if not shell:
            shell = 'bpython'
        if shell == 'bpython':
            try:
                import bpython
                bpython.embed()
            except ImportError:
                # bpython is not installed: degrade to IPython.
                shell = 'ipython'
        if shell == 'ipython':
            try:
                from IPython import embed
                embed()
            except ImportError:
                try:
                    # Ipython < 0.11
                    # Explicitly pass an empty list as arguments, because
                    # otherwise IPython would use sys.argv from this script.
                    import IPython
                    shell = IPython.Shell.IPShell(argv=[])
                    shell.mainloop()
                except ImportError:
                    # no IPython module
                    shell = 'python'
        if shell == 'python':
            import code
            try:
                # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try',
                # because we already know 'readline' was imported successfully.
                import rlcompleter # noqa
                readline.parse_and_bind("tab:complete")
            code.interact()
    @args('--path', required=True, help='Script path')
    def script(self, path):
        """Runs the script from the specified path with flags set properly."""
        # NOTE(review): the handle from open(path) is never closed --
        # left untouched in this documentation-only pass.
        exec(compile(open(path).read(), path, 'exec'), locals(), globals())
def _db_error(caught_exception):
    """Print a DB error with a hint to run 'cinder-manage db sync', then exit 1."""
    print('%s' % caught_exception)
    print(_("The above error may show that the database has not "
            "been created.\nPlease create a database using "
            "'cinder-manage db sync' before running this command."))
    sys.exit(1)
class HostCommands(object):
    """List hosts."""
    @args('zone', nargs='?', default=None,
          help='Availability Zone (default: %(default)s)')
    def list(self, zone=None):
        """Show a list of all physical hosts.
        Can be filtered by zone.
        args: [zone]
        """
        # Header row for the two-column table.
        print(_("%(host)-25s\t%(zone)-15s") % {'host': 'host', 'zone': 'zone'})
        ctxt = context.get_admin_context()
        services = objects.ServiceList.get_all(ctxt)
        if zone:
            services = [s for s in services if s.availability_zone == zone]
        # Deduplicate services by host name, keeping first occurrence.
        hosts = []
        for srv in services:
            if not [h for h in hosts if h['host'] == srv['host']]:
                hosts.append(srv)
        for h in hosts:
            print(_("%(host)-25s\t%(availability_zone)-15s")
                  % {'host': h['host'],
                     'availability_zone': h['availability_zone']})
class DbCommands(object):
    """Class for managing the database."""
    def __init__(self):
        pass
    @args('version', nargs='?', default=None,
          help='Database version')
    def sync(self, version=None):
        """Sync the database up to the most recent version."""
        return db_migration.db_sync(version)
    def version(self):
        """Print the current database version."""
        print(migration.db_version(db_api.get_engine(),
                                   db_migration.MIGRATE_REPO_PATH,
                                   db_migration.INIT_VERSION))
    @args('age_in_days', type=int,
          help='Purge deleted rows older than age in days')
    def purge(self, age_in_days):
        """Purge deleted rows older than a given age from cinder tables."""
        age_in_days = int(age_in_days)
        if age_in_days <= 0:
            print(_("Must supply a positive, non-zero value for age"))
            sys.exit(1)
        # An age reaching back before the epoch cannot be valid.
        if age_in_days >= (int(time.time()) / 86400):
            print(_("Maximum age is count of days since epoch."))
            sys.exit(1)
        ctxt = context.get_admin_context()
        try:
            db.purge_deleted_rows(ctxt, age_in_days)
        except db_exc.DBReferenceError:
            # Foreign-key references block the purge; details are logged.
            print(_("Purge command failed, check cinder-manage "
                    "logs for more details."))
            sys.exit(1)
class VersionCommands(object):
    """Class for exposing the codebase version."""
    def __init__(self):
        pass
    def list(self):
        # Print the cinder version string to stdout.
        print(version.version_string())
    def __call__(self):
        # Calling the category object directly defaults to listing.
        self.list()
class VolumeCommands(object):
    """Methods for dealing with a cloud in an odd state."""
    def __init__(self):
        # RPC client is created lazily on first use.
        self._client = None
    def _rpc_client(self):
        # Initialise RPC if needed and build a client for the volume topic.
        if self._client is None:
            if not rpc.initialized():
                rpc.init(CONF)
            target = messaging.Target(topic=constants.VOLUME_TOPIC)
            serializer = objects.base.CinderObjectSerializer()
            self._client = rpc.get_client(target, serializer=serializer)
        return self._client
    @args('volume_id',
          help='Volume ID to be deleted')
    def delete(self, volume_id):
        """Delete a volume, bypassing the check that it must be available."""
        ctxt = context.get_admin_context()
        volume = objects.Volume.get_by_id(ctxt, volume_id)
        host = vutils.extract_host(volume.host) if volume.host else None
        if not host:
            # Unscheduled volume: no service owns it, so just remove the
            # database record.
            print(_("Volume not yet assigned to host."))
            print(_("Deleting volume from database and skipping rpc."))
            volume.destroy()
            return
        if volume.status == 'in-use':
            print(_("Volume is in-use."))
            print(_("Detach volume from instance and then try again."))
            return
        # Fire-and-forget cast to the volume service that owns the volume.
        cctxt = self._rpc_client().prepare(server=host)
        cctxt.cast(ctxt, "delete_volume", volume_id=volume.id, volume=volume)
    @args('--currenthost', required=True, help='Existing volume host name')
    @args('--newhost', required=True, help='New volume host name')
    def update_host(self, currenthost, newhost):
        """Modify the host name associated with a volume.
        Particularly to recover from cases where one has moved
        their Cinder Volume node, or modified their backend_name in a
        multi-backend config.
        """
        ctxt = context.get_admin_context()
        volumes = db.volume_get_all_by_host(ctxt,
                                            currenthost)
        for v in volumes:
            db.volume_update(ctxt, v['id'],
                             {'host': newhost})
class ConfigCommands(object):
    """Class for exposing the flags defined by flag_file(s)."""
    def __init__(self):
        pass
    @args('param', nargs='?', default=None,
          help='Configuration parameter to display (default: %(default)s)')
    def list(self, param=None):
        """List parameters configured for cinder.
        Lists all parameters configured for cinder unless an optional argument
        is specified. If the parameter is specified we only print the
        requested parameter. If the parameter is not found an appropriate
        error is produced by .get*().
        """
        param = param and param.strip()
        if param:
            # Single parameter: CONF.get raises for unknown options.
            print('%s = %s' % (param, CONF.get(param)))
        else:
            for key, value in CONF.items():
                print('%s = %s' % (key, value))
class GetLogCommands(object):
    """Get logging information."""

    def errors(self):
        """Print every " ERROR " line found in CONF.log_dir's *.log files.

        Lines are shown newest-first, grouped under their file name; a
        summary message is printed when no errors are found.
        """
        error_found = 0
        if CONF.log_dir:
            logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]
            for log_name in logs:
                log_file = os.path.join(CONF.log_dir, log_name)
                # Read inside a context manager so the handle is closed
                # (the original left it open).
                with open(log_file, "r") as handle:
                    lines = [line.strip() for line in handle]
                lines.reverse()
                print_name = 0
                for index, line in enumerate(lines):
                    if line.find(" ERROR ") > 0:
                        error_found += 1
                        # Print the file header once, before its first hit.
                        if print_name == 0:
                            print(log_file + ":-")
                            print_name = 1
                        print(_("Line %(dis)d : %(line)s") %
                              {'dis': len(lines) - index, 'line': line})
        if error_found == 0:
            print(_("No errors in logfiles!"))

    @args('num_entries', nargs='?', type=int, default=10,
          help='Number of entries to list (default: %(default)d)')
    def syslog(self, num_entries=10):
        """Get <num_entries> of the cinder syslog events.

        Reads /var/log/syslog (or /var/log/messages) and prints the most
        recent lines mentioning "cinder"; exits 1 when neither file exists.
        """
        entries = int(num_entries)
        count = 0
        log_file = ''
        if os.path.exists('/var/log/syslog'):
            log_file = '/var/log/syslog'
        elif os.path.exists('/var/log/messages'):
            log_file = '/var/log/messages'
        else:
            print(_("Unable to find system log file!"))
            sys.exit(1)
        # Close the handle deterministically (the original leaked it).
        with open(log_file, "r") as handle:
            lines = [line.strip() for line in handle]
        lines.reverse()
        print(_("Last %s cinder syslog entries:-") % (entries))
        for line in lines:
            if line.find("cinder") > 0:
                count += 1
                print(_("%s") % (line))
                if count == entries:
                    break
        if count == 0:
            print(_("No cinder entries in syslog!"))
class BackupCommands(object):
    """Methods for managing backups."""
    def list(self):
        """List all backups.
        List all backups (including ones in progress) and the host
        on which the backup operation is running.
        """
        ctxt = context.get_admin_context()
        backups = objects.BackupList.get_all(ctxt)
        # Fixed-width, tab-separated columns keep long IDs readable.
        hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s"
        print(hdr % (_('ID'),
                     _('User ID'),
                     _('Project ID'),
                     _('Host'),
                     _('Name'),
                     _('Container'),
                     _('Status'),
                     _('Size'),
                     _('Object Count')))
        res = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d\t%-12d"
        for backup in backups:
            # The object count may be NULL in the database; show it as 0.
            object_count = 0
            if backup['object_count'] is not None:
                object_count = backup['object_count']
            print(res % (backup['id'],
                         backup['user_id'],
                         backup['project_id'],
                         backup['host'],
                         backup['display_name'],
                         backup['container'],
                         backup['status'],
                         backup['size'],
                         object_count))
    @args('--currenthost', required=True, help='Existing backup host name')
    @args('--newhost', required=True, help='New backup host name')
    def update_backup_host(self, currenthost, newhost):
        """Modify the host name associated with a backup.
        Particularly to recover from cases where one has moved
        their Cinder Backup node, and not set backup_use_same_backend.
        """
        ctxt = context.get_admin_context()
        backups = objects.BackupList.get_all_by_host(ctxt, currenthost)
        for bk in backups:
            bk.host = newhost
            bk.save()
class BaseCommand(object):
    """Shared formatting helpers for the service/cluster listing commands."""

    @staticmethod
    def _normalize_time(time_field):
        # Falsy (e.g. None) timestamps pass through untouched.
        if not time_field:
            return time_field
        return timeutils.normalize_time(time_field)

    @staticmethod
    def _state_repr(is_up):
        # Smiley for an up service, 'XXX' for a down one.
        if is_up:
            return ':-)'
        return 'XXX'
class ServiceCommands(BaseCommand):
    """Methods for managing services."""
    def list(self):
        """Show a list of all cinder services."""
        ctxt = context.get_admin_context()
        services = objects.ServiceList.get_all(ctxt)
        print_format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s %-36s"
        print(print_format % (_('Binary'),
                              _('Host'),
                              _('Zone'),
                              _('Status'),
                              _('State'),
                              _('Updated At'),
                              _('RPC Version'),
                              _('Object Version'),
                              _('Cluster')))
        for svc in services:
            # Up/down indicator comes from the BaseCommand helper.
            art = self._state_repr(utils.service_is_up(svc))
            status = 'disabled' if svc.disabled else 'enabled'
            updated_at = self._normalize_time(svc.updated_at)
            rpc_version = svc.rpc_current_version
            object_version = svc.object_current_version
            cluster = svc.cluster_name or ''
            # Only the short host name (before the first dot) is shown.
            print(print_format % (svc.binary, svc.host.partition('.')[0],
                                  svc.availability_zone, status, art,
                                  updated_at, rpc_version, object_version,
                                  cluster))
    @args('binary', type=str,
          help='Service to delete from the host.')
    @args('host_name', type=str,
          help='Host from which to remove the service.')
    def remove(self, binary, host_name):
        """Completely removes a service."""
        ctxt = context.get_admin_context()
        try:
            svc = objects.Service.get_by_args(ctxt, host_name, binary)
            svc.destroy()
        except exception.ServiceNotFound as e:
            print(_("Host not found. Failed to remove %(service)s"
                    " on %(host)s.") %
                  {'service': binary, 'host': host_name})
            print(u"%s" % e.args)
            # Exit status 2 signals "not found" to the CLI caller.
            return 2
        print(_("Service %(service)s on host %(host)s removed.") %
              {'service': binary, 'host': host_name})
class ClusterCommands(BaseCommand):
    """Methods for managing clusters."""
    def list(self):
        """Show a list of all cinder services."""
        ctxt = context.get_admin_context()
        clusters = objects.ClusterList.get_all(ctxt, services_summary=True)
        print_format = "%-36s %-16s %-10s %-5s %-20s %-7s %-12s %-20s"
        print(print_format % (_('Name'),
                              _('Binary'),
                              _('Status'),
                              _('State'),
                              _('Heartbeat'),
                              _('Hosts'),
                              _('Down Hosts'),
                              _('Updated At')))
        for cluster in clusters:
            # Up/down indicator and time normalisation via BaseCommand.
            art = self._state_repr(cluster.is_up())
            status = 'disabled' if cluster.disabled else 'enabled'
            heartbeat = self._normalize_time(cluster.last_heartbeat)
            updated_at = self._normalize_time(cluster.updated_at)
            print(print_format % (cluster.name, cluster.binary, status, art,
                                  heartbeat, cluster.num_hosts,
                                  cluster.num_down_hosts, updated_at))
    @args('--recursive', action='store_true', default=False,
          help='Delete associated hosts.')
    @args('binary', type=str,
          help='Service to delete from the cluster.')
    @args('cluster-name', type=str, help='Cluster to delete.')
    def remove(self, recursive, binary, cluster_name):
        """Completely removes a cluster."""
        ctxt = context.get_admin_context()
        try:
            cluster = objects.Cluster.get_by_id(ctxt, None, name=cluster_name,
                                                binary=binary,
                                                get_services=recursive)
        except exception.ClusterNotFound:
            print(_("Couldn't remove cluster %s because it doesn't exist.") %
                  cluster_name)
            return 2
        if recursive:
            # Remove member services first so the cluster delete below
            # cannot fail with ClusterHasHosts.
            for service in cluster.services:
                service.destroy()
        try:
            cluster.destroy()
        except exception.ClusterHasHosts:
            print(_("Couldn't remove cluster %s because it still has hosts.") %
                  cluster_name)
            return 2
        msg = _('Cluster %s successfully removed.') % cluster_name
        if recursive:
            msg = (_('%(msg)s And %(num)s services from the cluster were also '
                     'removed.') % {'msg': msg, 'num': len(cluster.services)})
        print(msg)
    @args('--full-rename', dest='partial',
          action='store_false', default=True,
          help='Do full cluster rename instead of just replacing provided '
               'current cluster name and preserving backend and/or pool info.')
    @args('current', help='Current cluster name.')
    @args('new', help='New cluster name.')
    def rename(self, partial, current, new):
        """Rename cluster name for Volumes and Consistency Groups.
        Useful when you want to rename a cluster, particularly when the
        backend_name has been modified in a multi-backend config or we have
        moved from a single backend to multi-backend.
        """
        ctxt = context.get_admin_context()
        # Convert empty strings to None
        current = current or None
        new = new or None
        # Update Volumes
        num_vols = objects.VolumeList.include_in_cluster(
            ctxt, new, partial_rename=partial, cluster_name=current)
        # Update Consistency Groups
        num_cgs = objects.ConsistencyGroupList.include_in_cluster(
            ctxt, new, partial_rename=partial, cluster_name=current)
        if num_vols or num_cgs:
            msg = _('Successfully renamed %(num_vols)s volumes and '
                    '%(num_cgs)s consistency groups from cluster %(current)s '
                    'to %(new)s')
            print(msg % {'num_vols': num_vols, 'num_cgs': num_cgs, 'new': new,
                         'current': current})
        else:
            # Nothing matched: report and signal failure with status 2.
            msg = _('No volumes or consistency groups exist in cluster '
                    '%(current)s.')
            print(msg % {'current': current})
            return 2
# Maps each top-level CLI category name to the class implementing its
# actions; each class is instantiated in add_command_parsers().
CATEGORIES = {
    'backup': BackupCommands,
    'config': ConfigCommands,
    'cluster': ClusterCommands,
    'db': DbCommands,
    'host': HostCommands,
    'logs': GetLogCommands,
    'service': ServiceCommands,
    'shell': ShellCommands,
    'version': VersionCommands,
    'volume': VolumeCommands,
}
def methods_of(obj):
    """Return non-private methods from an object.

    Get all callable methods of an object that don't start with underscore
    :return: a list of tuples of the form (method_name, method)
    """
    return [(name, getattr(obj, name))
            for name in dir(obj)
            if callable(getattr(obj, name)) and not name.startswith('_')]
def add_command_parsers(subparsers):
    """Register one sub-parser per category, and one per command action.

    Each action sub-parser gets the argparse arguments declared via the
    @args decorator on the corresponding command method.
    """
    for category, command_cls in CATEGORIES.items():
        command_object = command_cls()

        parser = subparsers.add_parser(category)
        parser.set_defaults(command_object=command_object)

        category_subparsers = parser.add_subparsers(dest='action')

        for action, action_fn in methods_of(command_object):
            parser = category_subparsers.add_parser(action)
            for args, kwargs in getattr(action_fn, 'args', []):
                parser.add_argument(*args, **kwargs)
            parser.set_defaults(action_fn=action_fn)
            parser.set_defaults(action_kwargs=[])
# Top-level oslo.config sub-command option; add_command_parsers registers
# one argparse sub-parser per CATEGORIES entry under it.
category_opt = cfg.SubCommandOpt('category',
                                 title='Command categories',
                                 handler=add_command_parsers)
def get_arg_string(args):
    """Normalize a CLI argument name into its option-attribute form.

    Leading option prefixes ('-' or '--') are stripped and dashes are
    converted to underscores so the result matches the attribute name
    oslo.config stores for the option.
    """
    if args[0] == '-':
        # Optional argument: drop the '-' or '--' prefix.  cfg drives the
        # actual ArgParser, so the prefix character is always '-'.
        args = args[2:] if args[1] == '-' else args[1:]
    # Dashes become underscores for cleaner attribute names.
    return args.replace('-', '_') if args else args
def fetch_func_args(func):
    """Collect the parsed CLI values for every @args option of *func*."""
    fn_kwargs = {}
    for args, kwargs in getattr(func, 'args', []):
        # An explicit argparse `dest` wins over the derived option name.
        name = kwargs.get('dest') or get_arg_string(args[0])
        fn_kwargs[name] = getattr(CONF.category, name)
    return fn_kwargs
def main():
    """Parse options and call the appropriate class/method."""
    # NOTE: in the original, this docstring sat *after* the first
    # statement and therefore was not a real docstring; it has been moved
    # to the top of the function.
    objects.register_all()
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
    if len(sys.argv) < 2:
        # No category/action given: print usage plus known categories.
        print(_("\nOpenStack Cinder version: %(version)s\n") %
              {'version': version.version_string()})
        print(script_name + " category action [<args>]")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print(_("\t%s") % category)
        sys.exit(2)

    try:
        CONF(sys.argv[1:], project='cinder',
             version=version.version_string())
        logging.setup(CONF, "cinder")
        python_logging.captureWarnings(True)
    except cfg.ConfigDirNotFoundError as details:
        print(_("Invalid directory: %s") % details)
        sys.exit(2)
    except cfg.ConfigFilesNotFoundError:
        # If the config file exists but is unreadable, retry the whole
        # command via sudo as the file's owner before giving up.
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
            except Exception:
                print(_('sudo failed, continuing as if nothing happened'))

        print(_('Please re-run cinder-manage as root.'))
        sys.exit(2)

    fn = CONF.category.action_fn
    fn_kwargs = fetch_func_args(fn)
    fn(**fn_kwargs)
| Nexenta/cinder | cinder/cmd/manage.py | Python | apache-2.0 | 25,926 |
import numpy as np
from numpy.linalg import norm
from math import copysign
class HyperEdge:
    """An edge shared by one or more faces.

    ``self.faces`` maps each attached face to a signed fold angle.
    Edges are identified by name; ``__eq__`` compares names only.
    """

    @staticmethod
    def edge(allEdges, name, face, angle=0):
        """Return the edge named *name*, joined to *face* at *angle*.

        If an edge with that name already exists in *allEdges*, it is
        reused and *face* is joined to it; otherwise a new edge is
        created and (best effort) appended to *allEdges*.
        """
        if angle is None:
            angle = 0
        if allEdges is not None:
            for e in allEdges:
                if e.name == name:
                    e.join(face, angle=angle)
                    return e
        e = HyperEdge(name, face, angle)
        try:
            allEdges.append(e)
        except (AttributeError, TypeError):
            # allEdges may be None or not list-like; the new edge is
            # still returned.  (Was a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            pass
        return e

    def __init__(self, name, face=None, angle=0):
        """Create an edge, optionally attached to *face* at *angle*."""
        self.name = name
        if face:
            self.faces = {face: angle}
        else:
            self.faces = {}

    def remove(self, face):
        """Detach *face* from this edge, and this edge from the face."""
        if face in self.faces:
            self.faces.pop(face)
        try:
            e = face.edges.index(self)
            face.disconnect(e)
        except (ValueError, AttributeError):
            # The face does not track this edge (or lacks the API).
            pass

    def rename(self, name):
        """Change the edge's name."""
        self.name = name

    def setAngle(self, face, angle):
        """Set the fold angle for *face*, if it is attached."""
        if face in self.faces:
            self.faces[face] = angle

    def join(self, face, fromface=None, angle=0, flipped=True):
        """Attach *face* with *angle* measured relative to *fromface*.

        The sign of the stored angle follows *fromface*'s angle and is
        negated when *flipped* is true.
        """
        baseangle = 0
        if fromface in self.faces:
            baseangle = self.faces[fromface]
        newangle = (abs(baseangle) + angle) % 360
        if flipped:
            newangle = copysign(newangle, -baseangle)
        else:
            newangle = copysign(newangle, baseangle)
        self.faces[face] = newangle

    def mergeWith(self, other, angle=0, flip=False):
        """Absorb *other*'s faces into this edge, offsetting angles by *angle*.

        TODO: flip orientation of edge (the *flip* flag is currently
        unused, as in the original implementation).
        """
        if other is None:
            return self
        # Iterate over a snapshot: the dict is mutated (pop) inside the
        # loop, which raises RuntimeError on Python 3 otherwise.
        for face in list(other.faces.keys()):
            da = other.faces.pop(face)
            face.replaceEdge(other, self)
            self.faces[face] = angle + da
        return self

    def __eq__(self, other):
        return self.name == other.name

    def __hash__(self):
        # Defining __eq__ disables the inherited __hash__ on Python 3;
        # hash by name to stay consistent with equality.
        return hash(self.name)

    def __str__(self):
        return self.name + ": " + repr(self.faces)

    def __repr__(self):
        ret = "%s#%d" % (self.name, len(self.faces))
        if len(self.faces) > 1:
            return ret + repr(self.faces.values())
        else:
            return ret
| PRECISE/ROSLab | resources/mechanics_lib/api/graphs/hyperedge.py | Python | apache-2.0 | 2,917 |
from django.conf.urls import patterns, url
# URL routes for the whatify app.  Uses string-based view references via
# django.conf.urls.patterns(), a form removed in Django 1.10.
urlpatterns = patterns(
    '',
    url(r'^$', 'whatify.views.index'),
    url(r'^search/(.+)$', 'whatify.views.search'),
    url(r'^torrent_groups/(\d+)$', 'whatify.views.get_torrent_group'),
    url(r'^torrent_groups/(\d+)/download$', 'whatify.views.download_torrent_group'),
    url(r'^torrent_groups/random$', 'whatify.views.random_torrent_groups'),
    url(r'^torrent_groups/top10$', 'whatify.views.top10_torrent_groups'),
    url(r'^artists/(\d+)$', 'whatify.views.get_artist'),
)
| grandmasterchef/WhatManager2 | whatify/urls.py | Python | mit | 531 |
import tests.model_control.test_ozone_custom_models_enabled as testmod

# Build/evaluate one pyaf model combination: Fisher transform,
# polynomial trend, hourly seasonality, SVR cycle component.
testmod.build_model( ['Fisher'] , ['PolyTrend'] , ['Seasonal_Hour'] , ['SVR'] );
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Compilation Prerequisite
import distutils.version
import encodings.ascii
import encodings.idna
import encodings.unicode_escape
import tornado.websocket
import tori.db.driver.mongodriver
# Runtime Requirements
import sys
from tori.application import Application
from tori.centre import services
def main(name, args):
    """Boot the Tori web application.

    :param name: the script name (argv[0]), used in the usage message
    :param args: remaining CLI arguments: <base_path>[ <port>]
    """
    application = Application('config/dev.xml')

    if not args:
        print('USAGE: python {} <base_path>[ <port:8000>]'.format(name))
        sys.exit(255)

    base_path = args[0]
    # Point the internal file finder at the requested base path.
    services.get('internal.finder').set_base_path(base_path)

    if len(args) > 1:
        # Optional second argument overrides the listening port.
        port = args[1]
        application.listen(port)

    application.start()

if __name__ == '__main__':
    main(sys.argv[0], sys.argv[1:])
| shiroyuki/tama | server.py | Python | mit | 786 |
from __future__ import print_function
from math import pi
from bokeh.client import push_session
from bokeh.document import Document
from bokeh.models.glyphs import Line, Quad
from bokeh.models import (
Plot, ColumnDataSource, DataRange1d, FactorRange,
LinearAxis, CategoricalAxis, Grid, Legend,
SingleIntervalTicker
)
from bokeh.sampledata.population import load_population
from bokeh.models.widgets import Select
from bokeh.models.layouts import WidgetBox, Column
# Connect to a running bokeh server and push this document into a session.
document = Document()
session = push_session(document)

# UN population data plus the currently selected revision/year/location.
# year and location are module globals mutated by the widget callbacks.
df = load_population()
revision = 2012
year = 2010
location = "World"

years = [str(x) for x in sorted(df.Year.unique())]
locations = sorted(df.Location.unique())

# Shared data source driving the age-pyramid quads.
source_pyramid = ColumnDataSource(data=dict(female=[], male=[], groups=[], shifted=[]))
def pyramid():
    """Build the age-pyramid plot fed by ``source_pyramid``.

    Male percentages are stored as negative values (see update_pyramid),
    so male quads extend left of zero and female quads extend right.
    """
    xdr = DataRange1d()
    ydr = DataRange1d()

    plot = Plot(x_range=xdr, y_range=ydr, plot_width=600, plot_height=500, toolbar_location=None)

    xaxis = LinearAxis()
    plot.add_layout(xaxis, 'below')
    # One tick every 5 years to match the 5-year age groups.
    yaxis = LinearAxis(ticker=SingleIntervalTicker(interval=5))
    plot.add_layout(yaxis, 'left')

    plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
    plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))

    male_quad = Quad(left="male", right=0, bottom="groups", top="shifted", fill_color="#3B8686")
    male_quad_glyph = plot.add_glyph(source_pyramid, male_quad)

    female_quad = Quad(left=0, right="female", bottom="groups", top="shifted", fill_color="#CFF09E")
    female_quad_glyph = plot.add_glyph(source_pyramid, female_quad)

    plot.add_layout(Legend(items=[
        ("Male" , [male_quad_glyph]),
        ("Female" , [female_quad_glyph]),
    ]))

    return plot
# Data sources for the total-population line: known history vs projection.
source_known = ColumnDataSource(data=dict(x=[], y=[]))
source_predicted = ColumnDataSource(data=dict(x=[], y=[]))
def population():
    """Build the total-population line plot (known + predicted series)."""
    xdr = FactorRange(factors=years)
    ydr = DataRange1d()

    plot = Plot(x_range=xdr, y_range=ydr, plot_width=600, plot_height=150, toolbar_location=None)

    # Rotate the year labels 45 degrees so they fit.
    plot.add_layout(CategoricalAxis(major_label_orientation=pi / 4), 'below')

    line_known = Line(x="x", y="y", line_color="violet", line_width=2)
    line_known_glyph = plot.add_glyph(source_known, line_known)

    # Predicted values use the same color but a dashed line.
    line_predicted = Line(x="x", y="y", line_color="violet", line_width=2, line_dash="dashed")
    line_predicted_glyph = plot.add_glyph(source_predicted, line_predicted)

    plot.add_layout(
        Legend(
            location="bottom_right",
            items=[("known", [line_known_glyph]), ("predicted", [line_predicted_glyph])],
        )
    )

    return plot
def update_pyramid():
    """Refresh ``source_pyramid`` for the current year/location globals."""
    pyramid = df[(df.Location == location) & (df.Year == year)]

    male = pyramid[pyramid.Sex == "Male"]
    female = pyramid[pyramid.Sex == "Female"]

    # Normalize to fractions of the total population; male values are
    # negated so their quads render to the left of zero.
    total = male.Value.sum() + female.Value.sum()

    male_percent = -male.Value / total
    female_percent = female.Value / total

    # Each quad spans one 5-year age group: bottom=groups, top=shifted.
    groups = male.AgeGrpStart.tolist()
    shifted = groups[1:] + [groups[-1] + 5]

    source_pyramid.data = dict(
        groups=groups,
        shifted=shifted,
        male=male_percent,
        female=female_percent,
    )
def update_population():
    """Refresh the known/predicted population sources for *location*."""
    population = df[df.Location == location].groupby(df.Year).Value.sum()
    # Round the revision down to a decade; years up to it are "known",
    # years from it onward are "predicted" (both include the boundary).
    aligned_revision = revision // 10 * 10

    known = population[population.index <= aligned_revision]
    predicted = population[population.index >= aligned_revision]

    source_known.data = dict(x=known.index.map(str), y=known.values)
    source_predicted.data = dict(x=predicted.index.map(str), y=predicted.values)
def update_data():
    """Refresh both plots after a year/location change."""
    update_population()
    update_pyramid()
def on_year_change(attr, old, new):
    """Widget callback: update the global *year* and redraw."""
    global year
    year = int(new)
    update_data()
def on_location_change(attr, old, new):
    """Widget callback: update the global *location* and redraw."""
    global location
    location = new
    update_data()
def create_layout():
    """Assemble the select widgets and both plots into a column layout."""
    year_select = Select(title="Year:", value="2010", options=years)
    location_select = Select(title="Location:", value="World", options=locations)

    year_select.on_change('value', on_year_change)
    location_select.on_change('value', on_location_change)

    controls = WidgetBox(children=[year_select, location_select], height=150, width=600)
    layout = Column(children=[controls, pyramid(), population()])

    return layout
# Build the UI, populate the sources, and show it in a browser session.
layout = create_layout()
update_data()
document.add_root(layout)
session.show(layout)

if __name__ == "__main__":
    document.validate()
    print("\npress ctrl-C to exit")
    # Block until the session/browser closes.
    session.loop_until_closed()
| azjps/bokeh | examples/models/population_server.py | Python | bsd-3-clause | 4,476 |
# imports/modules
import os
import random
import json
import collections
from PIL import Image
# Convert (r, g, b) into #rrggbb color
def getRGBstring(rgb):
    """Convert an (r, g, b) tuple into an '#rrggbb' hex color string.

    The tuple is unpacked inside the body instead of using the Python 2
    tuple-parameter syntax (removed by PEP 3113), so the function now
    works on Python 3 as well; call sites are unchanged.
    """
    r, g, b = rgb
    return "#" + format(r, '02x') + format(g, '02x') + format(b, '02x')
def getFreqData(img):
    """Count how many pixels of each '#rrggbb' color appear in *img*.

    Returns a collections.Counter mapping color strings to pixel counts.
    """
    width, height = img.size
    pixels = img.load()
    return collections.Counter(
        getRGBstring(pixels[col, row])
        for col in range(width)
        for row in range(height)
    )
def do_compute():
    """Compute the per-color pixel frequency of the source image and
    write the result to res/freq.json."""
    # Open the image
    origImgFile = 'res/bryce.jpg'
    origImg = Image.open(origImgFile)

    # Process the image.  freq maps "#rrggbb" color strings to pixel
    # counts, e.g. { "#33ff22": 200, "#66aa9c": 300, ... }
    freq = getFreqData(origImg)

    # Save the processed information
    output = { 'file': origImgFile,
               'freq': freq }

    s = json.dumps(output, indent = 4)
    # Use a context manager so the output file is always flushed and
    # closed (the original left the handle open, relying on interpreter
    # shutdown to flush it).
    with open("res/freq.json", 'w') as f:
        f.write(s)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import urllib
from urllib.request import Request
import hmac
import hashlib
import base64
import datetime
import sys
# Validate CLI arguments: target host, bucket name, object name.
if len(sys.argv) < 4:
    # NOTE(review): '{script_name}' is printed literally -- the message is
    # neither an f-string nor .format()ed; confirm intent.
    print('bad syntax, usage: {script_name} host bname oname')
    exit()

host, bname, oname = sys.argv[1], sys.argv[2], sys.argv[3]
method = 'DELETE'

# demouserid
#access_key = 'Z2ETKC4RQFTR4XBQ1A72'
#secret_key = 'vqdQGtmruGW855mduffA8lsLx+ot9iXIb9QTtT2I'
#hr
#access_key = "9M3C3NCBEWSRDPRJGL0O"
#secret_key = "QCS0ju6dkqblLVQe966KwuE2Cg6cCfS/S2u2K+Qt"
# demo from local vcenter
#access_key = 'YG9YGNNYN46ARJH1MOEJ'
#secret_key = 'mxzTzqF7XZx00hmy7n4qzUQ5mKinYywuRD2xV4ka'
# demo from wx
# NOTE(review): hard-coded credentials checked into source; rotate them
# and load from the environment or a config file instead.
access_key = '73SAVVNQIIKSJCIFUDZF'
secret_key = 'aZCMX8DwqSRIx4MgFbCctMNlyZTld28aeYhsDZYM'
#eleme
#access_key = 'VI8LSAC5JOFE99B066FC'
#secret_key = 'm6ok1UbM+eTBqXXHRsAJ6PbUh3fmZDDfmOnHKk3M'

# Build the DELETE request for http://host/bucket/object.
req = Request('http://' + host + '/' + bname + '/' + oname,
        method = method)

# RFC 1123 date header, required for the AWS-style signature below.
timestr = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')

req.add_header('Host', host)
req.add_header('Date', timestr)
#req.add_header('x-amz-acl', 'public-read-write')

# String-to-sign, AWS signature v2 layout:
#   METHOD\nContent-MD5\nContent-Type\nDate\nCanonicalizedResource
hstr = ''
hstr += method + '\n'
hstr += '\n'
hstr += '\n'
hstr += timestr + '\n'
#hstr += 'x-amz-acl:public-read-write\n'
hstr += '/' + bname + '/' + oname
#print('hstr:%s' % (hstr,))

# Sign with HMAC-SHA1 and base64-encode the digest.
key = bytearray(secret_key, 'utf-8')
hres = hmac.new(key, hstr.encode('utf-8'), hashlib.sha1).digest()
#print('type:%s' % (type(hres, )))
hres = base64.b64encode(hres)
hres = hres.decode('utf-8')
#print('hres:%s' % (hres,))
req.add_header('Authorization', 'AWS ' + access_key + ':' + hres)

# Execute the request and report the HTTP status code.
with urllib.request.urlopen(req) as f:
    #print(f.read().decode('utf-8'))
    print(f.status)
import os
import os.path
from tornado.escape import xhtml_escape
from pygments import highlight, util as pyg_util
from pygments.lexers import get_lexer_for_filename
from pygments.formatters import HtmlFormatter
def preview(path, filesize, mimetype):
    """Dispatch to the preview generator registered for *mimetype*.

    Falls back to the major-type '*' handler, then to the global '*'
    handler, mirroring the structure of ``mime_handlers``.
    """
    major, minor = mimetype.split('/', 1)
    if major not in mime_handlers:
        return mime_handlers['*'](path, filesize, mimetype)
    handlers = mime_handlers[major]
    try:
        handler = handlers[minor]
    except KeyError:
        handler = handlers['*']
    return handler(path, filesize, mimetype)
def preview_plain_text(path, filesize, mimetype):
    """Yield HTML chunks previewing a text file.

    Tries pygments syntax highlighting based on the filename; when no
    lexer matches, falls back to escaping the raw lines.
    """
    with open(path) as fh:
        try: # attempt to syntax highlight
            lexer = get_lexer_for_filename(path)
            formatter = HtmlFormatter(linenos=True, cssclass="source")
            yield '<style type="text/css">'
            yield formatter.get_style_defs()
            yield '</style>'
            yield highlight(''.join(fh),lexer,formatter)
        except pyg_util.ClassNotFound:
            #TODO: buffer this, use async
            # http://www.tornadoweb.org/documentation/ioloop.html
            for line in fh:
                yield xhtml_escape(line)
def preview_image(path, filesize, mimetype):
    """Yield an <img> tag for *path*.

    Side effect: symlinks the image into the frontend's img_links
    directory (if not already present) so the web server can serve it.
    """
    name = os.path.basename(path)
    tmp_img = os.path.join(os.path.dirname(__file__), '../frontend/img_links', name)
    if not os.path.lexists(tmp_img): # lol hax
        os.symlink(path,tmp_img)
    yield '<img src="/static/img_links/%s"></img>'%name
def preview_pdf(path, filesize, mimetype):
    """Yield a placeholder preview for PDF files."""
    yield 'PDF file: %s' % os.path.basename(path)
    yield 'TODO: inline PDF viewer'
def preview_binary(path, filesize, mimetype):
    """Yield a one-line summary for files that cannot be rendered inline."""
    summary = 'Binary file: %s (%s), %d bytes' % (
        os.path.basename(path), mimetype, filesize)
    yield summary
# Map of mime types to handler functions. Dispatched on by preview().
# Structure: major type -> {minor type -> handler, '*': fallback}, plus a
# top-level '*' fallback for unknown major types.
mime_handlers = {
    'text': {
        '*': preview_plain_text,
    },
    'image': {
        '*': preview_image,
    },
    'application': {
        'pdf': preview_pdf,
        '*': preview_binary,
    },
    '*': preview_binary,
}
| perimosocordiae/whish | backend/preview.py | Python | mit | 1,990 |
# -*- coding: utf-8 -*-
"""
equip.analysis.ast
~~~~~~~~~~~~~~~~~~
Minimal, high-level AST for the Python bytecode.
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
from .stmt import Statement
from .expr import Expression
| neuroo/equip | equip/analysis/ast/__init__.py | Python | apache-2.0 | 289 |
import unittest
from django.test import TestCase
from .test_backends import BackendTests
class TestDBBackend(BackendTests, TestCase):
    # Runs the shared BackendTests suite against the database search backend.
    backend_path = 'wagtail.wagtailsearch.backends.db.DBSearch'

    @unittest.expectedFailure
    def test_callable_indexed_field(self):
        # The DB backend is expected to fail the inherited callable-field
        # test; marked expectedFailure rather than skipped.
        super(TestDBBackend, self).test_callable_indexed_field()
| jorge-marques/wagtail | wagtail/wagtailsearch/tests/test_db_backend.py | Python | bsd-3-clause | 341 |
"""
Stories for Fimfarchive.
"""
#
# Fimfarchive, preserves stories from Fimfiction.
# Copyright (C) 2018 Joakim Soderlund
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from typing import TYPE_CHECKING, Any, Dict, Iterable, TypeVar
from fimfarchive.exceptions import StorySourceError
if TYPE_CHECKING:
from fimfarchive.fetchers import Fetcher
from fimfarchive.flavors import Flavor
else:
Fetcher = TypeVar('Fetcher')
Flavor = TypeVar('Flavor')
__all__ = (
'Story',
)
class Story:
    """
    Represents a story.
    """

    def __init__(
            self,
            key: int,
            fetcher: Fetcher = None,
            meta: Dict[str, Any] = None,
            data: bytes = None,
            flavors: Iterable[Flavor] = (),
            ) -> None:
        """
        Constructor.

        Args:
            key: Primary key of the story.
            fetcher: Fetcher to use for lazy fetching.
            meta: Meta to populate the story with.
            data: Data to populate the story with.
            flavors: Content type hints.
        """
        lazy = meta is None or data is None
        if lazy and fetcher is None:
            raise ValueError("Story must contain fetcher if lazy.")

        self.key = key
        self.fetcher = fetcher
        self.flavors = set(flavors)

        self._meta = meta
        self._data = data

    @property
    def is_fetched(self) -> bool:
        """
        True if no more fetches are necessary.
        """
        return self.has_meta and self.has_data

    @property
    def has_meta(self) -> bool:
        """
        True if story meta has been fetched.
        """
        return self._meta is not None

    @property
    def meta(self) -> Dict[str, Any]:
        """
        Returns the story meta, fetching it lazily if necessary.

        Raises:
            InvalidStoryError: If a valid story is not found.
            StorySourceError: If source does not return any data.
        """
        if not self.has_meta and self.fetcher:
            self._meta = self.fetcher.fetch_meta(self.key)

        if self._meta is None:
            raise StorySourceError("Meta is missing.")

        return self._meta

    @property
    def has_data(self) -> bool:
        """
        True if story data has been fetched.
        """
        return self._data is not None

    @property
    def data(self) -> bytes:
        """
        Returns the story data, fetching it lazily if necessary.

        Raises:
            InvalidStoryError: If a valid story is not found.
            StorySourceError: If source does not return any data.
        """
        if not self.has_data and self.fetcher:
            self._data = self.fetcher.fetch_data(self.key)

        if self._data is None:
            raise StorySourceError("Data is missing.")

        return self._data

    def merge(self, **params) -> 'Story':
        """
        Returns a shallow copy, optionally replacing attributes.

        Args:
            **params: Overrides parameters from the current instance.

        Raises:
            TypeError: If passed an unexpected parameter.
        """
        # Strip the leading underscore from private attrs (_meta, _data)
        # so the values round-trip through the constructor.
        kwargs = {}
        for attr, value in vars(self).items():
            kwargs[attr.lstrip('_')] = value
        kwargs.update(params)

        return type(self)(**kwargs)
| JockeTF/fimfarchive | fimfarchive/stories.py | Python | gpl-3.0 | 3,919 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2012 Ralf Klammer <milkbread@freenet.de>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
'''Enhances builder connections, provides object to access glade objects'''
from gi.repository import GObject, Gtk # pylint: disable=E0611
import inspect
import functools
import logging
logger = logging.getLogger('maproj_lib')
from xml.etree.cElementTree import ElementTree
# this module is big so uses some conventional prefixes and postfixes
# *s list, except self.widgets is a dictionary
# *_dict dictionary
# *name string
# ele_* element in a ElementTree
# pylint: disable=R0904
# the many public methods is a feature of Gtk.Builder
class Builder(Gtk.Builder):
    ''' extra features
    connects glade defined handler to default_handler if necessary
    auto connects widget to handler with matching name or alias
    auto connects several widgets to a handler via multiple aliases
    allow handlers to lookup widget name
    logs every connection made, and any on_* not made
    '''

    def __init__(self):
        Gtk.Builder.__init__(self)
        # widget id -> widget instance, filled by add_from_file()
        self.widgets = {}
        # handler names declared in the glade file (values stay None here)
        self.glade_handler_dict = {}
        # (widget_name, signal_name, handler_name) triples already wired
        self.connections = []
        # widget instance -> widget id, for get_name()
        self._reverse_widget_dict = {}

    # pylint: disable=R0201
    # this is a method so that a subclass of Builder can redefine it
    def default_handler(self,
        handler_name, filename, *args, **kwargs):
        '''helps the apprentice guru

    glade defined handlers that do not exist come here instead.
    An apprentice guru might wonder which signal does what he wants,
    now he can define any likely candidates in glade and notice which
    ones get triggered when he plays with the project.

    this method does not appear in Gtk.Builder'''
        logger.debug('''tried to call non-existent function:%s()
        expected in %s
        args:%s
        kwargs:%s''', handler_name, filename, args, kwargs)
    # pylint: enable=R0201

    def get_name(self, widget):
        ''' allows a handler to get the name (id) of a widget

        this method does not appear in Gtk.Builder'''
        return self._reverse_widget_dict.get(widget)

    def add_from_file(self, filename):
        '''parses xml file and stores wanted details'''
        Gtk.Builder.add_from_file(self, filename)

        # extract data for the extra interfaces
        tree = ElementTree()
        tree.parse(filename)

        ele_widgets = tree.getiterator("object")
        for ele_widget in ele_widgets:
            name = ele_widget.attrib['id']
            widget = self.get_object(name)

            # populate indexes - a dictionary of widgets
            self.widgets[name] = widget

            # populate a reversed dictionary
            self._reverse_widget_dict[widget] = name

            # populate connections list
            ele_signals = ele_widget.findall("signal")

            connections = [
                (name,
                ele_signal.attrib['name'],
                ele_signal.attrib['handler']) for ele_signal in ele_signals]

            if connections:
                self.connections.extend(connections)

        # Record every handler name mentioned anywhere in the file.
        ele_signals = tree.getiterator("signal")
        for ele_signal in ele_signals:
            self.glade_handler_dict.update(
            {ele_signal.attrib["handler"]: None})

    def connect_signals(self, callback_obj):
        '''connect the handlers defined in glade

        reports successful and failed connections
        and logs call to missing handlers'''
        filename = inspect.getfile(callback_obj.__class__)
        callback_handler_dict = dict_from_callback_obj(callback_obj)

        connection_dict = {}
        connection_dict.update(self.glade_handler_dict)
        connection_dict.update(callback_handler_dict)
        for item in connection_dict.items():
            if item[1] is None:
                # the handler is missing so reroute to default_handler
                handler = functools.partial(
                    self.default_handler, item[0], filename)

                connection_dict[item[0]] = handler

                # replace the run time warning
                logger.warn("expected handler '%s' in %s",
                 item[0], filename)

        # connect glade define handlers
        Gtk.Builder.connect_signals(self, connection_dict)

        # let's tell the user how we applied the glade design
        for connection in self.connections:
            widget_name, signal_name, handler_name = connection
            logger.debug("connect builder by design '%s', '%s', '%s'",
             widget_name, signal_name, handler_name)

    def get_ui(self, callback_obj=None, by_name=True):
        '''Creates the ui object with widgets as attributes

        connects signals by 2 methods
        this method does not appear in Gtk.Builder'''

        result = UiFactory(self.widgets)

        # Hook up any signals the user defined in glade
        if callback_obj is not None:
            # connect glade define handlers
            self.connect_signals(callback_obj)

            if by_name:
                auto_connect_by_name(callback_obj, self)

        return result
# pylint: disable=R0903
# this class deliberately does not provide any public interfaces
# apart from the glade widgets
class UiFactory():
    ''' provides an object with attributes as glade widgets'''
    def __init__(self, widget_dict):
        self._widget_dict = widget_dict
        # Expose every widget as an attribute named by its glade id.
        for (widget_name, widget) in widget_dict.items():
            setattr(self, widget_name, widget)

        # Mangle any non-usable names (like with spaces or dashes)
        # into pythonic ones
        cannot_message = """cannot bind ui.%s, name already exists
        consider using a pythonic name instead of design name '%s'"""
        consider_message = """consider using a pythonic name instead of design name '%s'"""
        for (widget_name, widget) in widget_dict.items():
            pyname = make_pyname(widget_name)
            if pyname != widget_name:
                if hasattr(self, pyname):
                    logger.debug(cannot_message, pyname, widget_name)
                else:
                    logger.debug(consider_message, widget_name)
                    setattr(self, pyname, widget)

        # NOTE(review): special methods are looked up on the type, not the
        # instance, on new-style classes -- assigning __iter__ here will
        # not make `for o in self` work; confirm intent.
        def iterator():
            '''Support 'for o in self' '''
            return iter(widget_dict.values())
        setattr(self, '__iter__', iterator)

    def __getitem__(self, name):
        'access as dictionary where name might be non-pythonic'
        return self._widget_dict[name]
# pylint: enable=R0903
def make_pyname(name):
    ''' mangles non-pythonic names into pythonic ones

    Letters and underscores are kept; digits are kept except in the first
    position; every other character becomes an underscore.
    '''
    chars = []
    for ch in name:
        keep = ch.isalpha() or ch == '_' or (chars and ch.isdigit())
        chars.append(ch if keep else '_')
    return ''.join(chars)
# Until bug https://bugzilla.gnome.org/show_bug.cgi?id=652127 is fixed, we
# need to reimplement inspect.getmembers. GObject introspection doesn't
# play nice with it.
def getmembers(obj, check):
    """Reimplementation of inspect.getmembers tolerant of GObject quirks.

    Works around https://bugzilla.gnome.org/show_bug.cgi?id=652127:
    reading some attributes of introspected objects can raise, so
    attribute-access failures are skipped instead of propagating.

    :param obj: object to inspect
    :param check: predicate applied to each attribute value
    :return: sorted list of (name, value) pairs whose value passes *check*
    """
    members = []
    for name in dir(obj):
        try:
            attr = getattr(obj, name)
        except Exception:
            # Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; Exception is enough here.
            continue
        if check(attr):
            members.append((name, attr))
    members.sort()
    return members
def dict_from_callback_obj(callback_obj):
    '''a dictionary interface to callback_obj

    Returns a mapping of handler name -> bound method, including every
    alias a method carries, e.g.

        @alias('on_btn_foo_clicked')
        @alias('on_tool_foo_activate')
        def on_menu_foo_activate(self): ...
    '''
    methods = getmembers(callback_obj, inspect.ismethod)

    # Start with the real method names, then overlay alias entries so an
    # alias pointing at a method wins over a same-named method.
    results = dict(methods)
    for _method_name, method in methods:
        for alias in getattr(method, 'aliases', []):
            results[alias] = method
    return results
def auto_connect_by_name(callback_obj, builder):
    '''finds handlers like on_<widget_name>_<signal> and connects them

    i.e. find widget,signal pair in builder and call
    widget.connect(signal, on_<widget_name>_<signal>)'''

    callback_handler_dict = dict_from_callback_obj(callback_obj)

    for item in builder.widgets.items():
        (widget_name, widget) = item
        signal_ids = []
        try:
            # Collect signal ids from the widget's type and every ancestor
            # type in the GObject hierarchy.
            widget_type = type(widget)
            while widget_type:
                signal_ids.extend(GObject.signal_list_ids(widget_type))
                widget_type = GObject.type_parent(widget_type)
        except RuntimeError: # pylint wants a specific error
            # presumably raised by GObject.type_parent when the root of
            # the type hierarchy is reached -- confirm against pygobject.
            pass
        signal_names = [GObject.signal_name(sid) for sid in signal_ids]

        # Now, automatically find any the user didn't specify in glade
        for sig in signal_names:
            # using convention suggested by glade
            sig = sig.replace("-", "_")
            handler_names = ["on_%s_%s" % (widget_name, sig)]

            # Using the convention that the top level window is not
            # specified in the handler name. That is use
            # on_destroy() instead of on_windowname_destroy()
            if widget is callback_obj:
                handler_names.append("on_%s" % sig)

            do_connect(item, sig, handler_names,
             callback_handler_dict, builder.connections)

    log_unconnected_functions(callback_handler_dict, builder.connections)
def do_connect(item, signal_name, handler_names,
               callback_handler_dict, connections):
    '''connect this signal to an unused handler'''
    widget_name, widget = item
    for handler_name in handler_names:
        if handler_name not in callback_handler_dict:
            continue
        connection = (widget_name, signal_name, handler_name)
        if connection in connections:
            # Already wired up (e.g. declared in the glade file).
            continue
        widget.connect(signal_name, callback_handler_dict[handler_name])
        connections.append(connection)
        logger.debug("connect builder by name '%s','%s', '%s'",
                     widget_name, signal_name, handler_name)
def log_unconnected_functions(callback_handler_dict, connections):
    '''log functions like on_* that we could not connect'''
    connected = [c[2] for c in connections]
    unconnected = [name for name in callback_handler_dict
                   if name.startswith('on_')]

    for name in connected:
        try:
            unconnected.remove(name)
        except ValueError:
            # Handler was connected but is not an on_* candidate.
            pass

    for name in unconnected:
        logger.debug("Not connected to builder '%s'", name)
| milkbread/MapRoj | maproj_lib/Builder.py | Python | gpl-3.0 | 11,391 |
import sqlite3
import discord_logging
log = discord_logging.init_logging()
import static
from database import Database
from classes.subscription import Subscription
# Fresh target DB handle; wipe any subscriptions from a previous run.
new_db = Database()
new_db.session.query(Subscription).delete(synchronize_session='fetch')

# Only subscriptions to these (lower-cased) author names are migrated.
valid_authors = set()
authors_file_read = open("valid_authors.txt", 'r')
for line in authors_file_read:
	valid_authors.add(line.strip().lower())
authors_file_read.close()

# Cache subreddits and users to avoid repeated DB lookups per row.
subreddits = {}
for subreddit in new_db.get_all_subreddits():
	subreddits[subreddit.name.lower()] = subreddit

user_map = {}
count_subscriptions = 0
invalid_subscriptions_file_write = open("invalid_subscriptions.txt", 'w')

dbConn = sqlite3.connect(static.DB_TO_MIGRATE_FROM)
c = dbConn.cursor()
log.info(f"Starting subscriptions")
for row in c.execute('''
	select Subscriber, SubscribedTo, Subreddit, Single
	from subscriptions
	'''):
	# row: (subscriber, subscribed-to author or 'sub', subreddit, single-shot flag)
	if row[1] in valid_authors or row[1] == 'sub':
		subreddit = subreddits.get(row[2])
		if subreddit is not None:
			subscriber = user_map.get(row[0])
			if subscriber is None:
				subscriber = new_db.get_or_add_user(row[0])
				user_map[row[0]] = subscriber
			if row[1] == 'sub':
				# 'sub' marks a whole-subreddit subscription (no author).
				author = None
			else:
				author = user_map.get(row[1])
				if author is None:
					author = new_db.get_or_add_user(row[1])
					user_map[row[1]] = author
			subscription = Subscription(
				subscriber=subscriber,
				author=author,
				subreddit=subreddit,
				recurring=not row[3]
			)
			new_db.add_subscription(subscription)
		else:
			# Unknown subreddit: record and skip (ignore write errors).
			try:
				invalid_subscriptions_file_write.write(f"sub: r/{row[2]} u/{row[1]} u/{row[0]} : {row[3]}\n")
			except Exception:
				pass
	else:
		# Author not in the whitelist: record and skip.
		try:
			invalid_subscriptions_file_write.write(f"user: r/{row[2]} u/{row[1]} u/{row[0]} : {row[3]}\n")
		except Exception:
			pass

	count_subscriptions += 1
	if count_subscriptions % 1000 == 0:
		# Commit in batches of 1000 rows.
		log.info(f"Added {count_subscriptions} subscriptions")
		new_db.commit()

log.info(f"Finished adding {count_subscriptions} subscriptions")

dbConn.close()
new_db.close()
invalid_subscriptions_file_write.close()
| Watchful1/RedditSubsBot | scripts/migrate_subscriptions.py | Python | mit | 2,043 |
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from .views import ProductDetailView, ProductListView, ProductVariationListView, CategoryListView, CategoryDetailView#, pdf_view
# URL routes for the products app: product list/detail, per-product
# inventory (variation) listing, and category list/detail pages.
urlpatterns = [
    url(r'^(?P<pk>\d+)/$', ProductDetailView.as_view(), name='product_detail'),
    url(r'^$', ProductListView.as_view(), name='product_list'),
    url(r'^(?P<pk>\d+)/inventory/$', ProductVariationListView.as_view(), name='product_variation_list'),
    url(r'^categories/$', CategoryListView.as_view(), name='category_list'),
    url(r'^categories/(?P<slug>[\w-]+)/$', CategoryDetailView.as_view(), name='category_detail'),
    # url(r'^test_pdf/(?P<slug>[\w-]+)/$', pdf_view, name='pdf_test'),
]
| maistrovas/Internet-Store | Internet_store/products/urls.py | Python | mit | 840 |
import hashlib
import http.server
import json
import os
import re
import shutil
import socketserver
import subprocess
from itertools import chain
from multiprocessing import Process
from shutil import rmtree, which
from subprocess import check_call
import requests
from pkgpanda.exceptions import FetchError, ValidationError
# Canonical JSON encoding settings used throughout pkgpanda: sorted keys,
# two-space indent, and no space after the ':' separator, so output is
# deterministic and diff-friendly.
json_prettyprint_args = dict(
    sort_keys=True,
    indent=2,
    separators=(',', ':'),
)
def variant_str(variant):
    """Return a string representation of variant (empty string for default)."""
    return '' if variant is None else variant
def variant_name(variant):
    """Return a human-readable string representation of variant."""
    return '<default>' if variant is None else variant
def variant_prefix(variant):
    """Return a filename prefix for variant ('' for default, 'name.' otherwise)."""
    return '' if variant is None else variant + '.'
def download(out_filename, url, work_dir):
    """Fetch `url` into the absolute path `out_filename`.

    file:// URLs are copied locally (relative sources resolved against
    `work_dir`); everything else is streamed via `requests`. On any failure
    the partially written output file is removed and a FetchError is raised,
    chained to the underlying exception.
    """
    assert os.path.isabs(out_filename)
    assert os.path.isabs(work_dir)
    work_dir = work_dir.rstrip('/')
    # Strip off whitespace to make it so scheme matching doesn't fail because
    # of simple user whitespace.
    url = url.strip()
    # Handle file:// urls specially since requests doesn't know about them.
    try:
        if url.startswith('file://'):
            src_filename = url[len('file://'):]
            if not os.path.isabs(src_filename):
                src_filename = work_dir + '/' + src_filename
            shutil.copyfile(src_filename, out_filename)
        else:
            # Download the file.
            with open(out_filename, "w+b") as f:
                r = requests.get(url, stream=True)
                # NOTE(review): requests follows redirects by default, so a 301
                # surfacing here is unusual -- confirm whether this guard is
                # still needed for the mirrors it was written against.
                if r.status_code == 301:
                    raise Exception("got a 301")
                r.raise_for_status()
                for chunk in r.iter_content(chunk_size=4096):
                    f.write(chunk)
    except Exception as fetch_exception:
        rm_passed = False
        # try / except so if remove fails we don't get an exception during an exception.
        # Sets rm_passed to true so if this fails we can include a special error message in the
        # FetchError
        try:
            os.remove(out_filename)
            rm_passed = True
        except Exception:
            pass
        raise FetchError(url, out_filename, fetch_exception, rm_passed) from fetch_exception
def download_atomic(out_filename, url, work_dir):
    """Download `url` to `out_filename` all-or-nothing.

    The payload is fetched into a sibling '.tmp' file and renamed into place
    only on success, so readers never observe a partially written file. A
    failed download cleans up the temp file and re-raises the FetchError.
    """
    assert os.path.isabs(out_filename)
    tmp_filename = out_filename + '.tmp'
    try:
        download(tmp_filename, url, work_dir)
        os.rename(tmp_filename, out_filename)
    except FetchError:
        # Best-effort cleanup. Catch only OSError: the previous bare `except:`
        # would also have swallowed KeyboardInterrupt/SystemExit raised while
        # removing the temp file.
        try:
            os.remove(tmp_filename)
        except OSError:
            pass
        raise
def extract_tarball(path, target):
    """Extract the tarball into target.

    If there are any errors, delete the folder being extracted to.
    """
    # TODO(cmaloney): Validate extraction will pass before unpacking as much as possible.
    # TODO(cmaloney): Unpack into a temporary directory then move into place to
    # prevent partial extraction from ever laying around on the filesystem.
    try:
        assert os.path.exists(path), "Path doesn't exist but should: {}".format(path)
        check_call(['mkdir', '-p', target])
        check_call(['tar', '-xf', path, '-C', target])
    except:
        # If there are errors, we can't really cope since we are already in an error state.
        # The bare except is deliberate: any failure (including interrupts) must
        # not leave a half-extracted tree behind, and the exception is re-raised.
        rmtree(target, ignore_errors=True)
        raise
def load_json(filename):
    """Parse the JSON document in `filename`.

    Invalid JSON is re-raised as a ValueError that includes the filename.
    """
    with open(filename) as fileobj:
        try:
            return json.load(fileobj)
        except ValueError as ex:
            raise ValueError("Invalid JSON in {0}: {1}".format(filename, ex)) from ex
def make_file(name):
    """Ensure a file exists at `name` without truncating existing content."""
    # Append mode creates the file if missing and leaves existing bytes alone.
    open(name, 'a').close()
def write_json(filename, data):
    """Serialize `data` to `filename` using the canonical pretty-print settings."""
    with open(filename, "w+") as fileobj:
        return json.dump(data, fileobj, **json_prettyprint_args)
def write_string(filename, data):
    """Write `data` to `filename`, truncating any existing content.

    Returns the number of characters written.
    """
    with open(filename, "w+") as fileobj:
        return fileobj.write(data)
def load_string(filename):
    """Read `filename` and return its contents stripped of surrounding whitespace."""
    with open(filename) as fileobj:
        contents = fileobj.read()
    return contents.strip()
def json_prettyprint(data):
    """Return `data` encoded with the module's canonical pretty-print settings."""
    return json.dumps(data, **json_prettyprint_args)
def if_exists(fn, *args, **kwargs):
    """Call `fn(*args, **kwargs)`, mapping FileNotFoundError to a None result."""
    try:
        result = fn(*args, **kwargs)
    except FileNotFoundError:
        return None
    return result
def sha1(filename):
    """Return the hex SHA-1 digest of the file at `filename`."""
    digest = hashlib.sha1()
    with open(filename, 'rb') as fileobj:
        # Stream in 4 KiB chunks; iter() stops at the b'' sentinel (EOF).
        for block in iter(lambda: fileobj.read(4096), b''):
            digest.update(block)
    return digest.hexdigest()
def expect_folder(path, files):
    """Assert that directory `path` contains exactly the entries in `files`."""
    assert set(os.listdir(path)) == set(files)
def expect_fs(folder, contents):
    """Recursively assert that `folder` matches the expected layout `contents`.

    `contents` is either a list of entry names, or a dict mapping entry name
    to a nested expectation (None means "present, but don't descend").
    """
    if isinstance(contents, list):
        expect_folder(folder, contents)
        return
    if isinstance(contents, dict):
        expect_folder(folder, contents.keys())
        for entry, nested in contents.items():
            if nested is not None:
                expect_fs(os.path.join(folder, entry), nested)
        return
    raise ValueError("Invalid type {0} passed to expect_fs".format(type(contents)))
def make_tar(result_filename, change_folder):
    """Pack the contents of `change_folder` into the xz tarball `result_filename`.

    Ownership is normalized to numeric uid/gid 0 so package tarballs are
    reproducible regardless of who builds them. Compression uses parallel
    `pxz` when it is on PATH, otherwise tar's built-in xz (-J).
    """
    tar_cmd = ["tar", "--numeric-owner", "--owner=0", "--group=0"]
    if which("pxz"):
        tar_cmd += ["--use-compress-program=pxz", "-cf"]
    else:
        tar_cmd += ["-cJf"]
    tar_cmd += [result_filename, "-C", change_folder, "."]
    check_call(tar_cmd)
def rewrite_symlinks(root, old_prefix, new_prefix):
    """Re-point every symlink under `root` whose target starts with old_prefix."""
    # Find the symlinks and rewrite them from old_prefix to new_prefix
    # All symlinks not beginning with old_prefix are ignored because
    # packages may contain arbitrary symlinks.
    # NOTE(review): startswith() is a plain string-prefix test, so
    # old_prefix='/opt/x' would also match a target '/opt/xy/...'. Confirm
    # prefixes passed in always end at a path-component boundary.
    for root_dir, dirs, files in os.walk(root):
        for name in chain(files, dirs):
            full_path = os.path.join(root_dir, name)
            if os.path.islink(full_path):
                # Rewrite old_prefix to new_prefix if present.
                target = os.readlink(full_path)
                if target.startswith(old_prefix):
                    new_target = os.path.join(new_prefix, target[len(old_prefix) + 1:].lstrip('/'))
                    # Remove the old link and write a new one.
                    os.remove(full_path)
                    os.symlink(new_target, full_path)
def check_forbidden_services(path, services):
    """Check if package contains systemd services that may break DC/OS

    This function checks the contents of systemd's unit file dirs
    (dcos.target.wants and dcos.target.wants_<role> variants) and raises if
    any reserved service names are present.

    Args:
        path: path where the package contents are
        services: list of reserved services to look for

    Raises:
        ValidationError: Reserved service names were found inside the package
    """
    services_dir_regexp = re.compile(r'dcos.target.wants(?:_.+)?')
    forbidden_srv_set = set(services)
    pkg_srv_set = set()
    # Collect every unit name referenced by the package's wants directories.
    for direntry in os.listdir(path):
        if not services_dir_regexp.match(direntry):
            continue
        pkg_srv_set.update(set(os.listdir(os.path.join(path, direntry))))
    found_units = forbidden_srv_set.intersection(pkg_srv_set)
    if found_units:
        # Fixed typo in the user-facing message ("Reverved" -> "Reserved").
        msg = "Reserved unit names found: " + ','.join(found_units)
        raise ValidationError(msg)
def run(cmd, *args, **kwargs):
    """Run `cmd`, echo both output streams, and return decoded stdout.

    Raises subprocess.CalledProcessError on a non-zero exit status; asserts
    that the command wrote nothing to stderr.
    """
    child = subprocess.Popen(cmd, *args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
    out_bytes, err_bytes = child.communicate()
    out_text = out_bytes.decode('utf-8')
    err_text = err_bytes.decode('utf-8')
    print("STDOUT: ", out_text)
    print("STDERR: ", err_text)
    if child.returncode != 0:
        raise subprocess.CalledProcessError(child.returncode, cmd)
    assert len(err_bytes) == 0
    return out_text
def launch_server(directory):
    # Serve the test repo over HTTP on port 8000; serve_forever() never
    # returns, so this is meant to run in a child process (see TestRepo).
    # NOTE(review): the `directory` argument is ignored -- the served
    # directory is hard-coded to resources/repo. Confirm that is intended.
    os.chdir("resources/repo")
    httpd = socketserver.TCPServer(
        ("", 8000),
        http.server.SimpleHTTPRequestHandler)
    httpd.serve_forever()
class TestRepo:
    """Context manager that serves a package repo over HTTP in a child process."""
    def __init__(self, repo_dir):
        self.__dir = repo_dir
    def __enter__(self):
        # BUG FIX: `args` must be a tuple. `(self.__dir)` is just the string
        # itself, so multiprocessing unpacked it character-by-character into
        # positional arguments for launch_server.
        self.__server = Process(target=launch_server, args=(self.__dir,))
        self.__server.start()
    def __exit__(self, exc_type, exc_value, traceback):
        # BUG FIX: launch_server runs serve_forever() and never exits on its
        # own, so join() alone blocked forever. Stop the child first.
        self.__server.terminate()
        self.__server.join()
def resources_test_dir(path):
    """Map a relative `path` into the pkgpanda test-resources tree."""
    assert not path.startswith('/')
    return "pkgpanda/test_resources/" + path
| xinxian0458/dcos | pkgpanda/util.py | Python | apache-2.0 | 8,247 |
from .directory_subnav_definition import (
create_directory_subnav,
delete_directory_subnav
)
from .event_subnav_definition import (
create_events_subnav,
delete_events_subnav
)
from .home_subnav_definition import (
create_home_subnav,
delete_home_subnav
)
from .judging_subnav_definition import (
create_judging_subnav,
delete_judging_subnav
)
from .resources_subnav_definition import (
create_resources_subnav,
delete_resources_subnav
)
from .startup_dashboard_subnav_definition import (
create_startup_dashboard_subnav,
delete_startup_dashboard_subnav,
STARTUP_DASHBOARD_SUBNAV_ITEMS,
STARTUP_DASHBOARD_TREE_ALIAS,
)
| masschallenge/django-accelerator | accelerator/sitetree_navigation/sub_navigation/__init__.py | Python | mit | 676 |
#!/usr/bin/env python
from flask import Config
from database import NodeDB
import graphPlotter
def generate_graph(time_limit=60*60*3):
    # Build the node/edge graph from the DB (default window: last 3 hours),
    # lay it out, and dump the result as JSON for the web frontend.
    nodes, edges = load_graph_from_db(time_limit)
    print '%d nodes, %d edges' % (len(nodes), len(edges))
    graph = graphPlotter.position_nodes(nodes, edges)
    # Local name shadows nothing here: this module does not import json.
    json = graphPlotter.get_graph_json(graph)
    with open('static/graph.json', 'w') as f:
        f.write(json)
def load_graph_from_db(time_limit):
    # Read nodes seen within `time_limit` seconds and the edges between them
    # (edges limited to one week of history) from the configured node DB.
    config = Config('./')
    config.from_pyfile('web_config.cfg')
    with NodeDB(config) as db:
        nodes = db.get_nodes(time_limit)
        edges = db.get_edges(nodes, 60*60*24*7)
    return (nodes, edges)
# Script entry point: regenerate the graph with the default time window.
if __name__ == '__main__':
    generate_graph()
| zielmicha/fc00.org | web/updateGraph.py | Python | gpl-3.0 | 721 |
class Edge:
    """A link between two hosts with an associated bandwidth."""
    def __init__(self, h1, h2, bw):
        """Store the endpoints and capacity of the link.

        h1 -- source node
        h2 -- sink node
        bw -- bandwidth on this link
        """
        self.h1 = h1
        self.h2 = h2
        self.bw = bw
###############################################################################
# Name: ed_menu.py #
# Purpose: Editra's Menubar and Menu related classes #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007-2008 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
@package: Editra.src.ed_menu
Provides an advanced menu class for easily creating menus and setting their
related bitmaps when available from Editra's ArtProvider. The Keybinder class
for managing keybindings and profiles is also provided by this module.
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: ed_menu.py 70229 2012-01-01 01:27:10Z CJP $"
__revision__ = "$Revision: 70229 $"
#--------------------------------------------------------------------------#
# Dependencies
import os
import wx
# Editra Libraries
import ed_glob
import ed_msg
import profiler
import util
from syntax import syntax
from syntax import synglob
#--------------------------------------------------------------------------#
# Globals
_ = wx.GetTranslation  # Conventional alias for marking translatable strings
#--------------------------------------------------------------------------#
class EdMenu(wx.Menu):
    """Custom wxMenu class that makes it easier to customize and access items.
    """
    def __init__(self, title=wx.EmptyString, style=0):
        """Initialize a Menu Object
        @param title: menu title string
        @param style: type of menu to create
        """
        super(EdMenu, self).__init__(title, style)
    def Append(self, id_, text=u'', helpstr=u'', \
               kind=wx.ITEM_NORMAL, use_bmp=True):
        """Append a MenuItem
        @param id_: New MenuItem ID
        @keyword text: Menu Label
        @keyword helpstr: Help String
        @keyword kind: MenuItem type
        @keyword use_bmp: try and set a bitmap if an appropriate one is
                          available in the ArtProvider
        """
        item = wx.MenuItem(self, id_, text, helpstr, kind)
        self.AppendItem(item, use_bmp)
        return item
    def AppendEx(self, id_, text=u'', helpstr=u'',
                 kind=wx.ITEM_NORMAL, use_bmp=True):
        """Like L{Append} but automatically applies keybindings to text
        based on item id.
        """
        binding = EdMenuBar.keybinder.GetBinding(id_)
        item = self.Append(id_, text+binding, helpstr, kind, use_bmp)
        return item
    def AppendItem(self, item, use_bmp=True):
        """Appends a MenuItem to the menu and adds an associated
        bitmap if one is available, unless use_bmp is set to false.
        @param item: wx.MenuItem
        @keyword use_bmp: try and set a bitmap if an appropriate one is
                          available in the ArtProvider
        """
        if use_bmp and item.GetKind() == wx.ITEM_NORMAL:
            self.SetItemBitmap(item)
        super(EdMenu, self).AppendItem(item)
    def Insert(self, pos, id_, text=u'', helpstr=u'', \
               kind=wx.ITEM_NORMAL, use_bmp=True):
        """Insert an item at position and attach a bitmap
        if one is available.
        @param pos: Position to insert new item at
        @param id_: New MenuItem ID
        @keyword text: Menu Label
        @keyword helpstr: Help String
        @keyword kind: MenuItem type
        @keyword use_bmp: try and set a bitmap if an appropriate one is
                          available in the ArtProvider
        """
        item = super(EdMenu, self).Insert(pos, id_, text, helpstr, kind)
        if use_bmp and kind == wx.ITEM_NORMAL:
            self.SetItemBitmap(item)
        return item
    def InsertAfter(self, item_id, id_, label=u'', helpstr=u'',
                    kind=wx.ITEM_NORMAL, use_bmp=True):
        """Inserts the given item after the specified item id in
        the menu. If the id cannot be found then the item will appended
        to the end of the menu.
        @param item_id: Menu ID to insert after
        @param id_: New MenuItem ID
        @keyword label: Menu Label
        @keyword helpstr: Help String
        @keyword kind: MenuItem type
        @keyword use_bmp: try and set a bitmap if an appropriate one is
                          available in the ArtProvider
        @return: the inserted menu item
        """
        pos = None
        for item in xrange(self.GetMenuItemCount()):
            mitem = self.FindItemByPosition(item)
            if mitem.GetId() == item_id:
                pos = item
                break
        # BUG FIX: `if pos:` treated a match at position 0 as "not found"
        # (0 is falsy) and appended instead of inserting after the first item.
        if pos is not None:
            mitem = self.Insert(pos + 1, id_, label, helpstr, kind, use_bmp)
        else:
            mitem = self.Append(id_, label, helpstr, kind, use_bmp)
        return mitem
    def InsertBefore(self, item_id, id_, label=u'', helpstr=u'',
                     kind=wx.ITEM_NORMAL, use_bmp=True):
        """Inserts the given item before the specified item id in
        the menu. If the id cannot be found then the item will appended
        to the end of the menu.
        @param item_id: Menu ID to insert new item before
        @param id_: New MenuItem ID
        @keyword label: Menu Label
        @keyword helpstr: Help String
        @keyword kind: MenuItem type
        @keyword use_bmp: try and set a bitmap if an appropriate one is
                          available in the ArtProvider
        @return: menu item that was inserted
        """
        pos = None
        for item in xrange(self.GetMenuItemCount()):
            mitem = self.FindItemByPosition(item)
            if mitem.GetId() == item_id:
                pos = item
                break
        # BUG FIX: same falsy-zero bug as InsertAfter; a match at position 0
        # must still insert rather than append.
        if pos is not None:
            mitem = self.Insert(pos, id_, label, helpstr, kind, use_bmp)
        else:
            mitem = self.Append(id_, label, helpstr, kind, use_bmp)
        return mitem
    def InsertAlpha(self, id_, label=u'', helpstr=u'',
                    kind=wx.ITEM_NORMAL, after=0, use_bmp=True):
        """Attempts to insert the new menuitem into the menu
        alphabetically. The optional parameter 'after' is used
        specify an item id to start the alphabetical lookup after.
        Otherwise the lookup begins from the first item in the menu.
        @param id_: New MenuItem ID
        @keyword label: Menu Label
        @keyword helpstr: Help String
        @keyword kind: MenuItem type
        @keyword after: id of item to start alpha lookup after
        @keyword use_bmp: try and set a bitmap if an appropriate one is
                          available in the ArtProvider
        @return: menu item that was inserted
        """
        if after:
            start = False
        else:
            start = True
        last_ind = self.GetMenuItemCount() - 1
        pos = last_ind
        # NOTE(review): on an empty menu `mlabel` is never assigned and the
        # final comparison would raise; confirm callers never hit that case.
        for item in range(self.GetMenuItemCount()):
            mitem = self.FindItemByPosition(item)
            if mitem.IsSeparator():
                continue
            mlabel = mitem.GetItemLabel()
            if after and mitem.GetId() == after:
                start = True
                continue
            if after and not start:
                continue
            if label < mlabel:
                pos = item
                break
        l_item = self.FindItemByPosition(last_ind)
        if pos == last_ind and (l_item.IsSeparator() or label > mlabel):
            mitem = self.Append(id_, label, helpstr, kind, use_bmp)
        else:
            mitem = self.Insert(pos, id_, label, helpstr, kind, use_bmp)
        return mitem
    def RemoveItemByName(self, name):
        """Removes an item by the label. It will remove the first
        item matching the given name in the menu, the matching is
        case sensitive. The return value is the either the id of the
        removed item or None if the item was not found.
        @param name: name of item to remove
        @return: id of removed item or None if not found
        """
        menu_id = None
        for pos in range(self.GetMenuItemCount()):
            item = self.FindItemByPosition(pos)
            if name == item.GetLabel():
                menu_id = item.GetId()
                self.Remove(menu_id)
                break
        return menu_id
    def SetItemBitmap(self, item):
        """Sets the MenuItems bitmap by getting the id from the
        artprovider if one exists.
        @param item: item to set bitmap for
        """
        bmp = wx.ArtProvider.GetBitmap(str(item.GetId()), wx.ART_MENU)
        if not bmp.IsNull():
            item.SetBitmap(bmp)
#-----------------------------------------------------------------------------#
class KeyBinder(object):
    """Class for managing keybinding configurations.
    Bindings are held in class-level state so all EdMenuBar instances share
    one active profile. A profile is a plain text '.ekeys' file of
    ITEM_NAME=Ctrl+Key lines stored in the cache or system keyprofile dir.
    """
    cprofile = None # Current Profile Name String
    keyprofile = dict() # Active Profile (dict)
    def __init__(self):
        """Create the KeyBinder object"""
        super(KeyBinder, self).__init__()
        # Attributes
        self.cache = ed_glob.CONFIG['CACHE_DIR'] # Resource Directory
    def GetBinding(self, item_id):
        """Get the keybinding string for use in a menu
        @param item_id: Menu Item Id
        @return: string (e.g. u"\\tCtrl+S"), empty when the id has no binding
        """
        rbind = self.GetRawBinding(item_id)
        shortcut = u''
        if rbind is not None:
            shortcut = u"+".join(rbind)
            if len(shortcut):
                shortcut = u"\t" + shortcut
        return unicode(shortcut)
    @classmethod
    def GetCurrentProfile(cls):
        """Get the name of the currently set key profile if one exists
        @param cls: Class Object
        @return: string or None
        """
        return cls.cprofile
    @classmethod
    def GetCurrentProfileDict(cls):
        """Get the dictionary of keybindings
        @param cls: Class Object
        @return: dict
        """
        return cls.keyprofile
    @staticmethod
    def GetKeyProfiles():
        """Get the list of available key profiles
        @return: list of strings
        """
        # User profiles live in the cache dir; system-supplied ones in ekeys.
        recs = util.GetResourceFiles(u'cache', trim=True, get_all=False,
                                     suffix='.ekeys', title=False)
        if recs == -1:
            recs = list()
        tmp = util.GetResourceFiles(u'ekeys', True, True, '.ekeys', False)
        if tmp != -1:
            recs.extend(tmp)
        return recs
    def GetProfilePath(self, pname):
        """Get the full path to the given keyprofile
        @param pname: profile name
        @return: string or None
        @note: expects unique name for each profile in the case that
               a name exists in both the user and system paths the one
               found on the user path will be returned.
        """
        if pname is None:
            return None
        rname = None
        for rec in self.GetKeyProfiles():
            if rec.lower() == pname.lower():
                rname = rec
                break
        # Must be a new profile
        if rname is None:
            rname = pname
        kprof = u"%s%s.ekeys" % (ed_glob.CONFIG['CACHE_DIR'], rname)
        if not os.path.exists(kprof):
            # Must be a system supplied keyprofile
            rname = u"%s%s.ekeys" % (ed_glob.CONFIG['KEYPROF_DIR'], rname)
            if not os.path.exists(rname):
                # Doesn't exist at syspath either so instead assume it is a new
                # custom user defined key profile.
                rname = kprof
        else:
            rname = kprof
        return rname
    @classmethod
    def GetRawBinding(cls, item_id):
        """Get the raw key binding tuple
        @param cls: Class Object
        @param item_id: MenuItem Id
        @return: tuple
        """
        return cls.keyprofile.get(item_id, None)
    @classmethod
    def FindMenuId(cls, keyb):
        """Find the menu item ID that the
        keybinding is currently associated with.
        @param cls: Class Object
        @param keyb: tuple of unicode (u'Ctrl', u'C')
        @return: int (-1 if not found)
        """
        menu_id = -1
        for key, val in cls.keyprofile.iteritems():
            if val == keyb:
                menu_id = key
                break
        return menu_id
    @classmethod
    def LoadDefaults(cls):
        """Load the default key profile"""
        cls.keyprofile = dict(_DEFAULT_BINDING)
        cls.cprofile = None
    def LoadKeyProfile(self, pname):
        """Load a key profile from profile directory into the binder
        by name.
        @param pname: name of key profile to load
        """
        if pname is None:
            ppath = None
        else:
            ppath = self.GetProfilePath(pname)
        self.LoadKeyProfileFile(ppath)
    def LoadKeyProfileFile(self, path):
        """Load a key profile from the given path
        @param path: full path to file
        Falls back to the default bindings when the file is missing or
        unreadable. Lines are NAME=Key+Key; a binding must contain at least
        one non-modifier key, and a duplicate binding replaces the earlier
        menu id that held it.
        """
        keydict = dict()
        pname = None
        if path:
            pname = os.path.basename(path)
            pname = pname.rsplit('.', 1)[0]
        if pname is not None and os.path.exists(path):
            reader = util.GetFileReader(path)
            if reader != -1:
                util.Log("[keybinder][info] Loading KeyProfile: %s" % path)
                for line in reader:
                    parts = line.split(u'=', 1)
                    # Check that the line was formatted properly
                    if len(parts) == 2:
                        # Try to find the ID value
                        item_id = _GetValueFromStr(parts[0])
                        if item_id is not None:
                            tmp = [ part.strip()
                                    for part in parts[1].split(u'+')
                                    if len(part.strip()) ]
                            # Do some checking if the binding is valid
                            nctrl = len([key for key in tmp
                                         if key not in (u'Ctrl', u'Alt', u'Shift')])
                            if nctrl:
                                # A trailing '++' means the bound key is '+'
                                # itself, which the split above discarded.
                                if parts[1].strip().endswith(u'++'):
                                    tmp.append(u'+')
                                kb = tuple(tmp)
                                # Evict any menu id that already owns this
                                # binding so each shortcut maps to one item.
                                if kb in keydict.values():
                                    for mid, b in keydict.iteritems():
                                        if kb == b:
                                            del keydict[mid]
                                            break
                                keydict[item_id] = tuple(tmp)
                            else:
                                # Invalid key binding
                                continue
                reader.close()
                KeyBinder.keyprofile = keydict
                KeyBinder.cprofile = pname
                return
            else:
                util.Log("[keybinder][err] Couldn't read %s" % path)
        elif pname is not None:
            # Fallback to default keybindings
            util.Log("[keybinder][err] Failed to load bindings from %s" % pname)
        util.Log("[keybinder][info] Loading Default Keybindings")
        KeyBinder.LoadDefaults()
    def SaveKeyProfile(self):
        """Save the current key profile to disk"""
        if KeyBinder.cprofile is None:
            util.Log("[keybinder][warn] No keyprofile is set, cant save")
        else:
            ppath = self.GetProfilePath(KeyBinder.cprofile)
            writer = util.GetFileWriter(ppath)
            if writer != -1:
                itemlst = list()
                for item in KeyBinder.keyprofile.keys():
                    itemlst.append(u"%s=%s%s" % (_FindStringRep(item),
                                                 self.GetBinding(item).lstrip(),
                                                 os.linesep))
                writer.writelines(sorted(itemlst))
                writer.close()
            else:
                util.Log("[keybinder][err] Failed to open %s for writing" % ppath)
    @classmethod
    def SetBinding(cls, item_id, keys):
        """Set the keybinding of a menu id
        @param cls: Class Object
        @param item_id: item to set
        @param keys: string or list of key strings ['Ctrl', 'S']
        """
        if isinstance(keys, basestring):
            keys = [ key.strip() for key in keys.split(u'+')
                     if len(key.strip())]
        keys = tuple(keys)
        if len(keys):
            # Check for an existing binding
            menu_id = cls.FindMenuId(keys)
            if menu_id != -1:
                del cls.keyprofile[menu_id]
            # Set the binding
            cls.keyprofile[item_id] = keys
        elif item_id in cls.keyprofile:
            # Clear the binding
            del cls.keyprofile[item_id]
        else:
            pass
    @classmethod
    def SetProfileName(cls, pname):
        """Set the name of the current profile
        @param cls: Class Object
        @param pname: name to set profile to
        """
        cls.cprofile = pname
    @classmethod
    def SetProfileDict(cls, keyprofile):
        """Set the keyprofile using a dictionary of id => bindings
        @param cls: Class Object
        @param keyprofile: { menu_id : (u'Ctrl', u'C'), }
        """
        cls.keyprofile = keyprofile
#-----------------------------------------------------------------------------#
class EdMenuBar(wx.MenuBar):
"""Custom menubar to allow for easier access and updating
of menu components.
@todo: redo all of this
"""
keybinder = KeyBinder()
def __init__(self, style=0):
"""Initializes the Menubar
@keyword style: style to set for menu bar
"""
super(EdMenuBar, self).__init__(style)
# Setup
if EdMenuBar.keybinder.GetCurrentProfile() is None:
kprof = profiler.Profile_Get('KEY_PROFILE', default='default')
EdMenuBar.keybinder.LoadKeyProfile(kprof)
# Attributes
self._menus = dict()
self.GenFileMenu()
self.GenEditMenu()
self.GenViewMenu()
self.GenFormatMenu()
self.GenSettingsMenu()
self.GenToolsMenu()
self.GenHelpMenu()
# Message handlers
ed_msg.Subscribe(self.OnRebind, ed_msg.EDMSG_MENU_REBIND)
ed_msg.Subscribe(self.OnLoadProfile, ed_msg.EDMSG_MENU_LOADPROFILE)
ed_msg.Subscribe(self.OnCreateLexerMenu, ed_msg.EDMSG_CREATE_LEXER_MENU)
def GenLexerMenu(self):
"""Create the Lexer menu"""
settingsmenu = self._menus['settings']
item = settingsmenu.FindItemById(ed_glob.ID_LEXER)
if item:
settingsmenu.Remove(ed_glob.ID_LEXER)
# Create the menu
langmenu = wx.Menu()
langmenu.Append(ed_glob.ID_LEXER_CUSTOM, _("Customize..."),
_("Customize the items shown in this menu."))
langmenu.AppendSeparator()
EdMenuBar.PopulateLexerMenu(langmenu)
settingsmenu.AppendMenu(ed_glob.ID_LEXER, _("Lexers"),
langmenu,
_("Manually Set a Lexer/Syntax"))
@staticmethod
def PopulateLexerMenu(langmenu):
"""Create a menu with all the lexer options
@return: wx.Menu
"""
mconfig = profiler.Profile_Get('LEXERMENU', default=list())
mconfig.sort()
for label in mconfig:
lid = synglob.GetIdFromDescription(label)
langmenu.Append(lid, label,
_("Switch Lexer to %s") % label, wx.ITEM_CHECK)
@classmethod
def DeleteKeyProfile(cls, pname):
"""Remove named keyprofile
@param cls: Class Object
@param pname: keyprofile name
@return: True if removed, False otherwise
"""
ppath = cls.keybinder.GetProfilePath(pname)
if ppath is not None and os.path.exists(ppath):
try:
os.remove(ppath)
except:
return False
else:
return True
else:
return False
# TODO these Gen* functions should be broken up to the components
# that supply the functionality and inserted in the menus on
# init when the editor loads an associated widget.
def GenFileMenu(self):
"""Makes and attaches the file menu
@return: None
"""
filemenu = EdMenu()
filehist = self._menus['filehistory'] = EdMenu()
filemenu.AppendEx(ed_glob.ID_NEW, _("&New Tab"),
_("Start a new file in a new tab"))
filemenu.AppendEx(ed_glob.ID_NEW_WINDOW, _("New &Window"),
_("Start a new file in a new window"))
filemenu.AppendSeparator()
filemenu.AppendEx(ed_glob.ID_OPEN, _("&Open"), _("Open"))
## Setup File History in the File Menu
filemenu.AppendMenu(ed_glob.ID_FHIST, _("Open &Recent"),
filehist, _("Recently Opened Files"))
filemenu.AppendSeparator()
filemenu.AppendEx(ed_glob.ID_CLOSE, _("&Close Tab"),
_("Close Current Tab"))
filemenu.AppendEx(ed_glob.ID_CLOSE_WINDOW,
_("Close Window") , _("Close the current window"))
filemenu.AppendEx(ed_glob.ID_CLOSEALL, _("Close All Tabs"),
_("Close all open tabs"))
filemenu.AppendSeparator()
filemenu.AppendEx(ed_glob.ID_SAVE, _("&Save"), _("Save Current File"))
filemenu.AppendEx(ed_glob.ID_SAVEAS, _("Save &As"), _("Save As"))
filemenu.AppendEx(ed_glob.ID_SAVEALL, _("Save All"),
_("Save all open pages"))
filemenu.AppendSeparator()
filemenu.AppendEx(ed_glob.ID_REVERT_FILE, _("Revert to Saved"),
_("Revert file to last save point"))
filemenu.AppendEx(ed_glob.ID_RELOAD_ENC, _("Reload with Encoding..."),
_("Reload the file with a specified encoding"))
filemenu.AppendSeparator()
# Profile
pmenu = EdMenu()
pmenu.AppendEx(ed_glob.ID_SAVE_PROFILE, _("Save Profile"),
_("Save Current Settings to a New Profile"))
pmenu.AppendEx(ed_glob.ID_LOAD_PROFILE, _("Load Profile"),
_("Load a Custom Profile"))
filemenu.AppendSubMenu(pmenu, _("Profile"),
_("Load and save custom Profiles"))
# Sessions
smenu = EdMenu()
smenu.AppendEx(ed_glob.ID_SAVE_SESSION, _("Save Session"),
_("Save the current session."))
smenu.AppendEx(ed_glob.ID_LOAD_SESSION, _("Load Session"),
_("Load a saved session."))
filemenu.AppendSubMenu(smenu, _("Sessions"),
_("Load and save custom sessions."))
filemenu.AppendSeparator()
filemenu.AppendEx(ed_glob.ID_PRINT_SU, _("Page Set&up"),
_("Configure Printer"))
filemenu.AppendEx(ed_glob.ID_PRINT_PRE, _("Print Pre&view"),
_("Preview Printout"))
filemenu.AppendEx(ed_glob.ID_PRINT, _("&Print"), _("Print Current File"))
filemenu.AppendSeparator()
filemenu.AppendEx(ed_glob.ID_EXIT, _("E&xit"), _("Exit the Program"))
# Attach to menubar and save reference
self.Append(filemenu, _("&File"))
self._menus['file'] = filemenu
def GenEditMenu(self):
"""Makes and attaches the edit menu
@return: None
"""
editmenu = EdMenu()
editmenu.AppendEx(ed_glob.ID_UNDO, _("&Undo"), _("Undo Last Action"))
editmenu.AppendEx(ed_glob.ID_REDO, _("Redo"), _("Redo Last Undo"))
editmenu.AppendSeparator()
editmenu.AppendEx(ed_glob.ID_CUT, _("Cu&t"),
_("Cut Selected Text from File"))
editmenu.AppendEx(ed_glob.ID_COPY, _("&Copy"),
_("Copy Selected Text to Clipboard"))
editmenu.AppendEx(ed_glob.ID_PASTE, _("&Paste"),
_("Paste Text from Clipboard to File"))
editmenu.AppendEx(ed_glob.ID_PASTE_AFTER, _("P&aste After"),
_("Paste Text from Clipboard to File after the cursor"))
editmenu.AppendEx(ed_glob.ID_CYCLE_CLIPBOARD, _("Cycle Clipboard"),
_("Cycle through recent clipboard text"))
editmenu.AppendSeparator()
editmenu.AppendEx(ed_glob.ID_SELECTALL, _("Select &All"),
_("Select All Text in Document"))
editmenu.AppendEx(ed_glob.ID_COLUMN_MODE, _("Column Edit"),
_("Enable column edit mode."), wx.ITEM_CHECK)
editmenu.AppendSeparator()
linemenu = EdMenu()
linemenu.AppendEx(ed_glob.ID_LINE_AFTER, _("New Line After"),
_("Add a new line after the current line"))
linemenu.AppendEx(ed_glob.ID_LINE_BEFORE, _("New Line Before"),
_("Add a new line before the current line"))
linemenu.AppendSeparator()
linemenu.AppendEx(ed_glob.ID_CUT_LINE, _("Cut Line"),
_("Cut Current Line"))
linemenu.AppendEx(ed_glob.ID_DELETE_LINE, _("Delete Line"),
_("Delete the selected line(s)"))
linemenu.AppendEx(ed_glob.ID_COPY_LINE, _("Copy Line"),
_("Copy Current Line"))
linemenu.AppendEx(ed_glob.ID_DUP_LINE, _("Duplicate Line"),
_("Duplicate the current line"))
linemenu.AppendSeparator()
linemenu.AppendEx(ed_glob.ID_JOIN_LINES, _("Join Lines"),
_("Join the Selected Lines"))
linemenu.AppendEx(ed_glob.ID_TRANSPOSE, _("Transpose Line"),
_("Transpose the current line with the previous one"))
linemenu.AppendEx(ed_glob.ID_LINE_MOVE_UP, _("Move Current Line Up"),
_("Move the current line up"))
linemenu.AppendEx(ed_glob.ID_LINE_MOVE_DOWN,
_("Move Current Line Down"),
_("Move the current line down"))
editmenu.AppendMenu(ed_glob.ID_LINE_EDIT, _("Line Edit"), linemenu,
_("Commands that affect an entire line"))
bookmenu = EdMenu()
bookmenu.AppendEx(ed_glob.ID_ADD_BM, _("Toggle Bookmark"),
_("Toggle bookmark of the current line"))
bookmenu.AppendEx(ed_glob.ID_DEL_ALL_BM, _("Remove All Bookmarks"),
_("Remove all bookmarks from the current document"))
editmenu.AppendMenu(ed_glob.ID_BOOKMARK, _("Bookmarks"), bookmenu,
_("Add and remove bookmarks"))
editmenu.AppendSeparator()
# Autocompletion shortcuts
editmenu.AppendEx(ed_glob.ID_SHOW_AUTOCOMP, _("Word Completion"),
_("Show autocompletion hints."))
editmenu.AppendEx(ed_glob.ID_SHOW_CALLTIP, _("Show Calltip"),
_("Show a calltip for the current word."))
editmenu.AppendSeparator()
editmenu.AppendEx(ed_glob.ID_FIND, _("&Find"), _("Find Text"))
editmenu.AppendEx(ed_glob.ID_FIND_REPLACE, _("Find/R&eplace"),
_("Find and Replace Text"))
editmenu.AppendEx(ed_glob.ID_QUICK_FIND, _("&Quick Find"),
_("Open the Quick Find Bar"))
editmenu.AppendEx(ed_glob.ID_FIND_PREVIOUS, _("Find Previous"),
_("Goto previous match"))
editmenu.AppendEx(ed_glob.ID_FIND_NEXT, _("Find Next"),
_("Goto the next match"))
editmenu.AppendEx(ed_glob.ID_FIND_SELECTED, _("Find Selected"),
_("Search for the currently selected phrase"))
editmenu.AppendSeparator()
editmenu.AppendEx(ed_glob.ID_PREF, _("Pr&eferences"),
_("Edit Preferences / Settings"))
# Attach to menubar and save ref
self.Append(editmenu, _("&Edit"))
self._menus['edit'] = editmenu
def GenViewMenu(self):
"""Makes and attaches the view menu
@return: None
"""
viewmenu = EdMenu()
viewmenu.AppendEx(ed_glob.ID_ZOOM_OUT, _("Zoom Out"), _("Zoom Out"))
viewmenu.AppendEx(ed_glob.ID_ZOOM_IN, _("Zoom In"), _("Zoom In"))
viewmenu.AppendEx(ed_glob.ID_ZOOM_NORMAL, _("Zoom Default"),
_("Zoom Default"))
viewmenu.AppendSeparator()
viewedit = self._menus['viewedit'] = EdMenu()
viewedit.AppendEx(ed_glob.ID_HLCARET_LINE, _("Highlight Caret Line"),
_("Highlight the background of the current line"),
wx.ITEM_CHECK)
viewedit.AppendEx(ed_glob.ID_INDENT_GUIDES, _("Indentation Guides"),
_("Show Indentation Guides"), wx.ITEM_CHECK)
viewedit.AppendEx(ed_glob.ID_SHOW_EDGE, _("Show Edge Guide"),
_("Show the edge column guide"), wx.ITEM_CHECK)
viewedit.AppendEx(ed_glob.ID_SHOW_EOL, _("Show EOL Markers"),
_("Show EOL Markers"), wx.ITEM_CHECK)
viewedit.AppendEx(ed_glob.ID_SHOW_LN, _("Show Line Numbers"),
_("Show Line Number Margin"), wx.ITEM_CHECK)
viewedit.AppendEx(ed_glob.ID_SHOW_WS, _("Show Whitespace"),
_("Show Whitespace Markers"), wx.ITEM_CHECK)
viewmenu.AppendSubMenu(self._menus['viewedit'], _("Editor"), \
_("Toggle Editor View Options"))
viewfold = self._menus['viewfold'] = EdMenu()
viewfold.AppendEx(ed_glob.ID_TOGGLE_FOLD, _("Toggle fold"),
_("Toggle current fold"))
viewfold.AppendEx(ed_glob.ID_TOGGLE_ALL_FOLDS, _("Toggle all folds"),
_("Toggle all folds"))
viewmenu.AppendSubMenu(self._menus['viewfold'], _("Code Folding"), \
_("Code folding toggle actions"))
viewmenu.AppendSeparator()
viewmenu.AppendEx(ed_glob.ID_PANELIST, _("Pane Navigator"),
_("View pane selection list"))
viewmenu.AppendEx(ed_glob.ID_MAXIMIZE_EDITOR, _("Maximize Editor"),
_("Toggle Editor Maximization"))
viewmenu.AppendSeparator()
viewmenu.AppendEx(ed_glob.ID_GOTO_LINE, _("&Goto Line"),
_("Goto Line Number"))
viewmenu.AppendEx(ed_glob.ID_GOTO_MBRACE, _("Goto Matching Brace"),
_("Move caret matching brace"))
viewmenu.AppendSeparator()
viewmenu.AppendEx(ed_glob.ID_NEXT_POS, _("Next Position"),
_("Goto next position in history."))
viewmenu.AppendEx(ed_glob.ID_PRE_POS, _("Previous Position"),
_("Goto previous position in history."))
viewmenu.AppendSeparator()
viewmenu.AppendEx(ed_glob.ID_NEXT_MARK, _("Next Bookmark"),
_("View Line of Next Bookmark"))
viewmenu.AppendEx(ed_glob.ID_PRE_MARK, _("Previous Bookmark"),
_("View Line of Previous Bookmark"))
viewmenu.AppendSeparator()
viewmenu.AppendEx(ed_glob.ID_SHOW_SB, ("Status &Bar"),
_("Show Status Bar"), wx.ITEM_CHECK)
viewmenu.AppendEx(ed_glob.ID_VIEW_TOOL, _("&Toolbar"),
_("Show Toolbar"), wx.ITEM_CHECK)
# Attach to menubar
self.Append(viewmenu, _("&View"))
self._menus['view'] = viewmenu
def GenFormatMenu(self):
    """Build the Format menu (font, comments, indentation, case,
    whitespace and EOL-mode submenus) and attach it to the menubar.
    @return: None
    """
    menu = EdMenu()
    menu.AppendEx(ed_glob.ID_FONT, _("&Font"),
                  _("Change Font Settings"))
    menu.AppendSeparator()
    menu.AppendEx(ed_glob.ID_TOGGLECOMMENT, _("Toggle Comment"),
                  _("Toggle comment on the selected line(s)"))
    menu.AppendSeparator()
    menu.AppendEx(ed_glob.ID_INDENT, _("Indent Lines"),
                  _("Indent the selected lines"))
    menu.AppendEx(ed_glob.ID_UNINDENT, _("Unindent Lines"),
                  _("Unindent the selected lines"))
    menu.AppendSeparator()
    menu.AppendEx(ed_glob.ID_TO_UPPER, _("Uppercase"),
                  _("Convert selected text to all uppercase letters"))
    menu.AppendEx(ed_glob.ID_TO_LOWER, _("Lowercase"),
                  _("Convert selected text to all lowercase letters"))
    menu.AppendSeparator()
    menu.AppendEx(ed_glob.ID_USE_SOFTTABS, _("Use Soft Tabs"),
                  _("Insert spaces instead of tab "
                    "characters with tab key"), wx.ITEM_CHECK)
    menu.AppendEx(ed_glob.ID_WORD_WRAP, _("Word Wrap"),
                  _("Wrap Text Horizontally"), wx.ITEM_CHECK)
    menu.AppendSeparator()
    # Whitespace conversion submenu
    ws_menu = self._menus['whitespaceformat'] = EdMenu()
    ws_menu.AppendEx(ed_glob.ID_SPACE_TO_TAB, _("Spaces to Tabs"),
                     _("Convert spaces to tabs in selected/all text"))
    ws_menu.AppendEx(ed_glob.ID_TAB_TO_SPACE, _("Tabs to Spaces"),
                     _("Convert tabs to spaces in selected/all text"))
    ws_menu.AppendEx(ed_glob.ID_TRIM_WS, _("Trim Trailing Whitespace"),
                     _("Remove trailing whitespace"))
    menu.AppendMenu(ed_glob.ID_WS_FORMAT, _("Whitespace"), ws_menu,
                    _("Whitespace formating commands"))
    # End-of-line mode submenu (check items, one per EOL style)
    eol_menu = self._menus['lineformat'] = EdMenu()
    eol_menu.AppendEx(ed_glob.ID_EOL_MAC, _("Old Macintosh (\\r)"),
                      _("Format all EOL characters to %s Mode") % \
                      _(u"Old Macintosh (\\r)"), wx.ITEM_CHECK)
    eol_menu.AppendEx(ed_glob.ID_EOL_UNIX, _("Unix (\\n)"),
                      _("Format all EOL characters to %s Mode") % \
                      _(u"Unix (\\n)"), wx.ITEM_CHECK)
    eol_menu.AppendEx(ed_glob.ID_EOL_WIN, _("Windows (\\r\\n)"),
                      _("Format all EOL characters to %s Mode") % \
                      _("Windows (\\r\\n)"), wx.ITEM_CHECK)
    menu.AppendMenu(ed_glob.ID_EOL_MODE, _("EOL Mode"), eol_menu,
                    _("End of line character formatting"))
    # Attach to menubar
    self.Append(menu, _("F&ormat"))
    self._menus['format'] = menu
def GenSettingsMenu(self):
    """Build the Settings menu of editor feature toggles and attach it
    to the menubar; the lexer submenu is generated afterwards.
    @return: None
    """
    menu = EdMenu()
    menu.AppendEx(ed_glob.ID_AUTOCOMP, _("Auto-Completion"),
                  _("Use Auto Completion when available"), wx.ITEM_CHECK)
    menu.AppendEx(ed_glob.ID_AUTOINDENT, _("Auto-Indent"),
                  _("Toggle Auto-Indentation functionality"),
                  wx.ITEM_CHECK)
    menu.AppendEx(ed_glob.ID_BRACKETHL, _("Bracket Highlighting"),
                  _("Highlight Brackets/Braces"), wx.ITEM_CHECK)
    menu.AppendEx(ed_glob.ID_FOLDING, _("Code Folding"),
                  _("Toggle Code Folding"), wx.ITEM_CHECK)
    menu.AppendEx(ed_glob.ID_SYNTAX, _("Syntax Highlighting"),
                  _("Color Highlight Code Syntax"), wx.ITEM_CHECK)
    menu.AppendSeparator()
    # Lexer Menu Appended later by main frame
    self.Append(menu, _("&Settings"))
    self._menus['settings'] = menu
    self.GenLexerMenu()
def GenToolsMenu(self):
    """Build the Tools menu and attach it to the menubar.
    @return: None
    """
    menu = EdMenu()
    menu.AppendEx(ed_glob.ID_COMMAND, _("Editor Command"),
                  _("Goto command buffer"))
    menu.AppendEx(ed_glob.ID_SESSION_BAR, _("Session Manager"),
                  _("Show the session manager bar"))
    menu.AppendEx(ed_glob.ID_PLUGMGR, _("Plugin Manager"),
                  _("Manage, Download, and Install plugins"))
    menu.AppendEx(ed_glob.ID_STYLE_EDIT, _("Style Editor"),
                  _("Edit the way syntax is highlighted"))
    menu.AppendSeparator()
    # NOTE: a macro record/stop/play submenu once followed the separator
    # but the feature was never enabled; it is intentionally omitted.
    # Attach to menubar
    self.Append(menu, _("&Tools"))
    self._menus['tools'] = menu
def GenHelpMenu(self):
    """Build the Help menu (about dialog, project links, feedback)
    and attach it to the menubar.
    @return: None
    """
    menu = EdMenu()
    menu.AppendEx(ed_glob.ID_ABOUT, _("&About..."),
                  _("About") + u"...")
    menu.AppendEx(ed_glob.ID_HOMEPAGE, _("Project Homepage..."),
                  _("Visit the project homepage %s") % ed_glob.HOME_PAGE)
    menu.AppendEx(ed_glob.ID_DOCUMENTATION,
                  _("Online Documentation..."),
                  _("Online project documentation and help guides"))
    menu.AppendEx(ed_glob.ID_TRANSLATE, _("Translate Editra..."),
                  _("Editra translations project"))
    menu.AppendEx(ed_glob.ID_BUG_TRACKER, _("Bug Tracker..."))
    menu.AppendEx(ed_glob.ID_CONTACT, _("Feedback"),
                  _("Send bug reports and suggestions"))
    # Attach to menubar
    self.Append(menu, _("&Help"))
    self._menus['help'] = menu
@classmethod
def GetKeyBinder(cls):
    """Return the class-wide KeyBinder object shared by all menubars.
    @param cls: Class Object
    @return: KeyBinder
    """
    return cls.keybinder
def GetMenuByName(self, namestr):
    """Look up one of this menubar's menus by its label.
    @param namestr: menu label (matched case-insensitively)
    @return: the menu, or None when no menu has that name
    """
    key = namestr.lower()
    return self._menus.get(key, None)
def GetMenuMap(self):
    """Get a mapping of all menus to (menu id, menu label)
    @return: list of dict, one entry per top-level menu
    """
    return [WalkMenu(mnu, lbl, dict()) for mnu, lbl in self.GetMenus()]
@classmethod
def NewKeyProfile(cls, pname):
    """Make a new key profile that is a clone of the current one.
    The binder is renamed first so the save writes to the new profile.
    @param cls: Class Object
    @param pname: Name to give new profile
    """
    cls.keybinder.SetProfileName(pname)
    cls.keybinder.SaveKeyProfile()
def OnCreateLexerMenu(self, msg):
    """Message handler: recreate the lexer menu when notified."""
    self.GenLexerMenu()
def OnLoadProfile(self, msg):
    """Load and set the current key profile
    @param msg: ed_msg.EDMSG_MENU_LOADPROFILE
    @note: if message data is None the default bindings will be set
    """
    keyprof = msg.GetData()
    if keyprof is None:
        EdMenuBar.keybinder.LoadDefaults()
    else:
        self.SetKeyProfile(keyprof)
def OnRebind(self, msg):
    """Rebind all menu shortcuts when a rebind message is recieved
    @param msg: ed_msg.EDMSG_MENU_REBIND
    """
    self.RebindKeys()
def RebindKeys(self):
    """Reset all key bindings based on current binder profile.

    For every item in every menu: validate the stored binding (at most
    three keys, at least one non-modifier), normalize its formatting in
    the binder, and rewrite the menu item label so its accelerator text
    matches the binder.
    """
    for menu in self.GetMenus():
        for item in IterateMenuItems(menu[0]):
            item_id = item.GetId()
            binding = EdMenuBar.keybinder.GetBinding(item_id)
            empty_binding = not len(binding)
            if not empty_binding:
                # Verify binding and clear invalid ones from binder
                tmp = [key.title() for key in binding.strip().split(u'+')]
                # Count of non-modifier keys; a binding of only modifiers
                # (or more than 3 parts total) is invalid and is cleared.
                nctrl = len([key for key in tmp
                             if key not in (u'Ctrl', u'Alt', u'Shift')])
                if len(tmp) > 3 or not nctrl:
                    EdMenuBar.keybinder.SetBinding(item_id, u'')
                    continue
                # Reset the binding in the binder to ensure it is
                # correctly formatted (leading tab, '+'-joined, Title case).
                binding = u"\t" + u"+".join(tmp)
                EdMenuBar.keybinder.SetBinding(item_id, binding)
            clbl = item.GetText()
            # Update the item if the shortcut has changed
            if ('\t' in clbl and (not clbl.endswith(binding) or empty_binding)) or \
               ('\t' not in clbl and not empty_binding):
                # wxBug? Getting the text of a menuitem is supposed to
                # return it with the accelerators but under gtk the string
                # has underscores '_' where it was supposed to have '&'
                if wx.Platform == '__WXGTK__':
                    clbl = clbl.replace('_', '&', 1)
                item.SetText(clbl.split('\t')[0].strip() + binding)
def ResetIcons(self):
    """Walk through each menu item in all of the bars menu and
    reapply icons where possible.
    @note: Don't use, sort of works on mac, does nothing on gtk, and causes
           graphical glitches on msw.
    """
    for menu in self.GetMenus():
        WalkAndSetBitmaps(menu[0])
@classmethod
def SaveKeyProfile(cls):
    """Save the current key profile to disk via the shared binder."""
    cls.keybinder.SaveKeyProfile()
def SetKeyProfile(self, pname):
    """Set the current key profile and update the menu accelerators.
    @param pname: Name of keyprofile to load
    """
    EdMenuBar.keybinder.LoadKeyProfile(pname)
    self.RebindKeys()
#-----------------------------------------------------------------------------#
#---- Private Objects/Functions ----#
#: Default keyboard accelerators, keyed by menu item id. Values are
#: tuples of key names that are joined with '+' when bindings are applied.
_DEFAULT_BINDING = { # File Menu
    ed_glob.ID_NEW : (u"Ctrl", u"N"),
    ed_glob.ID_NEW_WINDOW : (u"Ctrl", u"Shift", u"N"),
    ed_glob.ID_OPEN : (u"Ctrl", u"O"),
    ed_glob.ID_CLOSE : (u"Ctrl", u"W"),
    ed_glob.ID_CLOSE_WINDOW : (u"Ctrl", u"Shift", u"W"),
    ed_glob.ID_SAVE : (u"Ctrl", u"S"),
    ed_glob.ID_SAVEAS : (u"Ctrl", u"Shift", u"S"),
    ed_glob.ID_PRINT_SU : (u"Ctrl", u"Shift", u"P"),
    ed_glob.ID_PRINT : (u"Ctrl", u"P"),
    ed_glob.ID_EXIT : (u"Ctrl", u"Q"),

    # Edit Menu
    ed_glob.ID_UNDO : (u"Ctrl", u"Z"),
    ed_glob.ID_REDO : (u"Ctrl", u"Shift", u"Z"),
    ed_glob.ID_CUT : (u"Ctrl", u"X"),
    ed_glob.ID_COPY : (u"Ctrl", u"C"),
    ed_glob.ID_PASTE : (u"Ctrl", u"V"),
    ed_glob.ID_PASTE_AFTER : (u"Ctrl", u"Shift", u"V"),
    ed_glob.ID_CYCLE_CLIPBOARD : (u"Ctrl", u"I"),
    ed_glob.ID_SELECTALL : (u"Ctrl", u"A"),
    ed_glob.ID_COLUMN_MODE : (u"Ctrl", u"Shift", u"|"),
    ed_glob.ID_LINE_AFTER : (u"Ctrl", u"L"),
    ed_glob.ID_LINE_BEFORE : (u"Ctrl", u"Shift", u"L"),
    ed_glob.ID_CUT_LINE : (u"Ctrl", u"D"),
    # NOTE(review): "D" below lacks the u-prefix used everywhere else;
    # harmless in practice but inconsistent with the rest of the table.
    ed_glob.ID_DELETE_LINE : (u"Ctrl", u"Shift", "D"),
    ed_glob.ID_COPY_LINE : (u"Ctrl", u"Y"),
    ed_glob.ID_DUP_LINE : (u"Ctrl", u"Shift", u"C"),
    ed_glob.ID_JOIN_LINES : (u"Ctrl", u"J"),
    ed_glob.ID_TRANSPOSE : (u"Ctrl", u"T"),
    ed_glob.ID_LINE_MOVE_UP : (u"Ctrl", u"Shift", u"Up"),
    ed_glob.ID_LINE_MOVE_DOWN : (u"Ctrl", u"Shift", u"Down"),
    ed_glob.ID_ADD_BM : (u"Ctrl", u"B"),
    ed_glob.ID_SHOW_AUTOCOMP : (u"Ctrl", u"Space"),
    ed_glob.ID_SHOW_CALLTIP : (u"Ctrl", u"9"),
    ed_glob.ID_FIND : (u"Ctrl", u"Shift", u"F"),
    ed_glob.ID_FIND_PREVIOUS : (u"Shift", u"F3"),
    ed_glob.ID_FIND_NEXT : (u"F3",),
    ed_glob.ID_FIND_REPLACE : (u"Ctrl", u"R"),
    ed_glob.ID_QUICK_FIND : (u"Ctrl", u"F"),
    ed_glob.ID_FIND_SELECTED : (u"Ctrl", u"F3"),

    # View Menu
    ed_glob.ID_ZOOM_IN : (u"Ctrl", u"+"),
    ed_glob.ID_ZOOM_OUT : (u"Ctrl", u"-"),
    ed_glob.ID_ZOOM_NORMAL : (u"Ctrl", u"0"),
    ed_glob.ID_GOTO_LINE : (u"Ctrl", u"G"),
    ed_glob.ID_GOTO_MBRACE : (u"Ctrl", u"Shift", u"B"),
    ed_glob.ID_TOGGLE_FOLD : (u"Ctrl", u"Shift", u"T"),
    ed_glob.ID_NEXT_POS : (u"Ctrl", u"Shift", u">"),
    ed_glob.ID_PRE_POS : (u"Ctrl", u"Shift", u"<"),
    ed_glob.ID_NEXT_MARK : (u"Alt", u"Right"), # Win/Linux
    ed_glob.ID_PRE_MARK : (u"Alt", u"Left"), # Win/Linux
    ed_glob.ID_SHOW_SHELF : (u"Ctrl", u"Alt", u"S"),
    ed_glob.ID_PANELIST : (u"Alt", u"1"), # Win/Linux
    ed_glob.ID_MAXIMIZE_EDITOR : (u"Ctrl", u"M"),

    # Format Menu
    ed_glob.ID_TOGGLECOMMENT : (u"Ctrl", u"1"),
    ed_glob.ID_INDENT : (u"Tab",),
    ed_glob.ID_UNINDENT : (u"Shift", u"Tab"),
    ed_glob.ID_USE_SOFTTABS : (u"Ctrl", u"Shift", u"I"),

    # Tools Menu
    ed_glob.ID_COMMAND : (u"Ctrl", u"E"),
    ed_glob.ID_SESSION_BAR : (u"Ctrl", u"K"),
    ed_glob.ID_RUN_LAUNCH : (u"F5",),
    ed_glob.ID_LAUNCH_LAST : (u"Shift", u"F5")
}

# Set some platform specific keybindings
if wx.Platform == '__WXMAC__':
    _DEFAULT_BINDING[ed_glob.ID_NEXT_MARK] = (u"Ctrl", u"Down")
    _DEFAULT_BINDING[ed_glob.ID_PRE_MARK] = (u"Ctrl", u"Up")
    _DEFAULT_BINDING[ed_glob.ID_FIND_PREVIOUS] = (u"Ctrl", u"Shift", u"G")
    _DEFAULT_BINDING[ed_glob.ID_FIND_NEXT] = (u"Ctrl", u"G")
    _DEFAULT_BINDING[ed_glob.ID_GOTO_LINE] = (u"Ctrl", u"Shift", u"E")
    _DEFAULT_BINDING[ed_glob.ID_PANELIST] = (u"Alt", u"Tab")
    _DEFAULT_BINDING[ed_glob.ID_MAXIMIZE_EDITOR] = (u"Alt", u"M")
    _DEFAULT_BINDING[ed_glob.ID_FIND_SELECTED] = (u"Ctrl", u"3")
elif wx.Platform == '__WXMSW__':
    # FIXME: On Windows if Tab is bound to a menu item it is no longer
    #        usable elsewhere such as in the stc control. On Mac/Gtk there
    #        are not problems with it.
    _DEFAULT_BINDING[ed_glob.ID_INDENT] = (u"",)
else:
    pass
def _FindStringRep(item_id):
    """Find the name of the ed_glob attribute whose value equals item_id.
    @param item_id: int
    @return: string or None when no attribute matches
    """
    return next((attr for attr in dir(ed_glob)
                 if getattr(ed_glob, attr) == item_id), None)
def _GetValueFromStr(item_str):
    """Get the id value from the string representation of the object.
    Inverse of _FindStringRep.
    @param item_str: items variable string
    @return: int or None
    """
    return getattr(ed_glob, item_str, None)
#---- Public Functions ----#
def IterateMenuItems(menu):
    """Recursively walk and yield menu items as the are found. Only menu
    items are yielded, not submenus or separators.
    @param menu: menu to iterate
    @note: NOTE(review): the code also yields the submenu *parent* item
           itself after recursing into it (it is not a separator), which
           appears to contradict the docstring above -- confirm intent.
    """
    for item in menu.GetMenuItems():
        if item.IsSubMenu():
            # Depth-first: yield the submenu's children first
            for subitem in IterateMenuItems(item.GetSubMenu()):
                yield subitem
        if not item.IsSeparator():
            yield item
        else:
            continue
def WalkAndSetBitmaps(menu):
    """Recursively walk a menu and its submenus setting bitmaps
    as necessary/available, using the the current theme.
    """
    for itm in menu.GetMenuItems():
        if itm.IsSubMenu():
            WalkAndSetBitmaps(itm.GetSubMenu())
            continue
        bmp = wx.ArtProvider.GetBitmap(str(itm.GetId()), wx.ART_MENU)
        if bmp.IsOk():
            itm.SetBitmap(bmp)
        elif not itm.GetBitmap().IsNull():
            # No themed art available: clear any stale bitmap
            itm.SetBitmap(wx.NullBitmap)
def WalkMenu(menu, label, collection):
    """Recursively walk a menu and collect all its sub items.
    @param menu: wxMenu to walk
    @param label: the menu's label
    @param collection: dictionary to collect results in
    @return: dict {menulabel : [menu id, (item1 id, label1),]}
    """
    if label not in collection:
        collection[label] = list()
    for item in menu.GetMenuItems():
        i_id = item.GetId()
        if item.IsSubMenu():
            # Ignore dynamically generated menus
            if i_id not in (ed_glob.ID_FHIST, ed_glob.ID_LEXER,
                            ed_glob.ID_PERSPECTIVES):
                ilbl = item.GetItemLabelText()
                # Submenus get their own entry keyed by their own label
                collection[ilbl] = [i_id, ]
                WalkMenu(item.GetSubMenu(), ilbl, collection)
            else:
                continue
        elif item.IsSeparator():
            continue
        elif _FindStringRep(i_id) is not None:
            # Only items whose id maps to a known ed_glob constant are kept
            lbl = item.GetItemLabelText().split('\t')[0].strip()
            # wxBug? Even the methods that are supposed to return the text
            # without mnemonics or accelerators on gtk return the string with
            # underscores where the mnemonics '&' are in the original strings
            if wx.Platform == '__WXGTK__':
                lbl = lbl.replace('_', '', 1)
            collection[label].append((i_id, lbl))
        else:
            continue
    return collection
| ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/ed_menu.py | Python | mit | 51,531 |
from evolib.formats.IteratorObjects import FastaAlignment
class FastaFormat(FastaAlignment):
    """Reader for FASTA-formatted sequence alignments.

    Thin alias over FastaAlignment: it adds no behaviour of its own and
    exists to give the format a discoverable name in this module.

    Example usage:

       >>> from evolib.SequenceFormats import FastaFormat

    Example 1: construct from a file object

       >>> fileObject = open('example.fsa', 'r')
       >>> F = FastaFormat(fileObject)

    Example 2: construct from standard input

       >>> import sys
       >>> stdinObject = sys.stdin
       >>> F = FastaFormat(stdinObject)
    """
    pass
| padraicc/Evolib | evolib/SequenceFormats.py | Python | mit | 446 |
import asyncio
from datetime import datetime
from typing import List, Optional
import aiohttp
from asyncpg.pool import Pool
from . import db, parsers
from .extern import nhl
from .query import EventQuery, GameQuery, PlayerQuery, TeamQuery
MAX_TEAM_ID = 101
async def _get_pool(pool: Pool = None) -> Pool:
    """Return the given pool unchanged, or lazily create the default one.

    Callers always ``await`` this helper, so passing an already-created
    pool is a cheap no-op.
    """
    if not pool:
        return await db.get_pool()
    # already awaited by whoever created it
    return pool
async def _download_team(team_id: int, session: aiohttp.ClientSession, pool: Pool = None,
                         sem: asyncio.Semaphore = asyncio.Semaphore()) -> Optional[dict]:
    """Fetch one team from the NHL API and persist it.

    @return: the saved team dict, or None when the API has no such team.
    @note: the default ``sem`` is a single module-level semaphore shared by
           all callers that omit it -- presumably intentional; confirm.
    """
    pool = await _get_pool(pool)
    async with sem:
        try:
            team_data = await nhl.get_team(team_id=team_id, session=session)
            return await _save_team(team_data, pool=pool)
        except AssertionError:
            # nhl.get_team asserts on unknown ids; treat as "no such team"
            return None
async def _download_game(game_id: int, session: aiohttp.ClientSession, pool: Pool = None,
                         sem: asyncio.Semaphore = asyncio.Semaphore()):
    """Fetch one game's live feed from the NHL API and persist it.
    Concurrency is bounded by ``sem``; returns the parsed game dict.
    """
    pool = await _get_pool(pool)
    async with sem:
        feed = await nhl.get_live_data(game_id=game_id, session=session)
        return await _save_game(feed, pool=pool)
async def _save_game(game: dict, pool: Pool = None):
    """Persist one NHL live-feed payload: the game row, every player that
    appears in it, and all of its play-by-play events.

    Runs inside a single transaction so a partial game is never stored.
    @return: the parsed game dict that was written
    """
    pool = await _get_pool(pool)
    # NOTE(review): asyncpg's stock Pool exposes acquire(), not
    # transaction(); this presumably relies on the wrapper returned by
    # db.get_pool -- confirm.
    async with pool.transaction() as conn:
        game_id = int(game['gamePk'])
        game_data = game['gameData']
        game_status = game_data['status']['abstractGameState']
        if game_status == 'Final':
            # Finished games are versioned by the feed timestamp...
            game_version = int(game['metaData']['timeStamp'])
        else:
            # ...in-progress games get a sentinel version
            game_version = -1
        game_obj = parsers.game(game_id, game_version, game)
        player_query = PlayerQuery(conn)
        for _, player in game_data['players'].items():
            await player_query.insert(parsers.player(player))
        await GameQuery(conn).insert(game_obj)
        event_query = EventQuery(conn)
        for event in game['liveData']['plays']['allPlays']:
            ev = parsers.event(game_id, game_version, event)
            if ev is None:
                # Unparseable/irrelevant event types are skipped
                continue
            await event_query.insert(ev)
        return game_obj
async def _get_teams(team_ids: List[int], pool: Pool = None):
    """Async-generate stored team rows (as plain dicts) for the given ids."""
    pool = await _get_pool(pool)
    async with pool.acquire() as conn:
        for row in await TeamQuery(conn).get_all(team_ids):
            yield dict(row)
async def _get_games(game_ids: List[int], pool: Pool = None):
    """Async-generate stored game rows (as plain dicts) for the given ids."""
    pool = await _get_pool(pool)
    async with pool.acquire() as conn:
        for row in await GameQuery(conn).get_all(game_ids):
            yield dict(row)
async def get_team(team_id: int, pool: Pool = None):
    """Download and persist a single team using a fresh HTTP session."""
    pool = await _get_pool(pool)
    async with aiohttp.ClientSession() as http:
        return await _download_team(team_id, http, pool=pool)
async def get_game(game_id: int, pool: Pool = None):
    """Download and persist a single game using a fresh HTTP session."""
    pool = await _get_pool(pool)
    async with aiohttp.ClientSession() as http:
        return await _download_game(game_id, http, pool=pool)
async def _save_team(team: dict, pool: Pool = None) -> dict:
    """Parse one raw team payload, insert it, and return the parsed dict."""
    pool = await _get_pool(pool)
    async with pool.acquire() as conn:
        team_obj = parsers.team(team)
        await TeamQuery(conn).insert(team_obj)
        return team_obj
async def get_teams(concurrency: int = 4, pool: Pool = None):
    """Download every team id in [1, MAX_TEAM_ID) with bounded concurrency.

    @return: the successfully fetched teams, sorted by id (unknown ids are
             dropped -- _download_team returns None for them).
    """
    pool = await _get_pool(pool)
    sem = asyncio.Semaphore(concurrency)
    async with aiohttp.ClientSession() as session:
        tasks = [_download_team(tid, session=session, sem=sem, pool=pool)
                 for tid in range(1, MAX_TEAM_ID)]
        teams = await asyncio.gather(*tasks)
    return sorted((t for t in teams if t is not None), key=lambda t: t['id'])
async def get_games(from_date: datetime, to_date: datetime, concurrency: int = 4, pool: Pool = None):
    """Return all games scheduled between the two dates, sorted by id.

    Games already stored locally are served from the database; only the
    missing ones are downloaded (with at most ``concurrency`` in flight).
    """
    pool = await _get_pool(pool)
    sem = asyncio.Semaphore(concurrency)
    async with aiohttp.ClientSession() as session:
        schedule = await nhl.get_schedule_games(from_date=from_date, to_date=to_date, session=session)
        scheduled_ids = [g['gamePk'] for g in schedule]
        cached = [g async for g in _get_games(scheduled_ids, pool=pool)]
        missing = set(scheduled_ids) - {g['id'] for g in cached}
        fetched = await asyncio.gather(
            *[_download_game(gid, session=session, sem=sem, pool=pool) for gid in missing])
    return sorted(list(fetched) + cached, key=lambda g: g['id'])
| aaront/puckdb | puckdb/fetch.py | Python | apache-2.0 | 4,522 |
# -*- coding: utf-8 -*-
# Copyright (C) 2010 Holoscópio Tecnologia
# Author: Luciana Fujii Pontello <luciana@holoscopio.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gobject
import gtk
from sltv.output import *
from sltv.audio import *
from sltv.sltv import *
from sltv.settings import UI_DIR
import about
import sources
import message
import outputs
import encoders
import sources_view
import outputs_view
from previewarea import PreviewArea
import preview
import effects
import overlay
import volume
import settings as settings
import pip_widget
import metadata
import fvumeter
class SltvUI:
    """Main application window: wires the Sltv streaming engine to the
    GTK user interface built from sltv.ui (GtkBuilder).
    """

    def __init__(self):
        # Load the main window from the GtkBuilder description
        self.interface = gtk.Builder()
        self.interface.add_from_file(UI_DIR + "/sltv.ui")
        self.main_window = self.interface.get_object("window1")
        self.main_window.show_all()
        self.about = about.About(self)
        # Core streaming engine and its state-change callbacks
        self.sltv = Sltv()
        self.sltv.connect("stopped", self.stopped)
        self.sltv.connect("playing", self.playing)
        self.sltv.connect("error", self.error)
        self.sltv.connect("pipeline-ready", self.on_pipeline_ready)
        # Video preview widgets
        self.preview_frame = self.interface.get_object("preview_frame")
        self.preview_box = self.interface.get_object("preview_vbox")
        self.preview = preview.PreviewUI(self, self.sltv)
        self.preview_box.pack_start(self.preview.get_widget(), False, False)
        self.preview_area = PreviewArea()
        self.preview_frame.add(self.preview_area)
        self.preview_area.show()
        self.box = self.interface.get_object("paned")
        self.settings = settings.SettingsUI(self, self.sltv)
        self.box.add(self.settings.get_widget())
        self.play_button = self.interface.get_object("play_button")
        self.stop_button = self.interface.get_object("stop_button")
        # Settings dialog is built programmatically below
        self.settings_dialog = gtk.Dialog('Settings', self.main_window)
        self.settings_dialog.set_default_size(400, 400)
        self.encoders = self.sltv.encoders
        self.videoconverters = self.sltv.videoconverters
        self.encoders_ui = encoders.Encoders(
            self, self.encoders, self.videoconverters
        )
        self.metadata_ui = metadata.MetadataUI(self.sltv, self.settings_dialog)
        self.hbuttonbox = self.interface.get_object("hbuttonbox1")
        # Volume control button
        self.volume = volume.VolumeUI(self, self.sltv)
        self.volume_button = self.volume.get_widget()
        self.volume_button.show()
        self.hbuttonbox.pack_start(self.volume_button)
        # Two VU meters fed by the engine: the first draws peaks only
        self.sltv.vus = []
        meter = fvumeter.FVUMeter()
        meter.only_draw_peak = True
        self.sltv.vus.append(meter)
        meter = fvumeter.FVUMeter()
        self.sltv.vus.append(meter)
        self.sltv.vus[0].show()
        self.sltv.vus[1].show()
        self.vumeter_box = self.interface.get_object("vumeter_box")
        if self.vumeter_box:
            self.vumeter_box.add(self.sltv.vus[0])
            self.vumeter_box.add(self.sltv.vus[1])

        # pip (picture-in-picture position selector)
        pip_box = self.interface.get_object("pip_box")
        self.pip_selector = pip_widget.PIPSelector()
        self.pip_selector.connect("changed", self.on_pip_changed)
        pip_box.add(self.pip_selector)
        pip_box.show_all()

        # sources
        self.sources = self.sltv.sources
        self.audioconvs = self.sltv.audioconvs
        self.sources_ui = sources.Sources(self, self.sources, self.audioconvs)
        self.video_source_box = self.interface.get_object("video_source_box")
        self.sources_view = sources_view.SourcesView(self.sltv, self.sources)
        self.sources_view.show_all()
        self.video_source_box.pack_start(self.sources_view, False, False)

        # audio combobox (selects which source provides the audio track)
        self.audio_sources_combobox = self.interface.get_object(
            "audio_sources_combobox"
        )
        self.audio_sources_combobox.set_model(
            sources.AudioModel(self.sources).model
        )
        cell = gtk.CellRendererText()
        self.audio_sources_combobox.pack_start(cell, True)
        self.audio_sources_combobox.add_attribute(cell, "text", 0)
        self.audio_sources_combobox.connect(
            "changed", self.on_select_audio_source
        )
        self.audio_sources_combobox.set_active(0)

        # outputs
        self.outputs = self.sltv.outputs
        self.outputs_ui = outputs.Outputs(self, self.outputs, self.encoders)
        self.outputs_box = self.interface.get_object("outputs_box")
        self.outputs_view = outputs_view.OutputsView( self.sltv, self.outputs)
        self.outputs_view.show_all()
        self.outputs_box.pack_start(self.outputs_view, False, False)

        # settings dialog: a notebook of Sources/Encoders/Outputs/Metadata
        self.settings_dialog.set_has_separator(False)
        self.settings_dialog.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
        self.settings_dialog.connect('delete-event', self.hide_settings)
        self.settings_dialog.connect('response', self.hide_settings)
        vbox = self.settings_dialog.get_content_area()
        vbox.set_border_width(12)
        notebook = gtk.Notebook()
        self.settings_notebook = notebook
        vbox.add(notebook)
        vbox = gtk.VBox()
        vbox.set_border_width(12)
        vbox.pack_start(self.sources_ui.get_widget())
        notebook.append_page(vbox, gtk.Label('Sources'))
        vbox = gtk.VBox()
        vbox.set_border_width(12)
        vbox.pack_start(self.encoders_ui.get_widget())
        notebook.append_page(vbox, gtk.Label('Encoders'))
        vbox = gtk.VBox()
        vbox.set_border_width(12)
        vbox.pack_start(self.outputs_ui.get_widget())
        notebook.append_page(vbox, gtk.Label('Outputs'))
        vbox = gtk.VBox()
        vbox.set_border_width(12)
        vbox.pack_start(self.metadata_ui.get_widget())
        notebook.append_page(vbox, gtk.Label('Metadata'))

        # menu wiring
        self.settings_menuitem = self.interface.get_object("settings_menuitem")
        self.quit_menuitem = self.interface.get_object("quit_menuitem")
        self.about_menu = self.interface.get_object("about_menu")
        self.play_button.connect("clicked", self.on_play_press)
        self.stop_button.connect("clicked", self.on_stop_press)
        self.main_window.connect("delete_event", self.on_window_closed)
        self.settings_menuitem.connect("activate", self.show_settings)
        self.quit_menuitem.connect("activate", gtk.main_quit)
        self.about_menu.connect("activate", self.show_about)

    def on_pipeline_ready(self, sltv):
        """Engine callback: attach the engine's preview sink to our area."""
        sltv_preview = self.sltv.get_preview()
        self.preview_area.connect(sltv_preview)

    def stopped(self, sltv):
        """Engine callback: reflect the stopped state in the controls."""
        self.stop_button.set_sensitive(False)
        self.play_button.set_sensitive(True)
        self.settings_menuitem.set_sensitive(True)

    def playing(self, sltv):
        """Engine callback: reflect the playing state in the controls.
        Settings are locked while streaming.
        """
        self.play_button.set_sensitive(False)
        self.stop_button.set_sensitive(True)
        self.settings_menuitem.set_sensitive(False)

    def error(self, sltv, msg):
        """Engine callback: surface engine errors in a dialog."""
        message.MessageError(msg, self)

    def selected_audio_source(self):
        """Return the name of the selected audio source, or None."""
        model = self.audio_sources_combobox.get_model()
        iter = self.audio_sources_combobox.get_active_iter()
        if iter is None:
            return None
        return model.get_value(iter, 0)

    def on_select_audio_source(self, combobox):
        """Push the combobox selection into the engine."""
        source_name = self.selected_audio_source()
        self.sltv.set_audio_source(source_name)

    def on_pip_changed(self, widget, selected):
        """Push the picture-in-picture position into the engine."""
        self.sltv.set_pip_position(selected)

    def on_play_press(self, event):
        """Start streaming, refusing when no video source is selected."""
        if not self.sources_view.has_item_a_selected():
            message.MessageInfo(
                "Please, choose or add a video source.", self
            )
            return False
        self.play_button.set_sensitive(False)
        self.play()

    def play(self):
        """Start the engine if it is not already running."""
        if not self.sltv.playing():
            self.sltv.play()

    def show_settings(self, menuitem = None):
        """Display the settings dialog."""
        self.settings_dialog.show_all()

    def hide_settings(self, *args):
        """Hide (not destroy) the settings dialog and reset to page one."""
        self.settings_dialog.hide()
        self.settings_notebook.set_current_page(0)
        return True

    def show_about(self, menuitem):
        """Display the about dialog."""
        self.about.show_window()

    def on_stop_press(self, event):
        """Stop streaming in response to the stop button."""
        self.stop_button.set_sensitive(False)
        self.stop()

    def stop(self):
        """Stop the engine if it is running."""
        if self.sltv.playing():
            self.sltv.stop()

    def on_window_closed(self, event, data):
        """Quit the GTK main loop when the main window is closed."""
        gtk.main_quit()
| lucasa/landell-fgdp | sltv/ui/core.py | Python | gpl-2.0 | 9,116 |
"""
FableGenerator
-- DO NOT EDIT THIS FILE --
-- EDIT FABLEME/UTILS.PY AND COPY THAT ONE --
utils.py
"""
import os
import logging
import time
import datetime
CHROME_DATE_FORMAT = '%Y-%m-%d'
IE10_DATE_FORMAT = '%m/%d/%Y'
OUTPUT_PATH = "output/"
RESOURCES_PATH = "resources/"
class BasicUtils(object):
    """Stateless helpers for date parsing and output/resource paths.

    All methods are static; the class only namespaces them.
    """

    @staticmethod
    def is_date_valid(inputstring, inputformat):
        """Check if the input string contains a date in the format specified.

        @param inputstring: candidate date string
        @param inputformat: time.strptime format, e.g. '%Y-%m-%d'
        @return: True when the string parses cleanly, else False
        """
        was_converted = False
        try:
            struct_dt = time.strptime(inputstring, inputformat)
            logging.debug('StructDT = ' + str(struct_dt))
            # Round-trip through mktime to reject out-of-range dates
            datetime.date.fromtimestamp(time.mktime(struct_dt))
            was_converted = True
        except ValueError:
            logging.debug('Unknown date format for '+inputstring+': defaulting...')
        return was_converted

    @staticmethod
    def convert_date(inputstring, inputformat):
        """Convert string to a date, falling back to 2000-01-01 on failure.

        @param inputstring: candidate date string
        @param inputformat: time.strptime format
        @return: datetime.date
        """
        retdt = None
        try:
            struct_dt = time.strptime(inputstring, inputformat)
            retdt = datetime.date.fromtimestamp(time.mktime(struct_dt))
        except ValueError:
            logging.debug('Cannot convert date. Defaulting to 01/01/2000')
            # BUG FIX: was date(2000,01,01); leading-zero integer literals
            # are Python2-only syntax and a SyntaxError under Python 3.
            retdt = datetime.date(2000, 1, 1)
        return retdt

    @staticmethod
    def string_to_date(inputstring):
        """Convert a string read from an <input type='date'> to a date.

        Tries the Chrome format (YYYY-MM-DD) first, then the IE10 format
        (MM/DD/YYYY); defaults to 2000-01-01 when neither matches.
        """
        retdt = None
        if (BasicUtils.is_date_valid(inputstring, CHROME_DATE_FORMAT)):
            logging.debug('Trying to convert #' + inputstring + '#: it seems a CHROME date...')
            retdt = BasicUtils.convert_date(inputstring, CHROME_DATE_FORMAT)
        elif (BasicUtils.is_date_valid(inputstring, IE10_DATE_FORMAT)):
            logging.debug('Trying to convert #' + inputstring + '# it seems a IE10 date...')
            retdt = BasicUtils.convert_date(inputstring, IE10_DATE_FORMAT)
        else:
            logging.debug('Cannot convert date. Defaulting to 01/01/2000')
            retdt = datetime.date(2000, 1, 1)
        return retdt

    @staticmethod
    def get_output_path(filename):
        """Return filename joined under the OUTPUT_PATH directory."""
        return os.path.join(OUTPUT_PATH, filename)

    @staticmethod
    def get_from_relative_resources(filename):
        """Return filename joined under the RESOURCES_PATH directory."""
        return os.path.join(RESOURCES_PATH, filename)

    @staticmethod
    def normalize_path(filename):
        """Normalize a path under Google App Engine."""
        return os.path.normpath(filename)
class GoogleUtils(BasicUtils):
    """Path helpers for code deployed under Google App Engine."""

    @staticmethod
    def get_from_relative_resources(filename):
        """Return filename joined under the RESOURCES_PATH directory."""
        return os.path.join(RESOURCES_PATH, filename)

    @staticmethod
    def get_from_google(filename):
        """Get the absolute path of a file stored under Google App Engine."""
        return GoogleUtils.__abs_app_path(filename)

    @staticmethod
    def get_from_resources(filename):
        """Get a file stored in RESOURCES_PATH under Google App Engine."""
        relative = GoogleUtils.get_from_relative_resources(filename)
        return GoogleUtils.__abs_app_path(relative)

    @staticmethod
    def __abs_app_path(filepath):
        # The app root is the grandparent directory of this module's file.
        app_dir = os.path.dirname(os.path.split(__file__)[0])
        return os.path.normpath(os.path.join(app_dir, filepath))
| guildenstern70/fablegenerator | fablegenerator/fableme/utils.py | Python | mit | 3,462 |
__author__ = 'nathan'
import json
import pysolr
import config
import models
import unicodedata
def main():
    """Rebuild the three Blake Solr cores (object, copy, work) from the DB.

    Selects the Solr endpoints based on config.solr, wipes each core,
    re-adds every object, copy and work from the database, and optimizes
    each core afterwards.
    """
    from sqlalchemy.orm import sessionmaker
    # Choose the Solr cluster: production, dev, or a local default
    if hasattr(config, "solr") and config.solr == "lib_prod":
        blake_object_solr = pysolr.Solr('http://webapp.lib.unc.edu:8200/solr/blake/blake_object')
        blake_copy_solr = pysolr.Solr('http://webapp.lib.unc.edu:8200/solr/blake/blake_copy')
        blake_work_solr = pysolr.Solr('http://webapp.lib.unc.edu:8200/solr/blake/blake_work')
    elif hasattr(config, "solr") and config.solr == "lib_dev":
        blake_object_solr = pysolr.Solr('http://london.libint.unc.edu:8983/solr/blake_object')
        blake_copy_solr = pysolr.Solr('http://london.libint.unc.edu:8983/solr/blake_copy')
        blake_work_solr = pysolr.Solr('http://london.libint.unc.edu:8983/solr/blake_work')
    else:
        blake_object_solr = pysolr.Solr('http://localhost:8983/solr/blake_object')
        blake_copy_solr = pysolr.Solr('http://localhost:8983/solr/blake_copy')
        blake_work_solr = pysolr.Solr('http://localhost:8983/solr/blake_work')
    engine = models.db.create_engine(config.db_connection_string)
    session = sessionmaker(bind=engine)()

    # ---- objects core: only non-supplemental objects are indexed ----
    objects = session.query(models.BlakeObject).all()
    blake_object_solr.delete(q='*:*')
    for blake_object in objects:
        try:
            if blake_object.supplemental is None:
                obj = {
                    "id": blake_object.object_id,
                    "title": blake_object.title,
                    "bentley_id": blake_object.bentley_id,
                    "dbi": blake_object.dbi,
                    "desc_id": blake_object.desc_id,
                    "copy_id": blake_object.copy_bad_id,
                    "characteristics": blake_object.characteristics,
                    "components": json.dumps(blake_object.components),
                    "illustration_description": json.dumps(blake_object.illustration_description),
                    "text": json.dumps(blake_object.text),
                    "copy_title": blake_object.copy.title,
                    "copy_institution": blake_object.copy.institution,
                    # FIXME: properly convert unicode rather than stripping characters
                    "notes": json.dumps([unicodedata.normalize('NFKD', note["note"]).encode('ascii', 'ignore') for note in blake_object.notes])
                }
                print obj["id"]
                # Work-level fields only exist when the copy belongs to a work
                if blake_object.copy.work:
                    obj["work_title"] = blake_object.copy.work.title
                    obj["work_id"] = blake_object.copy.work.bad_id
                    obj["composition_date"] = blake_object.copy.composition_date
                    obj["print_date"] = blake_object.copy.print_date
                    obj["medium"] = blake_object.copy.work.medium
                blake_object_solr.add([obj])
        except pysolr.SolrError as err:
            # Log and continue: one bad document must not abort the reindex
            print err
    blake_object_solr.optimize()

    # ---- copies core ----
    copies = session.query(models.BlakeCopy).all()
    blake_copy_solr.delete(q='*:*')
    for blake_copy in copies:
        copy_ = {
            "id": blake_copy.copy_id,
            "bad_id": blake_copy.bad_id,
            "source": blake_copy.source,
            "title": blake_copy.title,
            "institution": blake_copy.institution,
            "header": blake_copy.header,
            "composition_date": blake_copy.composition_date,
            "print_date": blake_copy.print_date,
            "effective_copy_id": blake_copy.effective_copy_id
        }
        if blake_copy.work:
            copy_["medium"] = blake_copy.work.medium
            copy_["work_id"] = blake_copy.work.bad_id
        blake_copy_solr.add([copy_])
    blake_copy_solr.optimize()

    # ---- works core ----
    works = session.query(models.BlakeWork).all()
    blake_work_solr.delete(q='*:*')
    for blake_work in works:
        blake_work_solr.add([{
            "id": blake_work.work_id,
            "bad_id": blake_work.bad_id,
            "title": blake_work.title,
            "medium": blake_work.medium,
            "info": blake_work.info,
            "image": blake_work.image,
            "composition_date": blake_work.composition_date,
            "composition_date_string": blake_work.composition_date_string
        }])
    blake_work_solr.optimize()
if __name__ == "__main__":
main() | blakearchive/archive | blakearchive/solrimport.py | Python | gpl-2.0 | 4,341 |
from django.http import JsonResponse
class JSENDSuccess(JsonResponse):
    """JSEND-style "success" response: wraps *data* in the standard envelope."""

    def __init__(self, status_code, data={}):
        payload = {'status': 'success', 'data': data}
        super(JSENDSuccess, self).__init__(status=status_code, data=payload)
class JSENDFail(JsonResponse):
    """JSEND-style "fail" response: wraps *data* in the standard envelope."""

    def __init__(self, status_code, data={}):
        payload = {'status': 'fail', 'data': data}
        super(JSENDFail, self).__init__(status=status_code, data=payload)
class JSENDError(JsonResponse):
    """JSEND-style "error" response: a message plus optional code and data."""

    def __init__(self, status_code, msg, code=None, data=None):
        content = {'status': 'error', 'message': msg}
        # Bug fix: these two updates previously called ``jsend.update`` — an
        # undefined name — so passing ``code`` or ``data`` raised NameError.
        # ``is not None`` also lets a falsy-but-meaningful code (e.g. 0) through.
        if code is not None:
            content.update({'code': code})
        if data is not None:
            content.update({'data': data})
        super(JSENDError, self).__init__(status=status_code, data=content)
| sangwonl/stage34 | webapp/api/helpers/http/jsend.py | Python | mit | 721 |
import unittest
from vFense.core._constants import *
from vFense.core.user._constants import *
from vFense.core.user.users import *
from vFense.core.group.groups import *
from vFense.core.group._constants import *
from vFense.core.customer.customers import *
from vFense.core.customer._constants import *
from vFense.core.permissions._constants import *
from vFense.errorz._constants import *
class UsersGroupsAndCustomersTests(unittest.TestCase):
    """End-to-end CRUD scenario for customers, groups, and users.

    Method names are prefixed a..q so unittest's alphabetical ordering runs
    them as one create -> edit -> tear-down sequence against the 'test'
    customer.
    """

    def _assert_ok(self, results):
        # Every vFense API call reports its outcome under HTTP_STATUS_CODE;
        # assertEqual (instead of the deprecated failUnless) gives a useful
        # failure message showing the actual status code.
        self.assertEqual(results.get(ApiResultKeys.HTTP_STATUS_CODE), 200)

    def _group_id(self, group_name):
        # Resolve a group name (within the 'test' customer) to its id.
        return get_group_by_name(group_name, 'test').get(GroupKeys.GroupId)

    def test_a_create_customer(self):
        self._assert_ok(
            create_customer(
                'test',
                http_application_url_location='https://10.0.0.1/packages',
                init=True
            )
        )

    def test_b_edit_customer(self):
        props = {
            CustomerKeys.OperationTtl: 20
        }
        self._assert_ok(edit_customer('test', **props))

    def test_c_create_group1(self):
        self._assert_ok(
            create_group(
                'Tester 4 life Part 1', 'test', [Permissions.ADMINISTRATOR]
            )
        )

    def test_d_create_group2(self):
        self._assert_ok(
            create_group(
                'Tester 4 life Part 2', 'test',
                [Permissions.INSTALL, Permissions.UNINSTALL]
            )
        )

    def test_e_create_user1(self):
        group_id = self._group_id('Tester 4 life Part 1')
        self._assert_ok(
            create_user(
                'test1', 'Unit Test 1', 'T35t#123',
                [group_id], 'test', 'test@test.org', 'yes',
                'tester', '/test', 'TEST'
            )
        )

    def test_f_create_user2(self):
        group_id = self._group_id('Tester 4 life Part 1')
        self._assert_ok(
            create_user(
                'test2', 'Unit Test 2', 'T35t#123',
                [group_id], 'test', 'test@test.org', 'yes',
                'tester', '/test', 'TEST'
            )
        )

    def test_g_create_user3(self):
        group_id_1 = self._group_id('Tester 4 life Part 1')
        group_id_2 = self._group_id('Tester 4 life Part 2')
        self._assert_ok(
            create_user(
                'test3', 'Unit Test 3', 'T35t#123',
                [group_id_1, group_id_2],
                'test', 'test@test.org', 'yes',
                'tester', '/test', 'TEST'
            )
        )

    def test_h_edit_user(self):
        props = {
            UserKeys.FullName: 'Ninja Unit Tester 1'
        }
        self._assert_ok(edit_user_properties('test1', **props))

    def test_i_edit_password(self):
        self._assert_ok(change_password('test1', 'T35t#123', 'T35t#1234'))

    def test_j_add_user_to_group(self):
        group_id = self._group_id('Tester 4 life Part 2')
        self._assert_ok(add_user_to_groups('test2', 'test', [group_id]))

    def test_k_remove_groups_from_user(self):
        group_id = self._group_id('Tester 4 life Part 1')
        self._assert_ok(remove_groups_from_user('test3', [group_id]))

    def test_l_remove_customers_from_user(self):
        self._assert_ok(remove_customers_from_user('test3', ['test']))

    def test_m_remove_user(self):
        self._assert_ok(remove_user('test3'))

    def test_n_remove_users(self):
        self._assert_ok(remove_users(['test1', 'test2']))

    def test_o_remove_groups1(self):
        self._assert_ok(remove_group(self._group_id('Tester 4 life Part 1')))

    def test_p_remove_groups2(self):
        self._assert_ok(remove_group(self._group_id('Tester 4 life Part 2')))

    def test_q_remove_customer1(self):
        self._assert_ok(remove_customer('test'))
def main():
    """Run this module's test cases through unittest's CLI entry point."""
    unittest.main()


if __name__ == '__main__':
    main()
| dtklein/vFense | tp/src/core/tests/users_groups_and_customers_test.py | Python | lgpl-3.0 | 6,128 |
# -*- coding: utf-8 -*-
# This module is a port of the Textblob Averaged Perceptron Tagger
# Author: Matthew Honnibal <honnibal+gh@gmail.com>,
# Long Duong <longdt219@gmail.com> (NLTK port)
# URL: <https://github.com/sloria/textblob-aptagger>
# <http://nltk.org/>
# Copyright 2013 Matthew Honnibal
# NLTK modifications Copyright 2015 The NLTK Project
#
# This module is provided under the terms of the MIT License.
from __future__ import absolute_import
from __future__ import print_function, division
import random
from collections import defaultdict
import pickle
import logging
from nltk.tag.api import TaggerI
from nltk.data import find, load
from nltk.compat import python_2_unicode_compatible
PICKLE = "averaged_perceptron_tagger.pickle"
class AveragedPerceptron(object):
    '''An averaged perceptron, as implemented by Matthew Honnibal.

    See more implementation details here:
        http://spacy.io/blog/part-of-speech-POS-tagger-in-python/
    '''

    def __init__(self):
        # One weight vector per feature: {feature: {label: weight}}.
        self.weights = {}
        self.classes = set()
        # Accumulated weight*steps totals, keyed by (feature, label) tuples,
        # used when computing the final averaged weights.
        self._totals = defaultdict(int)
        # Step index at which each (feature, label) weight last changed.
        self._tstamps = defaultdict(int)
        # Number of training instances seen so far.
        self.i = 0

    def predict(self, features):
        '''Dot-product the features and current weights and return the best label.'''
        scores = defaultdict(float)
        for feature, value in features.items():
            if value == 0 or feature not in self.weights:
                continue
            for label, weight in self.weights[feature].items():
                scores[label] += value * weight
        # Secondary sort on the label itself keeps tie-breaking deterministic.
        return max(self.classes, key=lambda label: (scores[label], label))

    def update(self, truth, guess, features):
        '''Update the feature weights.'''
        def bump(label, feature, current, delta):
            key = (feature, label)
            self._totals[key] += (self.i - self._tstamps[key]) * current
            self._tstamps[key] = self.i
            self.weights[feature][label] = current + delta

        self.i += 1
        if truth == guess:
            return None
        for feature in features:
            label_weights = self.weights.setdefault(feature, {})
            bump(truth, feature, label_weights.get(truth, 0.0), 1.0)
            bump(guess, feature, label_weights.get(guess, 0.0), -1.0)

    def average_weights(self):
        '''Average weights from all iterations.'''
        for feature, label_weights in self.weights.items():
            averaged_weights = {}
            for label, weight in label_weights.items():
                key = (feature, label)
                total = self._totals[key] + (self.i - self._tstamps[key]) * weight
                mean = round(total / self.i, 3)
                if mean:
                    averaged_weights[label] = mean
            self.weights[feature] = averaged_weights

    def save(self, path):
        '''Save the pickled model weights.'''
        with open(path, 'wb') as fout:
            return pickle.dump(dict(self.weights), fout)

    def load(self, path):
        '''Load the pickled model weights.'''
        self.weights = load(path)
@python_2_unicode_compatible
class PerceptronTagger(TaggerI):
    '''
    Greedy Averaged Perceptron tagger, as implemented by Matthew Honnibal.
    Tags left-to-right, feeding each predicted tag back in as context for
    the next word.
    See more implementation details here:
        http://spacy.io/blog/part-of-speech-POS-tagger-in-python/

    >>> from nltk.tag.perceptron import PerceptronTagger

    Train the model:

    >>> tagger = PerceptronTagger(load=False)
    >>> tagger.train([[('today','NN'),('is','VBZ'),('good','JJ'),('day','NN')],
    ... [('yes','NNS'),('it','PRP'),('beautiful','JJ')]])
    >>> tagger.tag(['today','is','a','beautiful','day'])
    [('today', 'NN'), ('is', 'PRP'), ('a', 'PRP'), ('beautiful', 'JJ'), ('day', 'NN')]

    Use the pretrained model (the default constructor):

    >>> pretrain = PerceptronTagger()
    >>> pretrain.tag('The quick brown fox jumps over the lazy dog'.split())
    [('The', 'DT'), ('quick', 'JJ'), ('brown', 'NN'), ('fox', 'NN'), ('jumps', 'VBZ'), ('over', 'IN'), ('the', 'DT'), ('lazy', 'JJ'), ('dog', 'NN')]
    >>> pretrain.tag("The red cat".split())
    [('The', 'DT'), ('red', 'JJ'), ('cat', 'NN')]
    '''

    # Sentinel context tokens padded around each sentence so the feature
    # templates can always look two positions left/right.
    START = ['-START-', '-START2-']
    END = ['-END-', '-END2-']

    def __init__(self, load=True):
        '''
        :param load: Load the pickled model upon instantiation.
        '''
        self.model = AveragedPerceptron()
        # Unambiguous word -> tag shortcuts built by _make_tagdict().
        self.tagdict = {}
        self.classes = set()
        if load:
            AP_MODEL_LOC = 'file:'+str(find('taggers/averaged_perceptron_tagger/'+PICKLE))
            self.load(AP_MODEL_LOC)

    def tag(self, tokens):
        '''
        Tag tokenized sentences.

        :params tokens: list of word
        :type tokens: list(str)
        :return: list of (word, tag) pairs, one per input token
        '''
        prev, prev2 = self.START
        output = []
        context = self.START + [self.normalize(w) for w in tokens] + self.END
        for i, word in enumerate(tokens):
            # Fast path: unambiguous words bypass the perceptron entirely.
            tag = self.tagdict.get(word)
            if not tag:
                features = self._get_features(i, word, context, prev, prev2)
                tag = self.model.predict(features)
            output.append((word, tag))
            prev2 = prev
            prev = tag
        return output

    def train(self, sentences, save_loc=None, nr_iter=5):
        '''Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
        controls the number of Perceptron training iterations.

        :param sentences: A list of (words, tags) tuples.
        :param save_loc: If not ``None``, saves a pickled model in this location.
        :param nr_iter: Number of training iterations.
        '''
        self._make_tagdict(sentences)
        self.model.classes = self.classes
        for iter_ in range(nr_iter):
            # c/n track per-iteration training accuracy for the log line below.
            c = 0
            n = 0
            for sentence in sentences:
                words = [word for word,tag in sentence]
                tags = [tag for word,tag in sentence]
                prev, prev2 = self.START
                context = self.START + [self.normalize(w) for w in words] \
                    + self.END
                for i, word in enumerate(words):
                    guess = self.tagdict.get(word)
                    if not guess:
                        feats = self._get_features(i, word, context, prev, prev2)
                        guess = self.model.predict(feats)
                        # Only perceptron-predicted tags trigger weight updates;
                        # tagdict words are taken as given.
                        self.model.update(tags[i], guess, feats)
                    prev2 = prev
                    prev = guess
                    c += guess == tags[i]
                    n += 1
            # Reshuffling between epochs; note this mutates the caller's list.
            random.shuffle(sentences)
            logging.info("Iter {0}: {1}/{2}={3}".format(iter_, c, n, _pc(c, n)))
        self.model.average_weights()
        # Pickle as a binary file
        if save_loc is not None:
            with open(save_loc, 'wb') as fout:
                pickle.dump((self.model.weights, self.tagdict, self.classes), fout, -1)

    def load(self, loc):
        '''
        :param loc: Load a pickled model at location.
        :type loc: str
        '''
        self.model.weights, self.tagdict, self.classes = load(loc)
        self.model.classes = self.classes

    def normalize(self, word):
        '''
        Normalization used in pre-processing.

        - All words are lower cased
        - Digits in the range 1800-2100 are represented as !YEAR;
        - Other digits are represented as !DIGITS

        :rtype: str
        '''
        if '-' in word and word[0] != '-':
            return '!HYPHEN'
        elif word.isdigit() and len(word) == 4:
            return '!YEAR'
        elif word[0].isdigit():
            return '!DIGITS'
        else:
            return word.lower()

    def _get_features(self, i, word, context, prev, prev2):
        '''Map tokens into a feature representation, implemented as a
        {hashable: float} dict. If the features change, a new model must be
        trained.
        '''
        def add(name, *args):
            features[' '.join((name,) + tuple(args))] += 1

        # Shift i so it indexes into the START-padded context list.
        i += len(self.START)
        features = defaultdict(int)
        # It's useful to have a constant feature, which acts sort of like a prior
        add('bias')
        add('i suffix', word[-3:])
        add('i pref1', word[0])
        add('i-1 tag', prev)
        add('i-2 tag', prev2)
        add('i tag+i-2 tag', prev, prev2)
        add('i word', context[i])
        add('i-1 tag+i word', prev, context[i])
        add('i-1 word', context[i-1])
        add('i-1 suffix', context[i-1][-3:])
        add('i-2 word', context[i-2])
        add('i+1 word', context[i+1])
        add('i+1 suffix', context[i+1][-3:])
        add('i+2 word', context[i+2])
        return features

    def _make_tagdict(self, sentences):
        '''
        Make a tag dictionary for single-tag words.

        :param sentences: A list of list of (word, tag) tuples.
        '''
        counts = defaultdict(lambda: defaultdict(int))
        for sentence in sentences:
            for word, tag in sentence:
                counts[word][tag] += 1
                self.classes.add(tag)
        # A word becomes a shortcut only if it is frequent (>= freq_thresh)
        # and nearly always carries the same tag (>= ambiguity_thresh).
        freq_thresh = 20
        ambiguity_thresh = 0.97
        for word, tag_freqs in counts.items():
            tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
            n = sum(tag_freqs.values())
            # Don't add rare words to the tag dictionary
            # Only add quite unambiguous words
            if n >= freq_thresh and (mode / n) >= ambiguity_thresh:
                self.tagdict[word] = tag
def _pc(n, d):
return (n / d) * 100
def _load_data_conll_format(filename):
print ('Read from file: ', filename)
with open(filename,'rb') as fin:
sentences = []
sentence = []
for line in fin.readlines():
line = line.strip()
#print line
if len(line) ==0:
sentences.append(sentence)
sentence = []
continue
tokens = line.split('\t')
word = tokens[1]
tag = tokens[4]
sentence.append((word,tag))
return sentences
def _get_pretrain_model():
    """Train a tagger on local WSJ/Penn-Treebank CoNLL files, pickle it, and
    print its accuracy on the held-out section.

    Expects ``english_ptb_train.conll`` / ``english_ptb_test.conll`` in the
    current working directory; writes the model to ``PICKLE``.
    """
    # Train and test on English part of ConLL data (WSJ part of Penn Treebank)
    # Train: section 2-11
    # Test : section 23
    # NOTE(review): PerceptronTagger() defaults to load=True, so this first
    # loads the bundled pickled model before retraining — confirm intended.
    tagger = PerceptronTagger()
    training = _load_data_conll_format('english_ptb_train.conll')
    testing = _load_data_conll_format('english_ptb_test.conll')
    print ('Size of training and testing (sentence)', len(training), len(testing))
    # Train and save the model
    tagger.train(training, PICKLE)
    print ('Accuracy : ',tagger.evaluate(testing))
if __name__ == '__main__':
    # Retraining entry point; left disabled so running this module directly
    # has no side effects.
    #_get_pretrain_model()
    pass
| adazey/Muzez | libs/nltk/tag/perceptron.py | Python | gpl-3.0 | 11,552 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
# Identifier constants for the supported app stores.
APPLE = 'apple'
GOOGLE_PLAY = 'google_play'
AMAZON_APPSTORE = 'amazon_appstore'
WINDOWS_STORE = 'windows_store'
| mobify/python-appfigures | appfigures/stores.py | Python | mit | 194 |
# Copyright 2013 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import l3_ext_gw_mode as apidef
from neutron_lib.api import extensions
class L3_ext_gw_mode(extensions.APIExtensionDescriptor):
    """Extension shim binding the l3_ext_gw_mode API definition from
    neutron-lib to Neutron's extension framework."""

    api_definition = apidef
| noironetworks/neutron | neutron/extensions/l3_ext_gw_mode.py | Python | apache-2.0 | 820 |
from collections import namedtuple
from contextlib import closing
from functools import partial
# Opcode tags yielded by transaction generators to the db_txn driver.
DONE = 0     # final value: stop driving the generator
QUERY = 1    # run a SELECT and send the rows back in
EXECUTE = 2  # can insert, but won't report insert id
INSERT = 3   # run an insert and send back the last row id
RECURSE = 4  # recursive generator


def db_result(*val, **named):
    """Build the (DONE, value) pair a transaction generator yields last.

    Positional values: none -> None, one -> that value, many -> a tuple.
    Keyword values are packed into a namedtuple. Mixing positional and
    keyword values is an error.
    """
    assert len(val) == 0 or len(named) == 0
    if named:
        record = namedtuple('_', named.keys())
        return (DONE, record(**named))
    if len(val) == 1:
        return (DONE, val[0])
    if len(val) > 1:
        return (DONE, val)
    return (DONE, None)


def db_input(t, sql, *args):
    """Yield-able request: opcode *t* plus the SQL statement and its args."""
    return (t, (sql, args))


# Convenience wrappers with the opcode pre-bound.
db_query = partial(db_input, QUERY)
db_execute = partial(db_input, EXECUTE)
db_insert = partial(db_input, INSERT)


def db_recurse(g):
    """Yield-able request asking the driver to run nested generator *g*."""
    return (RECURSE, g)


def for_recurse(f):
    """Decorator: make generator function *f* directly yield-able as a
    nested sub-transaction."""
    def wrapper(*args, **kwargs):
        return db_recurse(f(*args, **kwargs))
    return wrapper
class _NextIteration(BaseException):
def __init__(value):
super(_NextIteration, self).__init__()
self.value = value
def _exec(cur, g):
    """Drive transaction generator *g*, executing each yielded request on DB
    cursor *cur*, and return the generator's final DONE value."""
    def _run_for_gen(f, *args, **kwargs):
        # Run f; if it raises, hand the exception to the generator and resume
        # the outer loop with whatever (what, val) the generator yields next.
        # _NextIteration derives from BaseException, so it is not swallowed
        # by this function's own `except Exception` clause.
        try:
            return f(*args, **kwargs)
        except StopIteration:
            raise
        except Exception as e:
            raise _NextIteration(g.throw(type(e), e))
    what, val = next(g)
    while True:
        try:
            if what == DONE:
                g.close()
                return val
            elif what == RECURSE:
                # val is a nested generator: run it on the same cursor and
                # send its result back into the outer generator.
                try:
                    r = _run_for_gen(_exec, cur, val)
                except StopIteration:
                    r = None
                what, val = g.send(r)
            else:
                # QUERY / EXECUTE / INSERT: val is the (sql, args) pair.
                sql, args = val
                _run_for_gen(cur.execute, sql, args)
                rowc = cur.rowcount
                rs = None
                if what == QUERY:
                    # Wrap each fetched row in a namedtuple keyed by the
                    # cursor's column names.
                    rows = _run_for_gen(cur.fetchall)
                    cols = [c[0] for c in cur.description]
                    nt = namedtuple('_', cols)
                    rs = [nt(*list(r)) for r in rows]
                    rowc = len(rs)
                elif what == INSERT:
                    rs = cur.lastrowid
                # The generator receives (row count, result set or insert id).
                what, val = g.send((rowc, rs))
        except _NextIteration as e:
            what, val = e.value
def db_txn(pool, gen, *args, **kwargs):
    """Run transaction generator ``gen(*args, **kwargs)`` on a pooled
    connection and commit on success.

    Returns the generator's final value, or None if the generator finished
    without yielding a DONE value. Other exceptions propagate without a
    commit; ``closing`` guarantees the connection is closed either way.
    """
    g = gen(*args, **kwargs)
    with closing(pool.connection()) as conn:
        # Some drivers/pools expose no explicit begin(); ignore its absence.
        try:
            conn.begin()
        except AttributeError:
            pass
        try:
            with closing(conn.cursor()) as cur:
                res = _exec(cur, g)
            conn.commit()
            return res
        except StopIteration: # no val to return
            conn.commit()
            return None
def in_txn(f):
    """Decorator turning a transaction generator into a plain function.

    The wrapped callable takes the connection pool as its first argument
    and runs the generator to completion inside db_txn.
    """
    def run(pool, *args, **kwargs):
        return db_txn(pool, f, *args, **kwargs)
    return run
| echaozh/python-dbtxn | db_txn.py | Python | mit | 2,858 |
#-------------------------------------------------------------------------------
# This file is part of PyMad.
#
# Copyright (c) 2011, CERN. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
import couchdb
import logging
class Server():
    """Thin wrapper around a CouchDB database that stores model definitions."""

    def __init__(self,url='137.138.26.237',port=5984,user='jmad_user',password='iAmJmad99',dbname='cpymad_models'):
        # NOTE(review): credentials are hardcoded as defaults in source.
        connection_url = 'http://' + user + ':' + password + '@' + url + ':' + str(port)
        self._couch = couchdb.Server(connection_url)
        self._db = self._couch[dbname]
        self._log = logging.getLogger(__name__)

    def ls_models(self):
        """Return the names of all model documents in the database."""
        return [str(doc) for doc in self._db]

    def get_file(self,model,fname):
        '''
        Return the content of attachment *fname* of *model* as a string.
        '''
        return self._db.get_attachment(model,fname)

    def ls_files(self,model):
        '''
        Return the names of all files defined for *model*: its init script
        followed by every optic's strength files.
        '''
        definition = self._db[model]
        files = [definition['initscript']]
        for optic in definition['optics']:
            files.extend(optic['strengths'])
        return files

    def get_model(self,model):
        '''
        Return a model definition: a dictionary, or more precisely a
        couchdb document.
        '''
        return self._db[model]

    def put_model(self,modname,dictionary,**kwargs):
        '''
        Create a new model, or merge *dictionary* into an existing one.

        kwargs:
          fnames: attachment names as they should appear on the server
          fpaths: matching full local paths, one per name
        '''
        check_model_valid(dictionary)
        if modname in self.ls_models():
            existing = self._db[modname]
            for key in dictionary:
                existing[key] = dictionary[key]
            dictionary = existing
        self._db[modname] = dictionary
        if 'fnames' in kwargs:
            if len(kwargs['fnames'])!=len(kwargs['fpaths']):
                raise ValueError("You need to give one filename for each attachment")
            for (a,f) in zip(kwargs['fnames'],kwargs['fpaths']):
                self._log.info("Uploading attachment %s", a)
                content=open(f,'r')
                self._db.put_attachment(self._db[modname], content, filename=a)

    def del_model(self,modname):
        """Delete the model document *modname* from the database."""
        self._db.delete(self._db[modname])
def check_model_valid(dictionary):
    '''
    Validate a model definition.

    Currently a stub: no checks are performed and every definition is
    accepted.
    '''
    return True
| pymad/cpymad | src/cern/cpymad/_couch.py | Python | apache-2.0 | 3,001 |
import os
import sys
import datetime
from django.template.loader import add_to_builtins
# Django settings for methodmint project.
# NOTE(review): DEBUG is enabled here; production deployments are expected to
# override it via local_settings.py (imported at the bottom of this section).
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Martin Fitzpatrick', 'mfitzp@abl.es'),
)
MANAGERS = ADMINS
# When calling via command line copy in SITE_ID from env
# (linux) use export SITE_ID=<site_id> to set
SITE_ID = 1
# NOTE(review): database credentials are hardcoded in source; prefer keeping
# them in local_settings.py or environment variables.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'installables',
        'USER': 'smrtr',
        'PASSWORD': 'mould',
    }
}
# Dummy backend: caching is effectively disabled unless overridden locally.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
}
DJANGO_SETTINGS_MODULE = 'settings'
# Base URL roots; empty string means "serve from this host".
MEDIA_URL_ROOT = ''
STATIC_URL_ROOT = ''
ADMIN_MEDIA_URL_ROOT = ''
DEFAULT_HOST = 'golifescience.com'
SESSION_COOKIE_DOMAIN = '.golifescience.com'
BLACKLIST_EMAIL_DOMAINS = []
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
    from local_settings import *
except ImportError:
    pass
# *** DEFINE URLS HERE SO LOCAL SETTINGS CAN OVERRIDE PATH BASE ***
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = MEDIA_URL_ROOT + '/media/'
STATIC_URL = STATIC_URL_ROOT + '/static/'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Supported languages.  `_` is a no-op marker so language names can be picked
# up for translation without importing Django's i18n machinery here.
_ = lambda s: s
LANGUAGES = (
    ( 'en', _( 'English' ) ),
)
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Get site root (from current file) to make following path specifications easier
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(SITE_ROOT, 'media')
STATIC_ROOT = os.path.join(SITE_ROOT, 'static')
# Add paths to external & app folders to make including easier
sys.path.append(os.path.join(SITE_ROOT, 'apps'))
sys.path.append(os.path.join(SITE_ROOT, 'external'))
# Specific addition for MPTT as it supplies admin templates/tags
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(SITE_ROOT, 'templates'),
)
ROOT_URLCONF = 'urls'
#CSRF_COOKIE_DOMAIN = 'golifescience.com'
# Make this unique, and don't share it with anybody.
# NOTE(review): the secret key is committed to source control; consider
# moving it to local_settings.py.
SECRET_KEY = 'u2y=71bj-k%-iubxq+gvtwo7__7#b2gr^^4ug)a4*uzy^c7d#m'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader'
)
if 'DJANGO_SETTINGS_MODULE' in os.environ: # We're in a live web session (via wsgi)
    add_to_builtins('django.templatetags.future')
    #add_to_builtins('core.templatetags.core_tags')
# Ordering matters: UpdateCacheMiddleware must come before (and
# FetchFromCacheMiddleware after) the other middleware it wraps.
MIDDLEWARE_CLASSES = (
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.middleware.cache.UpdateCacheMiddleware',
    'django.middleware.gzip.GZipMiddleware',
    'django.middleware.http.ConditionalGetMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.locale.LocaleMiddleware', # should be after SessionMiddleware and CacheMiddleware, if used
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'pagination.middleware.PaginationMiddleware',
    'core.http.Http403Middleware',
    # 'subdomains.middleware.SubdomainURLRoutingMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sitemaps',
    'django.contrib.sites',
    'django.contrib.admin',
    'django.contrib.markup',
    'django.contrib.messages',
    'django.contrib.comments',
    'django.contrib.humanize',
    'django.contrib.staticfiles',
    # 'django.contrib.signals',
    # Externals
    'pagination', #http://code.google.com/p/django-pagination
    'easy_thumbnails', #http://github.com/SmileyChris/easy-thumbnails
    # 'south', # Database migrations: http://south.aeracode.org/
    'memcache_status',
    'taggit',
    'markdown',
    'haystack',
    'jsonfield',
    'mptt',
    'hitcount',
    'countries',
    'django_markdown',
    # 'subdomains',
    # 'djangocalais' We're using the API interface provided by this, but not the models so leave inactive
    'registration',
    'postman',
    'licenses',
    'actstream',
    'disqus',
    # installables
    'core',
    # 'comments',
    'ajax',
    'testimonials',
    'applications',
    'blog',
    'methods',
    'tagmeta',
    'publications',
    'profiles',
    'authors',
    'showcase',
)
# Authentication URLs.
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_URL = '/accounts/logout/'
# Page-cache middleware tuning (anonymous users only, 10 minutes).
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_KEY_PREFIX = 'middleware_anon_cache_'
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
# External avatar app setting (storage under /media/avatar)
# The following paths are dependent on setting correct base MEDIA/MEDIAADMIN urls in localsettings
AVATAR_DEFAULT_URL = MEDIA_URL + "img/default_avatar.png"
KEY_PREFIX = 'cache_'
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.contrib.messages.context_processors.messages',
    # Custom data
    'context_processors.languages',
    'context_processors.modelglobals',
    'context_processors.site',
    'context_processors.top5s',
    'context_processors.showcase',
)
AUTH_PROFILE_MODULE = "profiles.userprofile"
MARKDOWN_EDITOR_SKIN = 'simple'
# Haystack configuration
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.xapian_backend.XapianEngine',
        'PATH': os.path.join(SITE_ROOT, 'search_index'),
        # 'TIMEOUT': 60 * 5,
        # 'INCLUDE_SPELLING': True,
        # 'BATCH_SIZE': 100,
        # 'DEFAULT_OPERATOR': 'OR',
        # 'EXCLUDED_INDEXES': ['thirdpartyapp.search_indexes.BarIndex'],
    },
}
# Models whose instances may appear in activity streams (django-activity-stream).
ACTSTREAM_ACTION_MODELS = (
    'auth.User',
    # 'auth.AnonymousUser',
    'applications.Application',
    'methods.Method',
    'publications.Publication',
    # 'questions.Question',
    # 'questions.Answer',
    # 'discuss.Forum',
    # 'discuss.Thread',
    # 'discuss.Post',
    # 'badges.Badge',
    # 'badges.BadgeToUser',
    # 'sites.Site',
    'blog.Article',
    # 'comments.MPTTComment',
    # 'comments.Comment',
)
ACCOUNT_ACTIVATION_DAYS = 30
REDIRECT_FIELD_NAME = 'next'
FORCE_LOWERCASE_TAGS = True
# Email settings: user/pass combination is stored in local settings for security
EMAIL_HOST = 'smtp.webfaction.com'
EMAIL_SUBJECT_PREFIX ='[golifescience.com] '
DEFAULT_FROM_EMAIL = 'noreply@golifescience.com'
SERVER_EMAIL = 'noreply@golifescience.com'
# Hit count
HITCOUNT_KEEP_HIT_ACTIVE = { 'days': 1 }
HITCOUNT_HITS_PER_IP_LIMIT = 0
#HITCOUNT_EXCLUDE_USER_GROUP = ( 'Admin', )
| mfitzp/django-golifescience | settings.py | Python | bsd-3-clause | 7,963 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-import sys
import re
import os
import time,datetime
from decimal import *
import mimetypes
from cStringIO import StringIO
import oauth2
import logging
from xml.sax import saxutils
import webapp2 as webapp
#from google.appengine.ext import webapp
from django.utils import translation
from django.conf import settings
from google.appengine.api import users
from lib.cookies import Cookies
import urllib
import controller
from controller.model import User
from controller.session import Session, TempSession
from django.template import Context, loader
from django.conf import settings
def encoded_urlencode(params,encode='utf-8'):
    """
    URL-encode *params* after first byte-encoding each unicode value
    with *encode*.
    """
    encoded = {}
    for key, value in params.items():
        encoded[key] = value.encode(encode)
    return urllib.urlencode(encoded)
class template(object):
    """Minimal facade over Django's template loader."""

    @staticmethod
    def render(path,params):
        """Render the template at *path* with the *params* context dict."""
        tmpl = loader.get_template(path)
        return tmpl.render(Context(params))
class BaseHandler(webapp.RequestHandler):
    """Base request handler: resolves the UI language, detects Ajax requests,
    and centralizes login checking / error rendering."""

    def initialize(self, request, response):
        # Language resolution order: ?lang= query param, then the 'lang'
        # cookie, then the request's Accept-Language headers.
        webapp.RequestHandler.initialize(self, request, response)
        lang = request.get('lang')
        if lang:
            translation.activate(lang)
            return
        # main.py stores the user's language setting (saved in the DB) in a
        # cookie; read it from there. Used for Ajax requests.
        lang = Cookies(self).get('lang')
        if not lang:
            # If absent, fall back to the request headers.  COOKIES/META are
            # patched on so Django's get_language_from_request can run.
            self.request.COOKIES = Cookies(self)
            self.request.META = os.environ
            lang = translation.get_language_from_request(self.request)
        translation.activate(lang)
        self.is_ajax = self.request.headers.get("X-Requested-With") == "XMLHttpRequest"
        self.is_mobile = False
        # Mobile user-agent sniffing, currently disabled.
        # if not self.is_ajax:
        #    mobile_useragents = r'iPhone|iPod|Android|dream|CUPCAKE|BlackBerry|webOS|incognito|webmate'
        #    user_agent = self.request.headers["user-agent"]
        #    self.is_mobile = re.search(mobile_useragents, user_agent) is not None

    def handle_exception(self, exception, debug_mode):
        """Render the login page for NotLoginError; log everything else and
        return a 500 response ("error" text for Ajax, a template otherwise)."""
        if isinstance(exception, NotLoginError):
            if self.is_ajax:
                self.error(403)
                return self.response.out.write("notlogin")
            #
            self.request.COOKIES = Cookies(self)
            self.request.META = os.environ
            lang = translation.get_language_from_request(self.request)
            # Pick the index page variant by device and language.
            if self.is_mobile:
                view = '../view/m_index.html'
            elif lang == "ja":
                view = '../view/index.html'
            else:
                view = '../view/index-en.html'
            template_values = {
                'version' : controller.version,
                'production' : not controller.is_dev,
                'settings' : settings,
            }
            tmpl = os.path.join(os.path.dirname(__file__), view)
            return self.response.out.write(template.render(tmpl, template_values))
        logging.exception(exception)
        self.error(500)
        if self.is_ajax:
            return self.response.out.write("error")
        tmpl = os.path.join(os.path.dirname(__file__), '../view/500.html')
        return self.response.out.write(template.render(tmpl, {}))

    def check_login(self):
        """Attach the current session; raise NotLoginError when not logged in."""
        # if self.check_2lo_oauth():
        #     return
        self.session = Session(self.request, self.response)
        if not self.session.is_login():
            raise NotLoginError()

    def check_2lo_oauth(self):
        """Verify a two-legged OAuth request (HMAC-SHA1) and, on success,
        attach a temporary session for the xoauth_requestor_id user.

        Returns False when no Authorization header is present; raises
        NotLoginError on any verification failure.
        """
        auth_header = self.request.headers.get("Authorization")
        if not auth_header:
            return False
        self.is_ajax = True
        user_id = self.request.get('xoauth_requestor_id')
        if not user_id:
            raise NotLoginError()
        try:
            # Builder our request object.
            request = oauth2.Request.from_request(
                self.request.method, self.request.path_url, self.request.headers, None,
                self.request.query)
        except Exception, e:
            logging.warn("Could not parse request from method = %s,"
                "uri = %s, headers = %s, query = %s, exception = %s" % (
                self.request.method, self.request.path_url, self.request.headers,
                self.request.query, e))
            raise NotLoginError()
        # Fetch the token from Cassandra and build our Consumer object.
        if request is None or 'oauth_consumer_key' not in request:
            logging.warn("Request is missing oauth_consumer_key.")
            raise NotLoginError()
        try:
            # Verify the two-legged request.
            server = oauth2.Server()
            server.add_signature_method(oauth2.SignatureMethod_HMAC_SHA1())
            server.verify_request(request, _get_consumer(request["oauth_consumer_key"]), None)
        except Exception, e:
            logging.warn("Could not verify signature (%s)." % e)
            raise NotLoginError()
        user = User.gql("WHERE user_id=:1", user_id).get()
        if not user:
            logging.warn("Specified user is not found. (%s)" % user_id)
            raise NotLoginError()
        session = TempSession(self.request, self.response)
        session.new(user)
        self.session = session
        return True
def need_login(fn):
    """Decorator for handler methods that require a logged-in user.

    The wrapper calls ``self.check_login()`` first -- which raises
    ``NotLoginError`` when there is no valid session -- and only then
    delegates to the wrapped handler.
    """
    from functools import wraps

    # wraps() preserves the handler's __name__/__doc__ so logging and
    # introspection report the real handler instead of 'check_login'.
    @wraps(fn)
    def check_login(_self, *args, **kw):
        _self.check_login()
        return fn(_self, *args, **kw)
    return check_login
class NotLoginError(Exception):
    """Raised when a request requires authentication but no valid login
    session (or verified OAuth request) is present."""
    pass
class MockConsumer(object):
    """Static OAuth consumer credentials used by two-legged verification.

    NOTE(review): the key/secret are hard-coded in source control; confirm
    they should not live in configuration or a secret store instead.
    """
    key = 'nc-3b0shbl19gm:e^w/xbtspng7e'
    secret = '/n@g93nc-.f]h^4hf1gs8cnvlg04.lz/g_n9573ffd0-b.ukwpq-a,f;-nt2vd91,dng'
    # key = 'key'
    # secret = 'secret'
def _get_consumer(key):
    # The *key* taken from the request is currently ignored: every caller
    # receives the same hard-coded MockConsumer credentials.
    return MockConsumer()
#-----------------------------------------------------------
# Linkify URLs and user names
#-----------------------------------------------------------
def escape_html(str):
    """Return *str* with the markup-sensitive characters ``&``, ``<`` and
    ``>`` escaped as HTML entities (via xml.sax.saxutils.escape)."""
    escaped = saxutils.escape(str)
    return escaped
def replace_link(str, url=""):
    """Wrap http(s) URLs in *str* in anchor tags, then delegate to
    replace_mention() to also link @user names (prefixed with *url*) and
    convert newlines to ``<br/>``.
    """
    # URL pattern.  Raw string literal: the previous non-raw form contained
    # invalid escape sequences such as "\?" (a DeprecationWarning in
    # Python 3, an error in future versions).  The pattern itself is
    # unchanged.
    str = re.sub(r"(http[s]?://[\w|/|\.|%|&|\?|=|\-|#|!|:|;|~]+)",
                 r'<a href="\1" target="_blank">\1</a>', str)
    return replace_mention(str, url)
def replace_mention(str, url=""):
    """Turn @user mentions in *str* into anchor tags pointing at
    *url* + user name, and convert newlines to ``<br/>``.
    """
    # User names.  Raw string literal: the non-raw form contained invalid
    # escape sequences such as "\?" (deprecated in Python 3).
    str = re.sub(r"@([\w|/|\.|%|&|\?|=|\-|#]+)",
                 r'@<a href="%s\1" target="_blank">\1</a>' % url, str)
    # Line breaks.
    str = re.sub("\n", '<br/>', str)
    return str
# Linkify hashtags.
def replace_hashtag(str, url=""):
    """Turn #hashtags (ASCII or Japanese characters, ``#`` or full-width
    ``＃``) in *str* into anchor tags pointing at *url* + the URL-encoded
    tag (``%23tag``).

    NOTE(review): the input is padded with one leading and one trailing
    space and returned padded; callers appear to tolerate the extra
    whitespace, so that behavior is preserved here.  (A commented-out
    legacy regex that relied on the padding has been removed.)
    """
    str = " "+str+" "
    str = re.sub(u"(?:#|\uFF03)([a-zA-Z0-9_\u3041-\u3094\u3099-\u309C\u30A1-\u30FA\u30FC\u3400-\uD7FF\uFF10-\uFF19\uFF20-\uFF3A\uFF41-\uFF5A\uFF66-\uFF9F]+)", r'<a href="%s%%23\1" target="_blank">#\1</a>' % url, str)
    return str
#-----------------------------------------------------------
# Build a display-time string from a parsed timestamp
#-----------------------------------------------------------
def get_display_time(datetimeStr, format):
    """Parse *datetimeStr* according to *format* and re-format it in the
    Twitter-style layout ``"Sat Feb 03 04:05:06 2001"``."""
    parsed = datetime.datetime.strptime(datetimeStr, format)
    return parsed.strftime("%a %b %d %H:%M:%S %Y")
def multipart_encode(params, files, boundary):
    """Encode *params* (name -> UTF-8 bytes) and *files* (iterable of
    ``(name, filename, file_object)``) as a ``multipart/form-data`` body
    delimited by *boundary*.  Returns the body as a string.
    """
    CRLF = '\r\n'
    lines = []
    # Plain form fields.
    for field_name, field_value in params.items():
        lines.append('--' + boundary)
        lines.append('Content-Disposition: form-data; name="%s"' % field_name)
        lines.append('')
        lines.append(field_value.decode('utf-8'))
    # File attachments; content type is guessed from the file name.
    for field_name, filename, fileobj in files:
        lines.append('--' + boundary)
        lines.append('Content-Disposition: form-data; name="%s"; filename="%s"'
                     % (field_name, filename))
        lines.append('Content-Type: %s'
                     % (mimetypes.guess_type(filename)[0]
                        or 'application/octet-stream'))
        lines.append('')
        lines.append(fileobj.read())
    # Closing boundary followed by a trailing blank line.
    lines.append('--' + boundary + '--')
    lines.append('')
    lines.append('')
    return CRLF.join(lines)
| co-meeting/crowy | src/controller/utils.py | Python | mit | 8,884 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-declare ``Facet.story`` with the
    ``storyfacet`` related_name (a Python-level reverse accessor; it does
    not alter the underlying database column)."""
    dependencies = [
        ('editorial', '0049_auto_20171116_1526'),
    ]
    operations = [
        migrations.AlterField(
            model_name='facet',
            name='story',
            field=models.ForeignKey(related_name='storyfacet', to='editorial.Story'),
        ),
    ]
| ProjectFacet/facet | project/editorial/migrations/0050_auto_20171117_1716.py | Python | mit | 437 |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities related to bgp data types and models.
"""
import logging
import socket
from ryu.lib.packet.bgp import (
BGPUpdate,
RF_IPv4_UC,
RF_IPv6_UC,
RF_IPv4_VPN,
RF_IPv6_VPN,
RF_L2_EVPN,
RF_RTC_UC,
RouteTargetMembershipNLRI,
BGP_ATTR_TYPE_MULTI_EXIT_DISC,
BGPPathAttributeMultiExitDisc,
BGPPathAttributeMpUnreachNLRI,
BGPPathAttributeAs4Path,
BGPPathAttributeAs4Aggregator,
BGPPathAttributeUnknown,
BGP_ATTR_FLAG_OPTIONAL,
BGP_ATTR_FLAG_TRANSITIVE,
)
from ryu.services.protocols.bgp.info_base.rtc import RtcPath
from ryu.services.protocols.bgp.info_base.ipv4 import Ipv4Path
from ryu.services.protocols.bgp.info_base.ipv6 import Ipv6Path
from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path
from ryu.services.protocols.bgp.info_base.vpnv6 import Vpnv6Path
from ryu.services.protocols.bgp.info_base.evpn import EvpnPath
LOG = logging.getLogger('utils.bgp')
# RouteFamily to Path sub-class mapping; used by create_path() and the
# clone helpers below to pick the concrete Path type for an NLRI.
_ROUTE_FAMILY_TO_PATH_MAP = {RF_IPv4_UC: Ipv4Path,
                             RF_IPv6_UC: Ipv6Path,
                             RF_IPv4_VPN: Vpnv4Path,
                             RF_IPv6_VPN: Vpnv6Path,
                             RF_L2_EVPN: EvpnPath,
                             RF_RTC_UC: RtcPath}
def create_path(src_peer, nlri, **kwargs):
    """Create a Path instance of the sub-class matching *nlri*'s route
    family, sourced from *src_peer* at its current version number.

    Extra keyword arguments are forwarded to the Path constructor.
    """
    route_family = nlri.ROUTE_FAMILY
    # Membership test directly on the dict (no intermediate keys() view).
    assert route_family in _ROUTE_FAMILY_TO_PATH_MAP
    path_cls = _ROUTE_FAMILY_TO_PATH_MAP.get(route_family)
    return path_cls(src_peer, nlri, src_peer.version_num, **kwargs)
def clone_path_and_update_med_for_target_neighbor(path, med):
    """Return a clone of *path* whose MULTI_EXIT_DISC attribute is set to
    *med* and which is flagged as MED-set-by-target-neighbor.

    Raises ValueError for route families that do not support cloning.
    """
    # MED 0 is a legal value, so test for None explicitly rather than
    # truthiness (the old `assert path and med` rejected med == 0).
    assert path is not None and med is not None
    route_family = path.route_family
    if route_family not in _ROUTE_FAMILY_TO_PATH_MAP:
        raise ValueError('Clone is not supported for address-family %s' %
                         route_family)
    path_cls = _ROUTE_FAMILY_TO_PATH_MAP.get(route_family)
    # Work on a copy of the attribute map so the MED update cannot leak
    # back into the original path's attributes.
    pattrs = dict(path.pathattr_map)
    pattrs[BGP_ATTR_TYPE_MULTI_EXIT_DISC] = BGPPathAttributeMultiExitDisc(med)
    return path_cls(
        path.source, path.nlri, path.source_version_num,
        pattrs=pattrs, nexthop=path.nexthop,
        is_withdraw=path.is_withdraw,
        med_set_by_target_neighbor=True
    )
def clone_rtcpath_update_rt_as(path, new_rt_as):
    """Clone the given RT_NLRI *path*, replacing the NLRI's origin AS with
    *new_rt_as* while keeping the same route target, attributes, nexthop
    and withdraw flag.
    """
    assert path and new_rt_as
    if not path or path.route_family != RF_RTC_UC:
        raise ValueError('Expected RT_NLRI path')
    cloned_nlri = RouteTargetMembershipNLRI(new_rt_as,
                                            path.nlri.route_target)
    return RtcPath(path.source, cloned_nlri, path.source_version_num,
                   pattrs=path.pathattr_map, nexthop=path.nexthop,
                   is_withdraw=path.is_withdraw)
def from_inet_ptoi(bgp_id):
    """Convert an IPv4 address string to its 32-bit integer value.

    Returns None (and logs at debug level) when *bgp_id* is not a valid
    IPv4 address string.
    """
    import struct

    four_byte_id = None
    try:
        packed_byte = socket.inet_pton(socket.AF_INET, bgp_id)
        # struct works on both Python 2 and 3; the previous
        # packed_byte.encode('hex') idiom was Python 2 only.
        four_byte_id = struct.unpack('!I', packed_byte)[0]
    except (ValueError, socket.error):
        # inet_pton signals an invalid address with socket.error (OSError
        # on Python 3); the old code caught only ValueError, so invalid
        # input propagated instead of returning None as documented.
        LOG.debug('Invalid bgp id given for conversion to integer value %s',
                  bgp_id)
    return four_byte_id
def get_unknown_opttrans_attr(path):
    """Utility method that gives a `dict` of unknown and unsupported optional
    transitive path attributes of `path`.

    Returns dict: <key> - attribute type code, <value> - unknown path-attr.
    """
    path_attrs = path.pathattr_map
    unknown_opt_tran_attrs = {}
    for _, attr in path_attrs.items():
        # NOTE(review): `attr.flags & (OPTIONAL | TRANSITIVE)` is truthy
        # when EITHER bit is set; if the intent is "optional AND
        # transitive" the result should be compared against the combined
        # mask -- confirm before changing.  AS4_PATH / AS4_AGGREGATOR are
        # always collected regardless of flags.
        if (isinstance(attr, BGPPathAttributeUnknown) and
            attr.flags & (BGP_ATTR_FLAG_OPTIONAL |
                          BGP_ATTR_FLAG_TRANSITIVE)) or \
                isinstance(attr, BGPPathAttributeAs4Path) or \
                isinstance(attr, BGPPathAttributeAs4Aggregator):
            unknown_opt_tran_attrs[attr.type] = attr
    return unknown_opt_tran_attrs
def create_end_of_rib_update():
    """Construct a BGP UPDATE usable as an end-of-rib (EOR) marker: an
    MP_UNREACH_NLRI attribute for the VPNv4 family with no withdrawn
    routes."""
    empty_mp_unreach = BGPPathAttributeMpUnreachNLRI(
        RF_IPv4_VPN.afi, RF_IPv4_VPN.safi, [])
    return BGPUpdate(path_attributes=[empty_mp_unreach])
# Shared BGP UPDATE message instance usable as an End-of-RIB marker.
UPDATE_EOR = create_end_of_rib_update()
| ool2016-seclab/quarantineSystem | ryu/services/protocols/bgp/utils/bgp.py | Python | mit | 5,129 |
from django.conf import settings
from django.contrib.auth.models import User
from rest_framework import authentication
from rest_framework import filters
from rest_framework import generics
from rest_framework import permissions
from rest_framework import status
from rest_framework import viewsets
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from user_api.serializers import UserSerializer, UserPreferenceSerializer
from user_api.models import UserPreference
from django_comment_common.models import Role
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class ApiKeyHeaderPermission(permissions.BasePermission):
    def has_permission(self, request, view):
        """
        Check for permissions by matching the configured API key and header

        If settings.DEBUG is True and settings.EDX_API_KEY is not set or None,
        then allow the request. Otherwise, allow the request if and only if
        settings.EDX_API_KEY is set and the X-Edx-Api-Key HTTP header is
        present in the request and matches the setting.
        """
        api_key = getattr(settings, "EDX_API_KEY", None)
        if api_key is None:
            # No key configured: only permit while running in debug mode.
            return settings.DEBUG
        return request.META.get("HTTP_X_EDX_API_KEY") == api_key
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only, paginated listing of users (preferences prefetched),
    gated by the shared API key header rather than per-user permissions."""
    authentication_classes = (authentication.SessionAuthentication,)
    permission_classes = (ApiKeyHeaderPermission,)
    queryset = User.objects.all().prefetch_related("preferences")
    serializer_class = UserSerializer
    paginate_by = 10
    paginate_by_param = "page_size"
class ForumRoleUsersListView(generics.ListAPIView):
    """
    Forum roles are represented by a list of user dicts
    """
    authentication_classes = (authentication.SessionAuthentication,)
    permission_classes = (ApiKeyHeaderPermission,)
    serializer_class = UserSerializer
    paginate_by = 10
    paginate_by_param = "page_size"
    def get_queryset(self):
        """
        Return a list of users with the specified role/course pair
        """
        name = self.kwargs['name']
        course_id_string = self.request.QUERY_PARAMS.get('course_id')
        if not course_id_string:
            raise ParseError('course_id must be specified')
        course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id_string)
        # get_or_create: an unknown role/course pair yields a fresh empty
        # role, so the response is an empty list rather than an error.
        role = Role.objects.get_or_create(course_id=course_id, name=name)[0]
        users = role.users.all()
        return users
class UserPreferenceViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only, paginated listing of user preferences, filterable by
    ``key`` and ``user`` query parameters; API-key gated."""
    authentication_classes = (authentication.SessionAuthentication,)
    permission_classes = (ApiKeyHeaderPermission,)
    queryset = UserPreference.objects.all()
    filter_backends = (filters.DjangoFilterBackend,)
    filter_fields = ("key", "user")
    serializer_class = UserPreferenceSerializer
    paginate_by = 10
    paginate_by_param = "page_size"
class PreferenceUsersListView(generics.ListAPIView):
    """Paginated list of all users that have a preference whose key matches
    the ``pref_key`` URL kwarg; API-key gated."""
    authentication_classes = (authentication.SessionAuthentication,)
    permission_classes = (ApiKeyHeaderPermission,)
    serializer_class = UserSerializer
    paginate_by = 10
    paginate_by_param = "page_size"
    def get_queryset(self):
        # Prefetch preferences since UserSerializer renders them.
        return User.objects.filter(preferences__key=self.kwargs["pref_key"]).prefetch_related("preferences")
| carsongee/edx-platform | common/djangoapps/user_api/views.py | Python | agpl-3.0 | 3,368 |
import os
from . import model
from . import routes
from . import views
# Addon registration metadata consumed by the OSF addon framework.
MODELS = [model.AddonS3UserSettings, model.AddonS3NodeSettings]
USER_SETTINGS_MODEL = model.AddonS3UserSettings
NODE_SETTINGS_MODEL = model.AddonS3NodeSettings
ROUTES = [routes.settings_routes]
SHORT_NAME = 's3'
FULL_NAME = 'Amazon S3'
# The addon can be enabled on both user accounts and project nodes.
OWNERS = ['user', 'node']
ADDED_DEFAULT = []
ADDED_MANDATORY = []
VIEWS = []
CONFIGS = ['accounts', 'node']
CATEGORIES = ['storage']
# No extra JS/CSS assets are bundled with this addon.
INCLUDE_JS = {}
INCLUDE_CSS = {
    'widget': [],
    'page': [],
}
HAS_HGRID_FILES = True
GET_HGRID_DATA = views.hgrid.s3_hgrid_data
# Upload size cap in MB.  (Earlier note suggested there should be no
# practical limit; 128 MB is the current enforced value.)
MAX_FILE_SIZE = 128  # MB
HERE = os.path.dirname(os.path.abspath(__file__))
NODE_SETTINGS_TEMPLATE = os.path.join(HERE, 'templates', 's3_node_settings.mako')
USER_SETTINGS_TEMPLATE = os.path.join(HERE, 'templates', 's3_user_settings.mako')
| njantrania/osf.io | website/addons/s3/__init__.py | Python | apache-2.0 | 881 |
import datetime
import pdb
import unittest
class TransactionId2(object):
    """Immutable-by-convention identifier for a property transaction: the
    pair (sale_date, apn).  Instances compare by value, order by date then
    APN, and are hashable for use in sets and as dict keys.

    Note: this is Python 2 code (the constructor requires ``apn`` to be a
    ``long``).
    """
    def __init__(self, sale_date=None, apn=None):
        assert sale_date is not None, sale_date
        assert apn is not None, apn
        assert isinstance(sale_date, datetime.date), sale_date
        assert isinstance(apn, long), apn
        self.sale_date = sale_date
        self.apn = apn
    def __str__(self):
        'return informal string representation'
        return '%sAPN%d' % (self.sale_date, self.apn)
    def __repr__(self):
        'return official string representation'
        return 'TransactionId2(sale_date=%s,apn=%sL)' % (self.sale_date, self.apn)
    def __eq__(self, other):
        return self.sale_date == other.sale_date and self.apn == other.apn
    def __ne__(self, other):
        # Python 2 does not derive != from __eq__; without this method,
        # a != b fell back to identity, so two equal-valued instances
        # compared as unequal.
        return not self.__eq__(other)
    def __lt__(self, other):
        # Order by sale date first, then by APN within the same date.
        if self.sale_date == other.sale_date:
            return self.apn < other.apn
        else:
            return self.sale_date < other.sale_date
    def __hash__(self):
        # Hash the same value tuple that __eq__ compares, keeping
        # hash/eq consistent.
        return hash((self.sale_date, self.apn))
class TestTransactionId2(unittest.TestCase):
    """Unit tests for TransactionId2 (Python 2: uses long() and print
    statements)."""
    def test_construction_ok(self):
        # Valid (year, month, day, apn) tuples must construct cleanly.
        for test in (
            [2001, 2, 3, 10],
        ):
            year, month, day, apn = test
            date_value = datetime.date(year, month, day)
            apn_value = long(apn)
            x = TransactionId2(sale_date=date_value, apn=apn_value)
            self.assertTrue(isinstance(x, TransactionId2))
            self.assertEqual(date_value, x.sale_date)
            self.assertEqual(apn_value, x.apn)
    def test_construction_bad(self):
        # Passing an int (yyyymmdd) instead of a date must be rejected.
        def make(year, month, day, apn):
            return TransactionId2(sale_date=year * 10000 + month * 100 + day, apn=apn)
        for test in (
            [2001, 2, 3, 10],
        ):
            year, month, day, apn = test
            self.assertRaises(AssertionError, make, year, month, day, apn)
    def test_str_repr(self):
        # Smoke test only: prints the informal and official representations.
        verbose = True
        for test in (
            [2001, 2, 3, 10],
        ):
            year, month, day, apn = test
            date_value = datetime.date(year, month, day)
            apn_value = long(apn)
            x = TransactionId2(sale_date=date_value, apn=apn_value)
            if verbose:
                print x  # calls __str__
                print x.__repr__()
    def test_eq(self):
        # Equal values compare equal; differing date or apn do not.
        a1 = TransactionId2(sale_date=datetime.date(2001, 2, 3), apn=10L)
        a2 = TransactionId2(sale_date=datetime.date(2001, 2, 3), apn=10L)
        b = TransactionId2(sale_date=datetime.date(2001, 2, 4), apn=10L)
        c = TransactionId2(sale_date=datetime.date(2001, 2, 3), apn=11L)
        self.assertEqual(a1, a2)
        self.assertNotEqual(a1, b)
        self.assertNotEqual(a1, c)
    def test_lt(self):
        # Ordering: date first, then apn.
        a = TransactionId2(sale_date=datetime.date(2001, 2, 3), apn=10L)
        b = TransactionId2(sale_date=datetime.date(2001, 2, 3), apn=11L)
        c = TransactionId2(sale_date=datetime.date(2001, 2, 5), apn=10L)
        self.assertLess(a, b)
        self.assertLess(a, c)
        self.assertLess(b, c)
    def test_has(self):
        'test by making a set'
        a = TransactionId2(sale_date=datetime.date(2001, 2, 3), apn=10L)
        b = TransactionId2(sale_date=datetime.date(2001, 2, 3), apn=11L)
        c = TransactionId2(sale_date=datetime.date(2001, 2, 5), apn=10L)
        x = set((a, b, c))
        self.assertEqual(3, len(x))
if __name__ == '__main__':
    unittest.main()
    # Dead reference kept so the pdb import is not flagged/stripped as
    # unused; never executed.
    if False:
        pdb
| rlowrance/re-avm | TransactionId2.py | Python | bsd-3-clause | 3,449 |
import logging
from autotest.client.shared import error
@error.context_aware
def run(test, params, env):
    """
    KVM virtio viostor heavy random write load:
    1) Log into a guest
    2) Install Crystal Disk Mark [1]
    3) Start Crystal Disk Mark with heavy write load

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    :note: Crystal Disk mark is BSD licensed software
    http://crystalmark.info/software/CrystalDiskMark/manual-en/License.html
    :see:: http://crystalmark.info/software/CrystalDiskMark/index-e.html
    """
    error.context("Try to log into guest.", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=login_timeout)
    install_cmd = params.get("crystal_install_cmd")
    run_cmd = params.get("crystal_run_cmd")
    test_timeout = float(params.get("test_timeout", "7200"))
    error.context("Install Crystal Disk Mark", logging.info)
    if not install_cmd:
        raise error.TestError("Can not get the crystal disk mark"
                              " install command.")
    session.cmd(install_cmd, timeout=test_timeout)
    error.context("Start the write load", logging.info)
    if not run_cmd:
        raise error.TestError("Can not get the load start command.")
    session.cmd(run_cmd, timeout=test_timeout)
    session.close()
| ypu/tp-qemu | qemu/tests/win_disk_write.py | Python | gpl-2.0 | 1,545 |
# Licensed to Tomaz Muraus under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# Tomaz muraus licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scrapy.contrib.spiders import CrawlSpider
# Public API of this module.
__all__ = [
    'FlickrCrawlSpider'
]
class FlickrCrawlSpider(CrawlSpider):
    """CrawlSpider variant that accepts a ``flickr_api_key`` keyword
    argument and stores it on the instance before delegating the rest of
    the arguments to the base class."""
    def __init__(self, *args, **kwargs):
        # Pop the key so CrawlSpider never sees an unexpected kwarg.
        self._flickr_api_key = kwargs.pop('flickr_api_key', None)
        super(FlickrCrawlSpider, self).__init__(*args, **kwargs)
| Wadodo/wadodo-crawlers | wadodo_crawlers/wadodo_crawlers/spiders/base.py | Python | apache-2.0 | 1,102 |
# -*- coding: utf-8 -*-
# © 2016 Oihane Crucelaegui - AvanzOSC
# © 2016 Pedro M. Baeza <pedro.baeza@tecnativa.com>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import api, fields, models, _
from openerp.tools.float_utils import float_compare
class PurchaseOrder(models.Model):
    """Extends purchase order confirmation to materialize product variants
    that were configured on order lines but do not exist yet."""
    _inherit = "purchase.order"
    @api.multi
    def wkf_confirm_order(self):
        """Create possible product variants not yet created."""
        product_obj = self.env['product.product']
        # Only lines that reference a template but no concrete variant.
        for line in self.mapped('order_line').filtered(
                lambda x: not x.product_id and x.product_tmpl_id):
            product = product_obj._product_find(
                line.product_tmpl_id, line.product_attribute_ids)
            if not product:
                # No variant matches the configured attribute values:
                # create one ((6, 0, ids) replaces the m2m value set).
                product = product_obj.create({
                    'product_tmpl_id': line.product_tmpl_id.id,
                    'attribute_value_ids':
                        [(6, 0,
                          line.product_attribute_ids.mapped('value_id').ids)]})
            line.write({'product_id': product.id})
        return super(PurchaseOrder, self).wkf_confirm_order()
class PurchaseOrderLine(models.Model):
    """Purchase order line extended with the product-configurator mixin:
    variant attributes can be chosen per line, descriptions are translated
    to the supplier's language, and supplier UoM/min-qty/lead-time rules
    are applied when only a template is selected."""
    _inherit = ['purchase.order.line', 'product.configurator']
    _name = 'purchase.order.line'
    order_state = fields.Selection(
        related='order_id.state', readonly=True)
    # Needed for getting the lang variable for translating descriptions
    partner_id = fields.Many2one(related='order_id.partner_id', readonly=True)
    @api.multi
    def action_duplicate(self):
        """Copy this line and reload the parent order's form view."""
        self.ensure_one()
        self.copy()
        # Force reload of view as a workaround for lp:1155525
        return {
            'context': self.env.context,
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'purchase.order',
            'res_id': self.order_id.id,
            'type': 'ir.actions.act_window',
        }
    @api.multi
    def onchange_product_id(
            self, pricelist_id, product_id, qty, uom_id, partner_id,
            date_order=False, fiscal_position_id=False, date_planned=False,
            name=False, price_unit=False, state='draft'):
        """Old-API onchange: merge configurator values into the standard
        result and append the (translated) purchase description."""
        res = super(PurchaseOrderLine, self).onchange_product_id(
            pricelist_id, product_id, qty, uom_id, partner_id,
            date_order=date_order, fiscal_position_id=fiscal_position_id,
            date_planned=date_planned, name=name, price_unit=price_unit,
            state=state)
        new_value = self.onchange_product_id_product_configurator_old_api(
            product_id=product_id, partner_id=partner_id)
        value = res.setdefault('value', {})
        value.update(new_value)
        if product_id:
            product_obj = self.env['product.product']
            if partner_id:
                # Translate the description into the supplier's language.
                partner = self.env['res.partner'].browse(partner_id)
                product_obj = product_obj.with_context(lang=partner.lang)
            prod = product_obj.browse(product_id)
            if prod.description_purchase:
                value['name'] += '\n' + prod.description_purchase
        return res
    @api.multi
    @api.onchange('product_tmpl_id')
    def onchange_product_tmpl_id(self):
        """When a template (not a variant) is picked, derive UoM, price,
        supplier minimum quantity, planned date and taxes from it."""
        res = super(PurchaseOrderLine, self).onchange_product_tmpl_id()
        if self.product_tmpl_id.description_purchase:
            self.name += '\n' + self.product_tmpl_id.description_purchase
        if self.product_tmpl_id.attribute_line_ids:
            self.product_uom = self.product_tmpl_id.uom_po_id
            self.product_uos = self.product_tmpl_id.uos_id
            # Pricelist price for the template at the line quantity.
            self.price_unit = self.order_id.pricelist_id.with_context(
                {'uom': self.product_uom.id,
                 'date': self.order_id.date_order}).template_price_get(
                self.product_tmpl_id.id, self.product_qty or 1.0,
                self.order_id.partner_id.id)[self.order_id.pricelist_id.id]
            # Get planned date and min quantity
            supplierinfo = False
            precision = self.env['decimal.precision'].precision_get(
                'Product Unit of Measure')
            for supplier in self.product_tmpl_id.seller_ids:
                if supplier.name == self.order_id.partner_id:
                    supplierinfo = supplier
                    if supplierinfo.product_uom != self.product_uom:
                        res['warning'] = {
                            'title': _('Warning!'),
                            'message': _('The selected supplier only sells this '
                                         'product by %s') % (
                                supplierinfo.product_uom.name)
                        }
                    min_qty = supplierinfo.product_uom._compute_qty(
                        supplierinfo.product_uom.id, supplierinfo.min_qty,
                        to_uom_id=self.product_uom.id)
                    # If the supplier quantity is greater than entered from user,
                    # set minimal.
                    if (float_compare(
                            min_qty, self.product_qty,
                            precision_digits=precision) == 1):
                        if self.product_qty:
                            res['warning'] = {
                                'title': _('Warning!'),
                                'message': _('The selected supplier has a minimal '
                                             'quantity set to %s %s, you should '
                                             'not purchase less.') % (
                                    supplierinfo.min_qty,
                                    supplierinfo.product_uom.name)
                            }
                        self.product_qty = min_qty
            if not self.date_planned and supplierinfo:
                dt = fields.Datetime.to_string(
                    self._get_date_planned(supplierinfo, self.order_id.date_order))
                self.date_planned = dt
            # Get taxes
            taxes = self.product_tmpl_id.supplier_taxes_id
            self.taxes_id = self.order_id.fiscal_position.map_tax(taxes)
        return res
| Eficent/odoomrp-wip | purchase_product_variants/models/purchase_order.py | Python | agpl-3.0 | 5,997 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-17 15:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the FlowPathRecentStep model (recent contact movements between
    two flow nodes) together with a composite index so 'recent steps from
    node A to node B, newest first' can be answered directly."""
    dependencies = [
        ('flows', '0082_install_indexes'),
    ]
    operations = [
        migrations.CreateModel(
            name='FlowPathRecentStep',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('from_uuid', models.UUIDField(help_text='Which flow node they came from')),
                ('to_uuid', models.UUIDField(help_text='Which flow node they went to')),
                ('left_on', models.DateTimeField(help_text='When they left the first node')),
                ('step', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recent_segments', to='flows.FlowStep')),
            ],
        ),
        # Raw SQL because Django's index API (at this version) cannot
        # express the DESC ordering on left_on.
        migrations.RunSQL('CREATE INDEX flows_flowpathrecentstep_from_to_left '
                          'ON flows_flowpathrecentstep (from_uuid, to_uuid, left_on DESC)')
    ]
| pulilab/rapidpro | temba/flows/migrations/0083_flowpathrecentstep.py | Python | agpl-3.0 | 1,150 |
# Blender operator preset ("snake") for the XYZ math-surface add-on
# (mesh.primitive_xyz_function_surface).  Executing this file copies the
# saved settings onto the currently active operator.
import bpy
op = bpy.context.active_operator
# Parametric equations x(u, v), y(u, v), z(u, v) of the surface.
op.x_eq = '1.2*(1 -v/(2*pi))*cos(3*v)*(1 + cos(u)) + 3*cos(3*v)'
op.y_eq = '9*v/(2*pi) + 1.2*(1 - v/(2*pi))*sin(u)'
op.z_eq = '1.2*(1 -v/(2*pi))*sin(3*v)*(1 + cos(u)) + 3*sin(3*v)'
# Parameter ranges and tessellation steps (u and v span one full turn).
op.range_u_min = 0.0
op.range_u_max = 6.2831854820251465
op.range_u_step = 32
op.wrap_u = False
op.range_v_min = 0.0
op.range_v_max = 6.2831854820251465
op.range_v_step = 64
op.wrap_v = False
op.close_v = False
# Helper equations a..h are unused by this preset (all zero).
op.n_eq = 1
op.a_eq = '0'
op.b_eq = '0'
op.c_eq = '0'
op.f_eq = '0'
op.g_eq = '0'
op.h_eq = '0'
| Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons/presets/operator/mesh.primitive_xyz_function_surface/snake.py | Python | gpl-3.0 | 533 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-few-public-methods
import json
import unittest
import jmespath
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
from azure.cli.core.azclierror import (ResourceNotFoundError, ArgumentUsageError, InvalidArgumentValueError,
MutuallyExclusiveArgumentError)
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, JMESPathCheck)
from knack.cli import CLIError
from knack.log import get_logger
logger = get_logger(__name__)
# Azure regions used when provisioning app service plans in these tests;
# split by OS and workload type to spread quota usage.
WINDOWS_ASP_LOCATION_WEBAPP = 'japanwest'
WINDOWS_ASP_LOCATION_FUNCTIONAPP = 'francecentral'
LINUX_ASP_LOCATION_WEBAPP = 'eastus2'
LINUX_ASP_LOCATION_FUNCTIONAPP = 'ukwest'
class WebAppAccessRestrictionScenarioTest(ScenarioTest):
    """Scenario tests for `az webapp config access-restriction` commands."""
    @ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
    def test_webapp_access_restriction_show(self, resource_group):
        """A fresh webapp shows the default 'Allow all' rule for both the
        main and SCM sites, with scm-use-main disabled."""
        self.kwargs.update({
            'app_name': self.create_random_name(prefix='cli-webapp-nwr', length=24),
            'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24)
        })
        self.cmd('appservice plan create -g {rg} -n {plan_name}')
        self.cmd('webapp create -g {rg} -n {app_name} --plan {plan_name}', checks=[
            JMESPathCheck('state', 'Running')
        ])
        self.cmd('webapp config access-restriction show -g {rg} -n {app_name}', checks=[
            JMESPathCheck('length(@)', 3),
            JMESPathCheck('length(ipSecurityRestrictions)', 1),
            JMESPathCheck('ipSecurityRestrictions[0].name', 'Allow all'),
            JMESPathCheck('ipSecurityRestrictions[0].action', 'Allow'),
            JMESPathCheck('length(scmIpSecurityRestrictions)', 1),
            JMESPathCheck('scmIpSecurityRestrictions[0].name', 'Allow all'),
            JMESPathCheck('scmIpSecurityRestrictions[0].action', 'Allow'),
            JMESPathCheck('scmIpSecurityRestrictionsUseMain', False)
        ])
    @ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
    def test_webapp_access_restriction_set_simple(self, resource_group):
        """`set --use-same-restrictions-for-scm-site true` flips the
        scm-use-main flag on."""
        self.kwargs.update({
            'app_name': self.create_random_name(prefix='cli-webapp-nwr', length=24),
            'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24)
        })
        self.cmd('appservice plan create -g {rg} -n {plan_name}')
        self.cmd('webapp create -g {rg} -n {app_name} --plan {plan_name}', checks=[
            JMESPathCheck('state', 'Running')
        ])
        self.cmd('webapp config access-restriction set -g {rg} -n {app_name} --use-same-restrictions-for-scm-site true', checks=[
            JMESPathCheck('scmIpSecurityRestrictionsUseMain', True)
        ])
    @ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
    def test_webapp_access_restriction_set_complex(self, resource_group):
        """The scm-use-main flag can be toggled on (flag with no value)
        and back off (explicit false)."""
        self.kwargs.update({
            'app_name': self.create_random_name(prefix='cli-webapp-nwr', length=24),
            'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24)
        })
        self.cmd('appservice plan create -g {rg} -n {plan_name}')
        self.cmd('webapp create -g {rg} -n {app_name} --plan {plan_name}', checks=[
            JMESPathCheck('state', 'Running')
        ])
        self.cmd('webapp config access-restriction set -g {rg} -n {app_name} --use-same-restrictions-for-scm-site', checks=[
            JMESPathCheck('scmIpSecurityRestrictionsUseMain', True)
        ])
        self.cmd('webapp config access-restriction set -g {rg} -n {app_name} --use-same-restrictions-for-scm-site false', checks=[
            JMESPathCheck('scmIpSecurityRestrictionsUseMain', False)
        ])
    @ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
    def test_webapp_access_restriction_add(self, resource_group):
        """Adding one Allow rule implicitly appends the trailing
        'Deny all' rule."""
        self.kwargs.update({
            'app_name': self.create_random_name(prefix='cli-webapp-nwr', length=24),
            'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24)
        })
        self.cmd('appservice plan create -g {rg} -n {plan_name}')
        self.cmd('webapp create -g {rg} -n {app_name} --plan {plan_name}', checks=[
            JMESPathCheck('state', 'Running')
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name developers --action Allow --ip-address 130.220.0.0/27 --priority 200', checks=[
            JMESPathCheck('length(@)', 2),
            JMESPathCheck('[0].name', 'developers'),
            JMESPathCheck('[0].action', 'Allow'),
            JMESPathCheck('[1].name', 'Deny all'),
            JMESPathCheck('[1].action', 'Deny')
        ])
    @ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
    def test_webapp_access_restriction_add_ip_address_validation(self, resource_group):
        """Bare IPv4/IPv6 addresses are normalized to /32 and /128 CIDRs,
        and a comma-separated multi-source value is accepted as-is."""
        self.kwargs.update({
            'app_name': self.create_random_name(prefix='cli-webapp-nwr', length=24),
            'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24)
        })
        self.cmd('appservice plan create -g {rg} -n {plan_name}')
        self.cmd('webapp create -g {rg} -n {app_name} --plan {plan_name}', checks=[
            JMESPathCheck('state', 'Running')
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name ipv4 --action Allow --ip-address 130.220.0.0 --priority 200', checks=[
            JMESPathCheck('length(@)', 2),
            JMESPathCheck('[0].name', 'ipv4'),
            JMESPathCheck('[0].action', 'Allow'),
            JMESPathCheck('[0].ipAddress', '130.220.0.0/32'),
            JMESPathCheck('[1].name', 'Deny all'),
            JMESPathCheck('[1].action', 'Deny')
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name ipv6 --action Allow --ip-address 2004::1000 --priority 200', checks=[
            JMESPathCheck('length(@)', 3),
            JMESPathCheck('[1].name', 'ipv6'),
            JMESPathCheck('[1].action', 'Allow'),
            JMESPathCheck('[1].ipAddress', '2004::1000/128')
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name multi-source --action Allow --ip-address "2004::1000/120,192.168.0.0/24" --priority 200', checks=[
            JMESPathCheck('length(@)', 4),
            JMESPathCheck('[2].name', 'multi-source'),
            JMESPathCheck('[2].action', 'Allow'),
            JMESPathCheck('[2].ipAddress', '2004::1000/120,192.168.0.0/24')
        ])
    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
    def test_webapp_access_restriction_add_service_tag_validation(self, resource_group):
        """Single and comma-separated service tags are stored in the
        ipAddress field with tag type 'ServiceTag'."""
        self.kwargs.update({
            'app_name': self.create_random_name(prefix='cli-webapp-nwr', length=24),
            'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24)
        })
        self.cmd('appservice plan create -g {rg} -n {plan_name}')
        self.cmd('webapp create -g {rg} -n {app_name} --plan {plan_name}', checks=[
            JMESPathCheck('state', 'Running')
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name afd --action Allow --service-tag AzureFrontDoor.Backend --priority 200', checks=[
            JMESPathCheck('length(@)', 2),
            JMESPathCheck('[0].name', 'afd'),
            JMESPathCheck('[0].action', 'Allow'),
            JMESPathCheck('[0].ipAddress', 'AzureFrontDoor.Backend'),
            JMESPathCheck('[0].tag', 'ServiceTag'),
            JMESPathCheck('[1].name', 'Deny all'),
            JMESPathCheck('[1].action', 'Deny')
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name europe --action Allow --service-tag "AzureCloud.WestEurope,AzureCloud.NorthEurope" --priority 300', checks=[
            JMESPathCheck('length(@)', 3),
            JMESPathCheck('[1].name', 'europe'),
            JMESPathCheck('[1].action', 'Allow'),
            JMESPathCheck('[1].ipAddress', 'AzureCloud.WestEurope,AzureCloud.NorthEurope')
        ])
    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
    def test_webapp_access_restriction_add_http_header(self, resource_group):
        """Service-tag rules can carry HTTP header match conditions;
        repeated header names are merged into one multi-valued entry."""
        self.kwargs.update({
            'app_name': self.create_random_name(prefix='cli-webapp-nwr', length=24),
            'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24)
        })
        self.cmd('appservice plan create -g {rg} -n {plan_name} --is-linux')
        self.cmd('webapp create -g {rg} -n {app_name} --plan {plan_name} --runtime "DOTNETCORE|3.1"', checks=[
            JMESPathCheck('state', 'Running')
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name afd --action Allow --service-tag AzureFrontDoor.Backend --priority 200 --http-header x-azure-fdid=12345678-abcd-1234-abcd-12345678910a', checks=[
            JMESPathCheck('length(@)', 2),
            JMESPathCheck('[0].name', 'afd'),
            JMESPathCheck('[0].action', 'Allow'),
            JMESPathCheck('[0].ipAddress', 'AzureFrontDoor.Backend'),
            JMESPathCheck('[0].tag', 'ServiceTag'),
            JMESPathCheck('length([0].headers)', 1),
            JMESPathCheck('[1].name', 'Deny all'),
            JMESPathCheck('[1].action', 'Deny')
        ])
        self.cmd('webapp config access-restriction remove -g {rg} -n {app_name} --service-tag AzureFrontDoor.Backend', checks=[
            JMESPathCheck('length(@)', 1),
            JMESPathCheck('[0].name', 'Allow all'),
            JMESPathCheck('[0].action', 'Allow')
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name afd-extended --action Allow --service-tag AzureFrontDoor.Backend --priority 200 --http-header x-azure-fdid=12345678-abcd-1234-abcd-12345678910a x-azure-FDID=next-id x-forwarded-host=contoso.com', checks=[
            JMESPathCheck('length(@)', 2),
            JMESPathCheck('[0].name', 'afd-extended'),
            JMESPathCheck('[0].action', 'Allow'),
            JMESPathCheck('[0].ipAddress', 'AzureFrontDoor.Backend'),
            JMESPathCheck('[0].tag', 'ServiceTag'),
            JMESPathCheck('length([0].headers)', 2),
            JMESPathCheck('length([0].headers.\"x-azure-fdid\")', 2),
            JMESPathCheck('length([0].headers.\"x-forwarded-host\")', 1)
        ])
    @ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
    def test_webapp_access_restriction_add_service_endpoint(self, resource_group):
        """Add a vnet/subnet (service-endpoint) access-restriction rule and
        verify the implicit 'Deny all' rule is appended after it."""
        self.kwargs.update({
            'app_name': self.create_random_name(prefix='cli-webapp-nwr', length=24),
            'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24),
            'vnet_name': self.create_random_name(prefix='cli-vnet-nwr', length=24)
        })
        self.cmd('appservice plan create -g {rg} -n {plan_name}')
        self.cmd('webapp create -g {rg} -n {app_name} --plan {plan_name}', checks=[
            JMESPathCheck('state', 'Running')
        ])
        # The subnet starts without service endpoints; the CLI is expected to
        # register Microsoft.Web on it as part of the add command.
        self.cmd('az network vnet create -g {rg} -n {vnet_name} --address-prefixes 10.0.0.0/16 --subnet-name endpoint-subnet --subnet-prefixes 10.0.0.0/24', checks=[
            JMESPathCheck('subnets[0].serviceEndpoints', None)
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name vnet-integration --action Allow --vnet-name {vnet_name} --subnet endpoint-subnet --priority 150', checks=[
            JMESPathCheck('length(@)', 2),
            JMESPathCheck('[0].name', 'vnet-integration'),
            JMESPathCheck('[0].action', 'Allow'),
            JMESPathCheck('[1].name', 'Deny all'),
            JMESPathCheck('[1].action', 'Deny')
        ])
    @ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
    def test_webapp_access_restriction_remove(self, resource_group):
        """Add an IP-based rule then remove it by name; the list should fall
        back to the single implicit 'Allow all' rule."""
        self.kwargs.update({
            'app_name': self.create_random_name(prefix='cli-webapp-nwr', length=24),
            'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24)
        })
        self.cmd('appservice plan create -g {rg} -n {plan_name}')
        self.cmd('webapp create -g {rg} -n {app_name} --plan {plan_name}', checks=[
            JMESPathCheck('state', 'Running')
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name developers --action Allow --ip-address 130.220.0.0/27 --priority 200', checks=[
            JMESPathCheck('length(@)', 2),
            JMESPathCheck('[0].name', 'developers'),
            JMESPathCheck('[0].action', 'Allow'),
            JMESPathCheck('[1].name', 'Deny all'),
            JMESPathCheck('[1].action', 'Deny')
        ])
        self.cmd('webapp config access-restriction remove -g {rg} -n {app_name} --rule-name developers', checks=[
            JMESPathCheck('length(@)', 1),
            JMESPathCheck('[0].name', 'Allow all'),
            JMESPathCheck('[0].action', 'Allow')
        ])
    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
    def test_webapp_access_restriction_mixed_remove(self, resource_group):
        """Stack three rule kinds (IP, vnet/subnet, service tag) and remove
        each by its own selector rather than by rule name, verifying the list
        shrinks one rule at a time back to 'Allow all'."""
        self.kwargs.update({
            'app_name': self.create_random_name(prefix='cli-webapp-nwr', length=24),
            'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24),
            'vnet_name': self.create_random_name(prefix='cli-vnet-nwr', length=24),
            'ip_address': '130.220.0.0/27',
            'service_tag': 'AzureFrontDoor.Backend'
        })
        self.cmd('appservice plan create -g {rg} -n {plan_name}')
        self.cmd('webapp create -g {rg} -n {app_name} --plan {plan_name}', checks=[
            JMESPathCheck('state', 'Running')
        ])
        self.cmd('az network vnet create -g {rg} -n {vnet_name} --address-prefixes 10.0.0.0/16 --subnet-name endpoint-subnet --subnet-prefixes 10.0.0.0/24', checks=[
            JMESPathCheck('subnets[0].serviceEndpoints', None)
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name developers --action Allow --ip-address {ip_address} --priority 100', checks=[
            JMESPathCheck('length(@)', 2),
            JMESPathCheck('[0].name', 'developers'),
            JMESPathCheck('[0].action', 'Allow'),
            JMESPathCheck('[1].name', 'Deny all'),
            JMESPathCheck('[1].action', 'Deny')
        ])
        # Rules are returned ordered by priority, so each addition shifts the
        # implicit 'Deny all' entry one index further down.
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name vnet-integration --action Allow --vnet-name {vnet_name} --subnet endpoint-subnet --priority 150', checks=[
            JMESPathCheck('length(@)', 3),
            JMESPathCheck('[1].name', 'vnet-integration'),
            JMESPathCheck('[1].action', 'Allow'),
            JMESPathCheck('[2].name', 'Deny all'),
            JMESPathCheck('[2].action', 'Deny')
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name afd --action Allow --service-tag {service_tag} --priority 200 --http-header x-azure-fdid=12345678-abcd-1234-abcd-12345678910a', checks=[
            JMESPathCheck('length(@)', 4),
            JMESPathCheck('[2].name', 'afd'),
            JMESPathCheck('[2].action', 'Allow'),
            JMESPathCheck('[2].ipAddress', 'AzureFrontDoor.Backend'),
            JMESPathCheck('[2].tag', 'ServiceTag'),
            JMESPathCheck('length([2].headers)', 1),
            JMESPathCheck('[3].name', 'Deny all'),
            JMESPathCheck('[3].action', 'Deny')
        ])
        self.cmd('webapp config access-restriction remove -g {rg} -n {app_name} --vnet-name {vnet_name} --subnet endpoint-subnet', checks=[
            JMESPathCheck('length(@)', 3)
        ])
        self.cmd('webapp config access-restriction remove -g {rg} -n {app_name} --ip-address {ip_address}', checks=[
            JMESPathCheck('length(@)', 2)
        ])
        self.cmd('webapp config access-restriction remove -g {rg} -n {app_name} --service-tag {service_tag}', checks=[
            JMESPathCheck('length(@)', 1),
            JMESPathCheck('[0].name', 'Allow all'),
            JMESPathCheck('[0].action', 'Allow')
        ])
    @ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
    def test_webapp_access_restriction_add_scm(self, resource_group):
        """Add an IP rule to the SCM (Kudu) site via --scm-site and verify the
        implicit 'Deny all' rule follows it."""
        self.kwargs.update({
            'app_name': self.create_random_name(prefix='cli-webapp-nwr', length=24),
            'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24)
        })
        self.cmd('appservice plan create -g {rg} -n {plan_name}')
        self.cmd('webapp create -g {rg} -n {app_name} --plan {plan_name}', checks=[
            JMESPathCheck('state', 'Running')
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name developers --action Allow --ip-address 130.220.0.0/27 --priority 200 --scm-site', checks=[
            JMESPathCheck('length(@)', 2),
            JMESPathCheck('[0].name', 'developers'),
            JMESPathCheck('[0].action', 'Allow'),
            JMESPathCheck('[1].name', 'Deny all'),
            JMESPathCheck('[1].action', 'Deny')
        ])
    @ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
    def test_webapp_access_restriction_remove_scm(self, resource_group):
        """Round-trip an SCM-site rule: add with --scm-site, remove by name
        with --scm-site, and confirm fallback to 'Allow all'."""
        self.kwargs.update({
            'app_name': self.create_random_name(prefix='cli-webapp-nwr', length=24),
            'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24)
        })
        self.cmd('appservice plan create -g {rg} -n {plan_name}')
        self.cmd('webapp create -g {rg} -n {app_name} --plan {plan_name}', checks=[
            JMESPathCheck('state', 'Running')
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name developers --action Allow --ip-address 130.220.0.0/27 --priority 200 --scm-site', checks=[
            JMESPathCheck('length(@)', 2),
            JMESPathCheck('[0].name', 'developers'),
            JMESPathCheck('[0].action', 'Allow'),
            JMESPathCheck('[1].name', 'Deny all'),
            JMESPathCheck('[1].action', 'Deny')
        ])
        self.cmd('webapp config access-restriction remove -g {rg} -n {app_name} --rule-name developers --scm-site', checks=[
            JMESPathCheck('length(@)', 1),
            JMESPathCheck('[0].name', 'Allow all'),
            JMESPathCheck('[0].action', 'Allow')
        ])
    @ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
    def test_webapp_access_restriction_slot(self, resource_group):
        """Exercise access restrictions on a deployment slot: 'show' exposes
        main and SCM rule sets (plus the use-main flag), and 'add --slot'
        targets the slot's own rule list."""
        self.kwargs.update({
            'app_name': self.create_random_name(prefix='cli-webapp-nwr', length=24),
            'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24),
            'slot_name': 'stage'
        })
        # Slots require at least a Standard plan.
        self.cmd('appservice plan create -g {rg} -n {plan_name} --sku S1')
        self.cmd('webapp create -g {rg} -n {app_name} --plan {plan_name}', checks=[
            JMESPathCheck('state', 'Running')
        ])
        self.cmd('webapp deployment slot create -g {rg} -n {app_name} --slot {slot_name}', checks=[
            JMESPathCheck('state', 'Running')
        ])
        self.cmd('webapp config access-restriction show -g {rg} -n {app_name} --slot {slot_name}', checks=[
            JMESPathCheck('length(@)', 3),
            JMESPathCheck('length(ipSecurityRestrictions)', 1),
            JMESPathCheck('ipSecurityRestrictions[0].name', 'Allow all'),
            JMESPathCheck('ipSecurityRestrictions[0].action', 'Allow'),
            JMESPathCheck('length(scmIpSecurityRestrictions)', 1),
            JMESPathCheck('scmIpSecurityRestrictions[0].name', 'Allow all'),
            JMESPathCheck('scmIpSecurityRestrictions[0].action', 'Allow'),
            JMESPathCheck('scmIpSecurityRestrictionsUseMain', False)
        ])
        self.cmd('webapp config access-restriction add -g {rg} -n {app_name} --rule-name developers --action Allow --ip-address 130.220.0.0/27 --priority 200 --slot {slot_name}', checks=[
            JMESPathCheck('length(@)', 2),
            JMESPathCheck('[0].name', 'developers'),
            JMESPathCheck('[0].action', 'Allow'),
            JMESPathCheck('[1].name', 'Deny all'),
            JMESPathCheck('[1].action', 'Deny')
        ])
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_webapp_access_restriction_commands.py | Python | mit | 20,734 |
class Solution:
    # @param digits, a list of integer digits
    # @return a list of integer digits
    def plusOne(self, digits):
        """Return the digit list of the number ``digits`` plus one.

        ``digits`` is a big-endian list of decimal digits (most significant
        first). An empty list yields an empty list, matching the original
        behavior. Unlike the original implementation, the input list is NOT
        mutated (the old code popped every element off ``digits``).
        """
        if not digits:
            return []
        # Work on a copy so the caller's list is left intact.
        result = list(digits)
        # Walk from the least significant digit, propagating the carry.
        for i in range(len(result) - 1, -1, -1):
            if result[i] < 9:
                result[i] += 1
                return result
            result[i] = 0  # 9 + 1 -> 0, carry continues leftwards
        # Every digit was 9: the number grows by one digit (e.g. 99 -> 100).
        return [1] + result
# Ad-hoc smoke test (Python 2 print statements).
s = Solution()
print s.plusOne([1,2])
print s.plusOne([9,9]) | shootsoft/practice | LeetCode/python/061-090/066-plus-one/plus1.py | Python | apache-2.0 | 825 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: refresh help_text on the twitter User
    count fields (favorites/followers/friends/listed/statuses). The byte
    strings carry UTF-8 escapes (e.g. a right single quote) verbatim."""

    dependencies = [
        ("twitter", "0003_auto_20150730_1112"),
    ]

    operations = [
        migrations.AlterField(
            model_name="user",
            name="favorites_count",
            field=models.PositiveIntegerField(
                default=0,
                help_text=b"The number of tweets this user has favorited in the account\xe2\x80\x99s lifetime",  # noqa: E501
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="followers_count",
            field=models.PositiveIntegerField(
                default=0, help_text=b"The number of followers this account has"
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="friends_count",
            field=models.PositiveIntegerField(
                default=0, help_text=b"Tne number of users this account is following."
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="listed_count",
            field=models.PositiveIntegerField(
                default=0,
                help_text=b"The number of public lists this user is a member of",
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="statuses_count",
            field=models.PositiveIntegerField(
                default=0,
                help_text=b"The number of tweets, including retweets, by this user",
            ),
        ),
    ]
| philgyford/django-ditto | ditto/twitter/migrations/0004_auto_20150730_1116.py | Python | mit | 1,640 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# Make the repository root importable so autodoc can import knowledge_repo.
sys.path.insert(0, os.path.abspath('..'))
import knowledge_repo

# -- Project information -----------------------------------------------------

project = 'Knowledge Repo'
copyright = '2018, Airbnb and contributors'
author = 'Airbnb and contributors'

# The short X.Y version
# assumes __version__ may carry a '_<suffix>' part -- only the leading
# portion is displayed.
version = 'v' + knowledge_repo.__version__.split('_')[0]
# The full version, including alpha/beta/rc tags
release = version

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Imported here (not at the top) because it is only needed for theme lookup.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'KnowledgeRepodoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'KnowledgeRepo.tex', 'Knowledge Repo Documentation',
     'Airbnb and contributors', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'knowledgerepo', 'Knowledge Repo Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'KnowledgeRepo', 'Knowledge Repo Documentation',
     author, 'KnowledgeRepo', 'One line description of project.',
     'Miscellaneous'),
]

# -- Extension configuration -------------------------------------------------
| airbnb/knowledge-repo | docs/conf.py | Python | apache-2.0 | 5,149 |
# Copyright 2019 - Nokia Corporation
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# oslo.config options registered for the Zaqar notifier plugin:
# 'notifier' is the dotted path of the notifier implementation class and
# 'queue' names the Zaqar queue that alarm messages are posted to.
OPTS = [
    cfg.StrOpt('notifier',
               default='vitrage.notifier.plugins.zaqar.'
                       'zaqar_notifier.ZaqarNotifier',
               help='zaqar notifier class path',
               required=True),
    cfg.StrOpt('queue',
               default='alarms',
               help='zaqar queue to post messages',
               required=True),
]
| openstack/vitrage | vitrage/notifier/plugins/zaqar/__init__.py | Python | apache-2.0 | 1,004 |
##Ladder 'name', 'game', 'size'
##Events': [ ]
##'Players': { 'name': elo}
##
##Event {'date': x, 'Base': { }, 'size': x, 'Sets': [ ]}
##Set = {'P1': 's', 'P2': 's', 'Matches': [ ]
##Match = [WINNER, char1, char2, Stage]
##
##PlayerIDS
##{'name': id}
##
##Players {'PIDs': { }, 'Data': [ ]}
##Data: {'last': EID, 'Ladders': [ ]}
##
##Ladder = {'LID': x, 'played': x, 'Scores': { }}
##Score = 'Opp': {'last': x, 'W': x, 'L': x}
##
##Queue {'Event': LID' 'setups': x, 'InProg': [ ], 'Queue': []}
##InProg {'P1': 's', 'P2': 's'}
##
##Reserved: { 'name': qid }
import json
import datetime
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "1";
# return render_template('app/index.html')
if __name__ == "__main__":
app.run()
# Directory holding the persisted JSON data files (ladder.jsn / player.jsn).
path = "C:/Users/admin/Documents/smash/"

# In-memory state, populated by load() and written back by save().
LADDERS = []
PLAYERS = {}
PLDATA = []
QUEUES = []
RESERVED = { }
# NOTE(review): EVENT looks like a placeholder for "current event id";
# updateScore() stores it into players' 'last' field -- confirm intent.
EVENT = 0
# Elo tuning: VARI is the per-set rating spread, SEARCH scales how many
# queued opponents are considered per setup, BASEELO is the starting rating.
VARI = 32
SEARCH = 2
BASEELO = 1200
##Players {'PIDs': { }, 'Data': [ ]}
def load():
    """Populate the in-memory LADDERS / PLAYERS / PLDATA state from the
    JSON files under ``path``.

    Uses ``with`` so the file handles are closed even if json.load raises
    (the original left them open on error).
    """
    with open(path + 'ladder.jsn') as f:
        LADDERS.extend(json.load(f))
    with open(path + 'player.jsn') as f:
        player = json.load(f)
    PLAYERS.update(player['PIDs'])
    PLDATA.extend(player['Data'])
def save():
    """Persist LADDERS and the player tables back to the JSON files.

    Uses ``with`` so handles are closed (and buffers flushed) even if
    json.dump raises partway through.
    """
    with open(path + 'ladder.jsn', mode='w+') as f:
        json.dump(LADDERS, f, indent=2)
    with open(path + 'player.jsn', mode='w+') as f:
        json.dump({'PIDs': PLAYERS, 'Data': PLDATA}, f, indent=2)
##Ladder 'name', 'game' 'size' Events': [ ] 'Players': { }
def newLadder(name, game):
    """Register an empty ladder for the given game under the given name."""
    LADDERS.append({
        'Ladder': name,
        'game': game,
        'size': 0,
        'Events': [],
        'Players': {},
    })
##PlayerIDS {'name': id}
##Players {'PIDs': { }, 'Data': [ ]}
##Data: {'last': EID, 'Ladders': [ ]}
def newPlayer(name):
    """Assign ``name`` the next free player id and create blank player data."""
    PLAYERS[name] = len(PLAYERS)
    PLDATA.append({'last': -1, 'Ladders': []})
def newEntrant(name):
    """Create a player and enter them into every ladder at the base elo."""
    newPlayer(name)
    for lid in range(len(LADDERS)):
        addPlayer(name, lid, BASEELO)
##Ladder 'name', 'game' 'size' Events': [ ] 'Players': { }
##'Players': { 'name': elo}
##Ladder = {'LID': x, 'played': x, 'Scores': { }}
def addPlayer(name, lid, elo):
    """Enter an existing player into ladder ``lid`` at the given elo."""
    ladder = LADDERS[lid]
    ladder['Players'][name] = elo
    ladder['size'] += 1
    PLDATA[PLAYERS[name]]['Ladders'].append({'LID': lid, 'played': 0, 'Scores': {}})
##Queue {'Event': LID' 'setups': x, 'InProg': [ ], 'Queue': []}
##InProg {'P1': 's', 'P2': 's'}
##Event {'date': x, 'Base': { }, 'size': x, 'Sets': [ ]}
##Reserved: { 'name': qid }
def newEvent(lid, setups):
    """Open a new event on ladder ``lid``: snapshot current elos as the
    event baseline and create the matching match queue."""
    newQueue(lid, setups)
    LADDERS[lid]['Events'].append({
        'Date': datetime.date.today().isoformat(),
        'Base': dict(LADDERS[lid]['Players']),
        'size': 0,
        'Sets': [],
    })
def newQueue(lid, setups):
    """Create an empty match queue bound to ladder ``lid`` with ``setups``
    physical stations available."""
    QUEUES.append({'Event': lid, 'setups': setups, 'InProg': [], 'Queue': []})
##Ladder 'name', 'game' 'size' Events': [ ] 'Players': { }
def fillQueue(qid):
    """Enqueue every player of the queue's ladder.

    Drops the original's unused ``size`` local and iterates the players
    dict directly instead of calling .keys().
    """
    lid = QUEUES[qid]['Event']
    for name in LADDERS[lid]['Players']:
        addToQueue(qid, name)
def addToQueue(qid, name):
    """Append ``name`` to the waiting list of queue ``qid``."""
    queue = QUEUES[qid]
    queue['Queue'].append(name)
def printQueue(qid):
    """Print the state of queue ``qid``: ladder name, in-progress matches,
    then the waiting players on one comma-separated line."""
    lid = QUEUES[qid]['Event']
    print("Queue for:", LADDERS[lid]['Ladder'])
    for match in QUEUES[qid]['InProg']:
        print("In Progress:", match['P1'], "vs", match['P2'])
    line = 'Queue: '
    for name in QUEUES[qid]['Queue']:
        line = line + name + ", "
    print(line)
def findPlayerScore(name, lid):
    """Index of the per-ladder score record for (name, lid) inside the
    player's 'Ladders' list, or -1 when the player is not on that ladder."""
    records = PLDATA[PLAYERS[name]]['Ladders']
    for idx, record in enumerate(records):
        if record['LID'] == lid:
            return idx
    return -1
def getPlayerElo(lid, name):
    """Current elo of ``name`` on ladder ``lid``."""
    ladder = LADDERS[lid]
    return ladder['Players'][name]
##Event {'date': x, 'Base': { }, 'size': x, 'Sets': [ ]}
##Set = {'P1': 's', 'P2': 's', 'Matches': [ ]
##Match = [WINNER, char1, char2, Stage]
##Ladder = {'LID': x, 'played': x, 'Scores': { } }
##Score = {'last': x, 'W': x, 'L': x}
def setResult(qid, progid, matches):
    """Finalize in-progress match ``progid`` of queue ``qid``.

    ``matches`` is a list of [winner_index, char1, char2, stage] entries
    (winner_index 0 means P1 won that game). Records the set on the
    ladder's latest event, updates both players' head-to-head records,
    exchanges elo, and re-queues both players.
    """
    prog = QUEUES[qid]['InProg'][progid]
    lid = QUEUES[qid]['Event']
    #record match in event
    aset = {'P1': prog['P1'], 'P2': prog['P2'], 'Matches': matches}
    eid = len(LADDERS[lid]['Events']) - 1
    LADDERS[lid]['Events'][eid]['Sets'].append(aset)
    LADDERS[lid]['Events'][eid]['size'] += 1
    #update ladder scores between both players
    sc1 = 0
    sc2 = 0
    for x in matches:
        if(x[0] == 0):
            sc1 += 1
        else:
            sc2 += 1
    updateScore(lid, prog['P1'], prog['P2'], sc1, sc2)
    updateScore(lid, prog['P2'], prog['P1'], sc2, sc1)
    #change elos based on results
    # NOTE(review): non-standard rating update -- each side's swing is
    # proportional to their share of the combined elo times games won,
    # not the classic Elo expected-score formula. Confirm this is intended.
    one = LADDERS[lid]['Players'][prog['P1']]
    two = LADDERS[lid]['Players'][prog['P2']]
    ch1 = (one / (one + two)) * VARI * sc1
    ch2 = (two / (one + two)) * VARI * sc2
    LADDERS[lid]['Players'][prog['P1']] += ch1
    LADDERS[lid]['Players'][prog['P2']] -= ch1
    LADDERS[lid]['Players'][prog['P1']] -= ch2
    LADDERS[lid]['Players'][prog['P2']] += ch2
    #readd to queue
    # NOTE(review): neither player is removed from RESERVED here -- verify
    # they are meant to be released elsewhere before matchMake runs again.
    QUEUES[qid]['Queue'].append(prog['P1'])
    QUEUES[qid]['Queue'].append(prog['P2'])
    del QUEUES[qid]['InProg'][progid]
##Data: {'last': EID, 'Ladders': [ ]}
##
##Ladder = {'LID': x, 'played': x, 'Scores': { }}
##Score = {'last': x, 'W': x, 'L': x}
def updateScore(lid, p1, p2, sc1, sc2):
    """Fold a finished set into p1's record on ladder ``lid``:
    p1 won ``sc1`` games and lost ``sc2`` against ``p2``."""
    sid = findPlayerScore(p1, lid)
    # NOTE(review): EVENT is the module-level constant 0 -- this looks like
    # it was meant to be the current event id (``eid`` below); confirm.
    PLDATA[PLAYERS[p1]]['last'] = EVENT
    PLDATA[PLAYERS[p1]]['Ladders'][sid]['played'] += 1
    scores = PLDATA[PLAYERS[p1]]['Ladders'][sid]['Scores']
    eid = len(LADDERS[lid]['Events']) - 1
    if(p2 in scores):
        scores[p2]['W'] += sc1
        scores[p2]['L'] += sc2
        scores[p2]['last'] = eid
    else:
        scores[p2] = {'W': sc1, 'L': sc2, 'last': eid}
##matchmaking holy moly
##InProg {'P1': 's', 'P2': 's'}
def callMatch(qid, p1, p2):
    """Start a match on queue ``qid``: mark it in progress, pull both
    players out of the waiting list, and reserve them."""
    QUEUES[qid]['InProg'].append({'P1': p1, 'P2': p2})
    QUEUES[qid]['Queue'].remove(p1)
    QUEUES[qid]['Queue'].remove(p2)
    RESERVED[p1] = qid
    RESERVED[p2] = qid
def matchMake(qid, deep):
    """Propose up to ``deep`` pairings from the unreserved players waiting
    on queue ``qid``.

    Prints the callMatch(...) invocation for each proposed pairing instead
    of executing it. ``select`` bounds how far into the waiting list an
    opponent is searched (setups * SEARCH), and only ever shrinks.

    Drops the original's unused ``lad`` local; otherwise the pairing order
    is identical.
    """
    waiting = [name for name in QUEUES[qid]['Queue'] if name not in RESERVED]
    select = QUEUES[qid]['setups'] * SEARCH
    while deep > 0 and len(waiting) >= 2:
        pl1 = waiting.pop(0)
        if select > len(waiting):
            select = len(waiting)
        pl2 = getBestOpp(qid, pl1, waiting[:select])
        print("callMatch(" + str(qid) + ", '" + pl1 + "', '" + pl2 + "')")
        waiting.remove(pl2)
        deep -= 1
def getBestOpp(qid, pl1, que):
    """Pick the opponent in ``que`` whose elo is closest to ``pl1``'s.

    Opponents already faced in the current event are skipped unless that
    would leave nobody. Fixes two small defects: the unused ``eid`` local
    is gone, and the sentinel is float('inf') instead of 10000, which
    could silently reject every candidate when elo gaps grew very large.
    """
    lid = QUEUES[qid]['Event']
    candidates = [name for name in que if not isRecent(lid, pl1, name)]
    if not candidates:
        candidates = que
    my_elo = getPlayerElo(lid, pl1)
    best = ''
    best_gap = float('inf')
    for name in candidates:
        gap = abs(my_elo - getPlayerElo(lid, name))
        if gap < best_gap:
            best_gap = gap
            best = name
    return best
def isRecent(lid, pl1, pl2):
    """True when ``pl1`` already played ``pl2`` in the ladder's latest event."""
    eid = len(LADDERS[lid]['Events']) - 1
    sid = findPlayerScore(pl1, lid)
    scores = PLDATA[PLAYERS[pl1]]['Ladders'][sid]['Scores']
    return pl2 in scores and scores[pl2]['last'] == eid
##Ladder = {'LID': x, 'played': x, 'Scores': { }}
##Score = {'last': x, 'W': x, 'L': x}
def DATAFIX(ver):
    """One-off data migrations. ``ver`` 0 lower-cases every player name in
    ladder elo tables, recorded sets, the player-id map, and rewrites the
    old list-form 'Scores' into the name-keyed mapping form.

    Fix: the original deleted dict keys while iterating the dict itself,
    which raises RuntimeError ("dictionary changed size during iteration");
    all such loops now iterate over a snapshot via list().
    """
    if ver == 0:
        for ladder in LADDERS:
            # Ladder elos -- snapshot the keys before renaming in place.
            for name in list(ladder['Players']):
                if not name.islower():
                    ladder['Players'][name.lower()] = ladder['Players'][name]
                    del ladder['Players'][name]
            # Match names recorded inside each event's sets.
            for event in ladder['Events']:
                for aset in event['Sets']:
                    if not aset['P1'].islower():
                        aset['P1'] = aset['P1'].lower()
                    if not aset['P2'].islower():
                        aset['P2'] = aset['P2'].lower()
        for name in list(PLAYERS):
            if not name.islower():
                PLAYERS[name.lower()] = PLAYERS[name]
                del PLAYERS[name]
        # Destructive: converts list-form score entries ({'Opp': ...}) to the
        # mapping form -- running this a second time breaks the data.
        for data in PLDATA:
            for lad in data['Ladders']:
                newscores = {}
                for entry in lad['Scores']:
                    newscores[entry['Opp'].lower()] = {'W': entry['W'], 'L': entry['L'], 'last': entry['last']}
                lad['Scores'] = newscores
| JPShaya/ladder | ladderweb.py | Python | mit | 8,779 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
# Selection values for the applicant 'priority' (appreciation) field.
AVAILABLE_PRIORITIES = [
    ('0', 'Bad'),
    ('1', 'Below Average'),
    ('2', 'Average'),
    ('3', 'Good'),
    ('4', 'Excellent')
]
class hr_recruitment_source(osv.osv):
    """ Sources of HR Recruitment """
    _name = "hr.recruitment.source"
    _description = "Source of Applicants"
    _columns = {
        # Human-readable, translatable label of the recruitment channel.
        'name': fields.char('Source Name', size=64, required=True, translate=True),
    }
class hr_recruitment_stage(osv.osv):
    """ Stage of HR Recruitment """
    _name = "hr.recruitment.stage"
    _description = "Stage of Recruitment"
    # Stages are shown ordered by their 'sequence' value.
    _order = 'sequence'
    _columns = {
        'name': fields.char('Name', size=64, required=True, translate=True),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of stages."),
        # Empty department_id means the stage is shared by all departments.
        'department_id':fields.many2one('hr.department', 'Specific to a Department', help="Stages of the recruitment process may be different per department. If this stage is common to all departments, keep this field empty."),
        'requirements': fields.text('Requirements'),
        'template_id': fields.many2one('email.template', 'Use template', help="If set, a message is posted on the applicant using the template when the applicant is set to the stage."),
        'fold': fields.boolean('Folded in Kanban View',
                               help='This stage is folded in the kanban view when'
                               'there are no records in that stage to display.'),
    }
    _defaults = {
        'sequence': 1,
    }
class hr_recruitment_degree(osv.osv):
    """ Degree of HR Recruitment """
    _name = "hr.recruitment.degree"
    _description = "Degree of Recruitment"
    _columns = {
        'name': fields.char('Name', size=64, required=True, translate=True),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of degrees."),
    }
    _defaults = {
        'sequence': 1,
    }
    # Degree names must be unique across the whole table.
    _sql_constraints = [
        ('name_uniq', 'unique (name)', 'The name of the Degree of Recruitment must be unique!')
    ]
class hr_applicant(osv.Model):
    _name = "hr.applicant"
    _description = "Applicant"
    _order = "id desc"
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    # Subtype notifications posted when stage_id changes: 'new' for the
    # earliest stage(s), 'stage changed' otherwise.
    _track = {
        'stage_id': {
            # this is only an heuristics; depending on your particular stage configuration it may not match all 'new' stages
            'hr_recruitment.mt_applicant_new': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence <= 1,
            'hr_recruitment.mt_applicant_stage_changed': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence > 1,
        },
    }
    # Label used by the mass-mailing feature for this model.
    _mail_mass_mailing = _('Applicants')
def _get_default_department_id(self, cr, uid, context=None):
""" Gives default department by checking if present in the context """
return (self._resolve_department_id_from_context(cr, uid, context=context) or False)
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
department_id = self._get_default_department_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], department_id, [('fold', '=', False)], context=context)
def _resolve_department_id_from_context(self, cr, uid, context=None):
""" Returns ID of department based on the value of 'default_department_id'
context key, or None if it cannot be resolved to a single
department.
"""
if context is None:
context = {}
if type(context.get('default_department_id')) in (int, long):
return context.get('default_department_id')
if isinstance(context.get('default_department_id'), basestring):
department_name = context['default_department_id']
department_ids = self.pool.get('hr.department').name_search(cr, uid, name=department_name, context=context)
if len(department_ids) == 1:
return int(department_ids[0][0])
return None
def _get_default_company_id(self, cr, uid, department_id=None, context=None):
company_id = False
if department_id:
department = self.pool['hr.department'].browse(cr, uid, department_id, context=context)
company_id = department.company_id.id if department and department.company_id else False
if not company_id:
company_id = self.pool['res.company']._company_default_get(cr, uid, 'hr.applicant', context=context)
return company_id
    def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
        """Group-expansion hook for the kanban view: returns every stage
        column to display (shared stages plus the context department's),
        together with each stage's folded flag."""
        access_rights_uid = access_rights_uid or uid
        stage_obj = self.pool.get('hr.recruitment.stage')
        order = stage_obj._order
        # lame hack to allow reverting search, should just work in the trivial case
        if read_group_order == 'stage_id desc':
            order = "%s desc" % order
        # retrieve section_id from the context and write the domain
        # - ('id', 'in', 'ids'): add columns that should be present
        # - OR ('department_id', '=', False), ('fold', '=', False): add default columns that are not folded
        # - OR ('department_id', 'in', department_id), ('fold', '=', False) if department_id: add department columns that are not folded
        department_id = self._resolve_department_id_from_context(cr, uid, context=context)
        search_domain = []
        if department_id:
            search_domain += ['|', ('department_id', '=', department_id)]
        search_domain += ['|', ('id', 'in', ids), ('department_id', '=', False)]
        stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
        result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
        # restore order of the search (Python 2 cmp-style sort)
        result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
        fold = {}
        for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
            fold[stage.id] = stage.fold or False
        return result, fold
def _compute_day(self, cr, uid, ids, fields, args, context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Openday’s IDs
@return: difference between current date and log date
@param context: A standard dictionary for contextual values
"""
res = {}
for issue in self.browse(cr, uid, ids, context=context):
for field in fields:
res[issue.id] = {}
duration = 0
ans = False
hours = 0
if field in ['day_open']:
if issue.date_open:
date_create = datetime.strptime(issue.create_date, "%Y-%m-%d %H:%M:%S")
date_open = datetime.strptime(issue.date_open, "%Y-%m-%d %H:%M:%S")
ans = date_open - date_create
elif field in ['day_close']:
if issue.date_closed:
date_create = datetime.strptime(issue.create_date, "%Y-%m-%d %H:%M:%S")
date_close = datetime.strptime(issue.date_closed, "%Y-%m-%d %H:%M:%S")
ans = date_close - date_create
if ans:
duration = float(ans.days)
res[issue.id][field] = abs(float(duration))
return res
def _get_attachment_number(self, cr, uid, ids, fields, args, context=None):
res = dict.fromkeys(ids, 0)
for app_id in ids:
res[app_id] = self.pool['ir.attachment'].search_count(cr, uid, [('res_model', '=', 'hr.applicant'), ('res_id', '=', app_id)], context=context)
return res
_columns = {
'name': fields.char('Subject / Application Name', size=128, required=True),
'active': fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the case without removing it."),
'description': fields.text('Description'),
'email_from': fields.char('Email', size=128, help="These people will receive email."),
'email_cc': fields.text('Watchers Emails', size=252, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'probability': fields.float('Probability'),
'partner_id': fields.many2one('res.partner', 'Contact'),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'write_date': fields.datetime('Update Date', readonly=True),
'stage_id': fields.many2one ('hr.recruitment.stage', 'Stage', track_visibility='onchange',
domain="['|', ('department_id', '=', department_id), ('department_id', '=', False)]"),
'last_stage_id': fields.many2one('hr.recruitment.stage', 'Last Stage',
help='Stage of the applicant before being in the current stage. Used for lost cases analysis.'),
'categ_ids': fields.many2many('hr.applicant_category', string='Tags'),
'company_id': fields.many2one('res.company', 'Company'),
'user_id': fields.many2one('res.users', 'Responsible', track_visibility='onchange'),
'date_closed': fields.datetime('Closed', readonly=True, select=True),
'date_open': fields.datetime('Assigned', readonly=True, select=True),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True),
'date_action': fields.date('Next Action Date'),
'title_action': fields.char('Next Action', size=64),
'priority': fields.selection(AVAILABLE_PRIORITIES, 'Appreciation'),
'job_id': fields.many2one('hr.job', 'Applied Job'),
'salary_proposed_extra': fields.char('Proposed Salary Extra', size=100, help="Salary Proposed by the Organisation, extra advantages"),
'salary_expected_extra': fields.char('Expected Salary Extra', size=100, help="Salary Expected by Applicant, extra advantages"),
'salary_proposed': fields.float('Proposed Salary', help="Salary Proposed by the Organisation"),
'salary_expected': fields.float('Expected Salary', help="Salary Expected by Applicant"),
'availability': fields.integer('Availability', help="The number of days in which the applicant will be available to start working"),
'partner_name': fields.char("Applicant's Name", size=64),
'partner_phone': fields.char('Phone', size=32),
'partner_mobile': fields.char('Mobile', size=32),
'type_id': fields.many2one('hr.recruitment.degree', 'Degree'),
'department_id': fields.many2one('hr.department', 'Department'),
'survey': fields.related('job_id', 'survey_id', type='many2one', relation='survey.survey', string='Survey'),
'response_id': fields.many2one('survey.user_input', "Response", ondelete='set null', oldname="response"),
'reference': fields.char('Referred By', size=128),
'source_id': fields.many2one('hr.recruitment.source', 'Source'),
'day_open': fields.function(_compute_day, string='Days to Open', \
multi='day_open', type="float", store=True),
'day_close': fields.function(_compute_day, string='Days to Close', \
multi='day_close', type="float", store=True),
'color': fields.integer('Color Index'),
'emp_id': fields.many2one('hr.employee', string='Employee', help='Employee linked to the applicant.'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'attachment_number': fields.function(_get_attachment_number, string='Number of Attachments', type="integer"),
}
_defaults = {
'active': lambda *a: 1,
'user_id': lambda s, cr, uid, c: uid,
'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),
'department_id': lambda s, cr, uid, c: s._get_default_department_id(cr, uid, c),
'company_id': lambda s, cr, uid, c: s._get_default_company_id(cr, uid, s._get_default_department_id(cr, uid, c), c),
'color': 0,
'date_last_stage_update': fields.datetime.now,
}
_group_by_full = {
'stage_id': _read_group_stage_ids
}
def onchange_job(self, cr, uid, ids, job_id=False, context=None):
department_id = False
if job_id:
job_record = self.pool.get('hr.job').browse(cr, uid, job_id, context=context)
department_id = job_record and job_record.department_id and job_record.department_id.id or False
user_id = job_record and job_record.user_id and job_record.user_id.id or False
return {'value': {'department_id': department_id, 'user_id': user_id}}
    def onchange_department_id(self, cr, uid, ids, department_id=False, stage_id=False, context=None):
        """On change of department, propose the first unfolded stage of that department."""
        if not stage_id:
            # No stage chosen yet: pick the default stage for the department.
            stage_id = self.stage_find(cr, uid, [], department_id, [('fold', '=', False)], context=context)
        return {'value': {'stage_id': stage_id}}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
data = {'partner_phone': False,
'partner_mobile': False,
'email_from': False}
if partner_id:
addr = self.pool.get('res.partner').browse(cr, uid, partner_id, context)
data.update({'partner_phone': addr.phone,
'partner_mobile': addr.mobile,
'email_from': addr.email})
return {'value': data}
    def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
        """ Override of the base.stage method
            Parameter of the stage search taken from the lead:
            - department_id: if set, stages must belong to this section or
              be a default case

            ``domain`` is only read here (extended into a fresh list), so the
            mutable default is safe. Returns the id of the first matching
            stage, or False when none matches.
        """
        if isinstance(cases, (int, long)):
            cases = self.browse(cr, uid, cases, context=context)
        # collect all section_ids
        department_ids = []
        if section_id:
            department_ids.append(section_id)
        for case in cases:
            if case.department_id:
                department_ids.append(case.department_id.id)
        # OR all section_ids and OR with case_default
        # (resulting domain: department in ids OR department unset)
        search_domain = []
        if department_ids:
            search_domain += ['|', ('department_id', 'in', department_ids)]
        search_domain.append(('department_id', '=', False))
        # AND with the domain in parameter
        search_domain += list(domain)
        # perform search, return the first found
        stage_ids = self.pool.get('hr.recruitment.stage').search(cr, uid, search_domain, order=order, context=context)
        if stage_ids:
            return stage_ids[0]
        return False
    def action_makeMeeting(self, cr, uid, ids, context=None):
        """ This opens Meeting's calendar view to schedule meeting on current applicant
            @return: Dictionary value for created Meeting view
        """
        applicant = self.browse(cr, uid, ids[0], context)
        # Invite the applicant's contact and the department manager (if any).
        applicant_ids = []
        if applicant.partner_id:
            applicant_ids.append(applicant.partner_id.id)
        if applicant.department_id and applicant.department_id.manager_id and applicant.department_id.manager_id.user_id and applicant.department_id.manager_id.user_id.partner_id:
            applicant_ids.append(applicant.department_id.manager_id.user_id.partner_id.id)
        # Tag the meeting with the predefined "interview" calendar category.
        category = self.pool.get('ir.model.data').get_object(cr, uid, 'hr_recruitment', 'categ_meet_interview', context)
        res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
        res['context'] = {
            'default_partner_ids': applicant_ids,
            'default_user_id': uid,
            'default_name': applicant.name,
            'default_categ_ids': category and [category.id] or False,
        }
        return res
    def action_start_survey(self, cr, uid, ids, context=None):
        """Open the interview survey for the first selected applicant,
        creating (and linking) a survey response on first use."""
        context = context if context else {}
        applicant = self.browse(cr, uid, ids, context=context)[0]
        survey_obj = self.pool.get('survey.survey')
        response_obj = self.pool.get('survey.user_input')
        # create a response and link it to this applicant
        if not applicant.response_id:
            response_id = response_obj.create(cr, uid, {'survey_id': applicant.survey.id, 'partner_id': applicant.partner_id.id}, context=context)
            self.write(cr, uid, ids[0], {'response_id': response_id}, context=context)
        else:
            response_id = applicant.response_id.id
        # grab the token of the response and start surveying
        response = response_obj.browse(cr, uid, response_id, context=context)
        context.update({'survey_token': response.token})
        return survey_obj.action_start_survey(cr, uid, [applicant.survey.id], context=context)
    def action_print_survey(self, cr, uid, ids, context=None):
        """ If response is available then print this response otherwise print survey form (print template of the survey) """
        context = context if context else {}
        applicant = self.browse(cr, uid, ids, context=context)[0]
        survey_obj = self.pool.get('survey.survey')
        response_obj = self.pool.get('survey.user_input')
        if not applicant.response_id:
            # No answers yet: print the blank survey template.
            return survey_obj.action_print_survey(cr, uid, [applicant.survey.id], context=context)
        else:
            # Pass the response token so the filled-in answers are printed.
            response = response_obj.browse(cr, uid, applicant.response_id.id, context=context)
            context.update({'survey_token': response.token})
            return survey_obj.action_print_survey(cr, uid, [applicant.survey.id], context=context)
    def action_get_attachment_tree_view(self, cr, uid, ids, context=None):
        """Return the attachment list action filtered on the selected applicants."""
        model, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'action_attachment')
        action = self.pool.get(model).read(cr, uid, action_id, context=context)
        # New attachments created from this view default to the first applicant.
        action['context'] = {'default_res_model': self._name, 'default_res_id': ids[0]}
        action['domain'] = str(['&', ('res_model', '=', self._name), ('res_id', 'in', ids)])
        return action
    def message_get_suggested_recipients(self, cr, uid, ids, context=None):
        """Suggest the applicant's partner (or raw email) as a follower candidate."""
        recipients = super(hr_applicant, self).message_get_suggested_recipients(cr, uid, ids, context=context)
        for applicant in self.browse(cr, uid, ids, context=context):
            if applicant.partner_id:
                self._message_add_suggested_recipient(cr, uid, recipients, applicant, partner=applicant.partner_id, reason=_('Contact'))
            elif applicant.email_from:
                # No partner record: fall back to the plain email address.
                self._message_add_suggested_recipient(cr, uid, recipients, applicant, email=applicant.email_from, reason=_('Contact Email'))
        return recipients
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
val = msg.get('from').split('<')[0]
defaults = {
'name': msg.get('subject') or _("No Subject"),
'partner_name': val,
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'user_id': False,
'partner_id': msg.get('author_id', False),
}
if msg.get('priority'):
defaults['priority'] = msg.get('priority')
defaults.update(custom_values)
return super(hr_applicant, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
    def create(self, cr, uid, vals, context=None):
        """Create an applicant, propagating department/job defaults and
        notifying the applied job's followers."""
        if context is None:
            context = {}
        # Skip the automatic "created" log entry on the new record.
        context['mail_create_nolog'] = True
        if vals.get('department_id') and not context.get('default_department_id'):
            context['default_department_id'] = vals.get('department_id')
        if vals.get('job_id') or context.get('default_job_id'):
            # Fill department/responsible from the applied job.
            job_id = vals.get('job_id') or context.get('default_job_id')
            vals.update(self.onchange_job(cr, uid, [], job_id, context=context)['value'])
        obj_id = super(hr_applicant, self).create(cr, uid, vals, context=context)
        applicant = self.browse(cr, uid, obj_id, context=context)
        if applicant.job_id:
            name = applicant.partner_name if applicant.partner_name else applicant.name
            self.pool['hr.job'].message_post(
                cr, uid, [applicant.job_id.id],
                body=_('New application from %s') % name,
                subtype="hr_recruitment.mt_job_applicant_new", context=context)
        return obj_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
res = True
# user_id change: update date_open
if vals.get('user_id'):
vals['date_open'] = fields.datetime.now()
# stage_id: track last stage before update
if 'stage_id' in vals:
vals['date_last_stage_update'] = fields.datetime.now()
for applicant in self.browse(cr, uid, ids, context=None):
vals['last_stage_id'] = applicant.stage_id.id
res = super(hr_applicant, self).write(cr, uid, [applicant.id], vals, context=context)
else:
res = super(hr_applicant, self).write(cr, uid, ids, vals, context=context)
# post processing: if job changed, post a message on the job
if vals.get('job_id'):
for applicant in self.browse(cr, uid, ids, context=None):
name = applicant.partner_name if applicant.partner_name else applicant.name
self.pool['hr.job'].message_post(
cr, uid, [vals['job_id']],
body=_('New application from %s') % name,
subtype="hr_recruitment.mt_job_applicant_new", context=context)
# post processing: if stage changed, post a message in the chatter
if vals.get('stage_id'):
stage = self.pool['hr.recruitment.stage'].browse(cr, uid, vals['stage_id'], context=context)
if stage.template_id:
# TDENOTE: probably factorize me in a message_post_with_template generic method FIXME
compose_ctx = dict(context,
active_ids=ids)
compose_id = self.pool['mail.compose.message'].create(
cr, uid, {
'model': self._name,
'composition_mode': 'mass_mail',
'template_id': stage.template_id.id,
'same_thread': True,
'post': True,
'notify': True,
}, context=compose_ctx)
self.pool['mail.compose.message'].write(
cr, uid, [compose_id],
self.pool['mail.compose.message'].onchange_template_id(
cr, uid, [compose_id],
stage.template_id.id, 'mass_mail', self._name, False,
context=compose_ctx)['value'],
context=compose_ctx)
self.pool['mail.compose.message'].send_mail(cr, uid, [compose_id], context=compose_ctx)
return res
def create_employee_from_applicant(self, cr, uid, ids, context=None):
""" Create an hr.employee from the hr.applicants """
if context is None:
context = {}
hr_employee = self.pool.get('hr.employee')
model_data = self.pool.get('ir.model.data')
act_window = self.pool.get('ir.actions.act_window')
emp_id = False
for applicant in self.browse(cr, uid, ids, context=context):
address_id = contact_name = False
if applicant.partner_id:
address_id = self.pool.get('res.partner').address_get(cr, uid, [applicant.partner_id.id], ['contact'])['contact']
contact_name = self.pool.get('res.partner').name_get(cr, uid, [applicant.partner_id.id])[0][1]
if applicant.job_id and (applicant.partner_name or contact_name):
applicant.job_id.write({'no_of_hired_employee': applicant.job_id.no_of_hired_employee + 1}, context=context)
create_ctx = dict(context, mail_broadcast=True)
emp_id = hr_employee.create(cr, uid, {'name': applicant.partner_name or contact_name,
'job_id': applicant.job_id.id,
'address_home_id': address_id,
'department_id': applicant.department_id.id or False,
'address_id': applicant.company_id and applicant.company_id.partner_id and applicant.company_id.partner_id.id or False,
'work_email': applicant.department_id and applicant.department_id.company_id and applicant.department_id.company_id.email or False,
'work_phone': applicant.department_id and applicant.department_id.company_id and applicant.department_id.company_id.phone or False,
}, context=create_ctx)
self.write(cr, uid, [applicant.id], {'emp_id': emp_id}, context=context)
self.pool['hr.job'].message_post(
cr, uid, [applicant.job_id.id],
body=_('New Employee %s Hired') % applicant.partner_name if applicant.partner_name else applicant.name,
subtype="hr_recruitment.mt_job_applicant_hired", context=context)
else:
raise osv.except_osv(_('Warning!'), _('You must define an Applied Job and a Contact Name for this applicant.'))
action_model, action_id = model_data.get_object_reference(cr, uid, 'hr', 'open_view_employee_list')
dict_act_window = act_window.read(cr, uid, action_id, [])
if emp_id:
dict_act_window['res_id'] = emp_id
dict_act_window['view_mode'] = 'form,tree'
return dict_act_window
def get_empty_list_help(self, cr, uid, help, context=None):
context['empty_list_help_model'] = 'hr.job'
context['empty_list_help_id'] = context.get('default_job_id', None)
context['empty_list_help_document_name'] = _("job applicants")
return super(hr_applicant, self).get_empty_list_help(cr, uid, help, context=context)
class hr_job(osv.osv):
    """Job position, extended for recruitment: mail alias for inbound
    applications, interview survey, application and attachment counters."""
    _inherit = "hr.job"
    _name = "hr.job"
    # Each job owns a mail.alias record (delegation inheritance).
    _inherits = {'mail.alias': 'alias_id'}
    def _get_attached_docs(self, cr, uid, ids, field_name, arg, context=None):
        """Functional field: attachments of the job itself plus of all its applicants."""
        res = {}
        attachment_obj = self.pool.get('ir.attachment')
        for job_id in ids:
            applicant_ids = self.pool.get('hr.applicant').search(cr, uid, [('job_id', '=', job_id)], context=context)
            res[job_id] = attachment_obj.search(
                cr, uid, [
                    '|',
                    '&', ('res_model', '=', 'hr.job'), ('res_id', '=', job_id),
                    '&', ('res_model', '=', 'hr.applicant'), ('res_id', 'in', applicant_ids)
                ], context=context)
        return res
    def _count_all(self, cr, uid, ids, field_name, arg, context=None):
        """Functional field (multi): application and document counts per job."""
        Applicant = self.pool['hr.applicant']
        return {
            job_id: {
                'application_count': Applicant.search_count(cr,uid, [('job_id', '=', job_id)], context=context),
                'documents_count': len(self._get_attached_docs(cr, uid, [job_id], field_name, arg, context=context)[job_id])
            }
            for job_id in ids
        }
    _columns = {
        'survey_id': fields.many2one('survey.survey', 'Interview Form', help="Choose an interview form for this job position and you will be able to print/answer this interview from all applicants who apply for this job"),
        'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
                                    help="Email alias for this job position. New emails will automatically "
                                         "create new applicants for this job position."),
        'address_id': fields.many2one('res.partner', 'Job Location', help="Address where employees are working"),
        'application_ids': fields.one2many('hr.applicant', 'job_id', 'Applications'),
        'application_count': fields.function(_count_all, type='integer', string='Applications', multi=True),
        'manager_id': fields.related('department_id', 'manager_id', type='many2one', string='Department Manager', relation='hr.employee', readonly=True, store=True),
        'document_ids': fields.function(_get_attached_docs, type='one2many', relation='ir.attachment', string='Applications'),
        'documents_count': fields.function(_count_all, type='integer', string='Documents', multi=True),
        'user_id': fields.many2one('res.users', 'Recruitment Responsible', track_visibility='onchange'),
        'color': fields.integer('Color Index'),
    }
    def _address_get(self, cr, uid, context=None):
        # Default work location: the current user's company address.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return user.company_id.partner_id.id
    _defaults = {
        'address_id': _address_get
    }
    def _auto_init(self, cr, context=None):
        """Installation hook to create aliases for all jobs and avoid constraint errors."""
        return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(hr_job, self)._auto_init,
            'hr.applicant', self._columns['alias_id'], 'name', alias_prefix='job+', alias_defaults={'job_id': 'id'}, context=context)
    def create(self, cr, uid, vals, context=None):
        """Create the job and wire its mail alias to spawn applicants on this job."""
        alias_context = dict(context, alias_model_name='hr.applicant', alias_parent_model_name=self._name)
        job_id = super(hr_job, self).create(cr, uid, vals, context=alias_context)
        job = self.browse(cr, uid, job_id, context=context)
        self.pool.get('mail.alias').write(cr, uid, [job.alias_id.id], {'alias_parent_thread_id': job_id, "alias_defaults": {'job_id': job_id}}, context)
        return job_id
    def unlink(self, cr, uid, ids, context=None):
        # Cascade-delete mail aliases as well, as they should not exist without the job position.
        mail_alias = self.pool.get('mail.alias')
        alias_ids = [job.alias_id.id for job in self.browse(cr, uid, ids, context=context) if job.alias_id]
        res = super(hr_job, self).unlink(cr, uid, ids, context=context)
        mail_alias.unlink(cr, uid, alias_ids, context=context)
        return res
    def action_print_survey(self, cr, uid, ids, context=None):
        """Print the blank interview form attached to this job."""
        job = self.browse(cr, uid, ids, context=context)[0]
        survey_id = job.survey_id.id
        return self.pool.get('survey.survey').action_print_survey(cr, uid, [survey_id], context=context)
    def action_get_attachment_tree_view(self, cr, uid, ids, context=None):
        #open attachments of job and related applicantions.
        model, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'action_attachment')
        action = self.pool.get(model).read(cr, uid, action_id, context=context)
        applicant_ids = self.pool.get('hr.applicant').search(cr, uid, [('job_id', 'in', ids)], context=context)
        action['context'] = {'default_res_model': self._name, 'default_res_id': ids[0]}
        action['domain'] = str(['|', '&', ('res_model', '=', 'hr.job'), ('res_id', 'in', ids), '&', ('res_model', '=', 'hr.applicant'), ('res_id', 'in', applicant_ids)])
        return action
    def action_set_no_of_recruitment(self, cr, uid, id, value, context=None):
        """Kanban helper: set the expected number of new employees for this job."""
        return self.write(cr, uid, [id], {'no_of_recruitment': value}, context=context)
class applicant_category(osv.osv):
    """ Category of applicant """
    _name = "hr.applicant_category"
    _description = "Category of applicant"
    # Simple tag model used by hr.applicant.categ_ids (many2many).
    _columns = {
        'name': fields.char('Name', size=64, required=True, translate=True),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| bealdav/OpenUpgrade | addons/hr_recruitment/hr_recruitment.py | Python | agpl-3.0 | 33,759 |
import pytest
from cfme import test_requirements
from cfme.infrastructure.provider import InfraProvider
from cfme.markers.env_markers.provider import ONE
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.update import update
# Module-wide marks: tagging requirement, tier 2, a single infra provider
# that declares datacenters and clusters, with the provider set up first.
pytestmark = [
    test_requirements.tag, pytest.mark.tier(2),
    pytest.mark.provider(
        classes=[InfraProvider],
        required_fields=[
            'datacenters',
            'clusters'
        ],
        selector=ONE
    ),
    pytest.mark.usefixtures('setup_provider')
]
# (collection name, navigation destination) pairs parametrizing the fixture
# below; None means "navigate from the collection's own All page".
test_items = [
    ('clusters', None),
    ('infra_vms', 'ProviderVms'),
    ('infra_templates', 'ProviderTemplates')
]
@pytest.fixture(params=test_items, ids=[collection_type for collection_type, _ in test_items],
                scope='function')
def testing_vis_object(request, provider, appliance):
    """ Fixture creates class object for tag visibility test
    Returns: class object of certain type
    """
    collection_name, destination = request.param
    collection = getattr(appliance.collections, collection_name)
    # Clusters navigate from the collection's All page; VMs/templates use a
    # provider-scoped destination page instead.
    view = navigate_to(provider, destination) if destination else navigate_to(collection, 'All')
    names = view.entities.entity_names
    if not names:
        # Nothing to instantiate on this provider: skip rather than fail.
        pytest.skip("No content found for test of {}".format(collection))
    return collection.instantiate(name=names[0], provider=provider)
@pytest.fixture(scope='function')
def group_tag_datacenter_combination(group_with_tag, provider):
    """Restrict the tagged group to the provider's first datacenter."""
    with update(group_with_tag):
        group_with_tag.host_cluster = ([provider.data['name'],
                                        provider.data['datacenters'][0]], True)
@pytest.mark.meta(blockers=[BZ(1533391, forced_streams=["5.9", "upstream"])])
@pytest.mark.parametrize('visibility', [True, False], ids=['visible', 'not_visible'])
def test_tagvis_tag_datacenter_combination(testing_vis_object, group_tag_datacenter_combination,
                                     check_item_visibility, visibility):
    """ Tests template visibility with combination of tag and selected
        datacenter filters in the group
        Prerequisites:
            Catalog, tag, role, group and restricted user should be created
        Steps:
            1. As admin add tag
            2. Login as restricted user, item is visible for user
            3. As admin remove tag
            4. Login as restricted user, item is not visible for user
    """
    check_item_visibility(testing_vis_object, visibility)
| anurag03/integration_tests | cfme/tests/infrastructure/test_infra_tag_filters_combination.py | Python | gpl-2.0 | 2,510 |
"""
Tests for `kolibri.utils.cli` module.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import copy
import logging
import os
from functools import wraps
import pytest
from mock import patch
import kolibri
from kolibri.core.deviceadmin.tests.test_dbrestore import is_sqlite_settings
from kolibri.utils import cli
from kolibri.utils import options
logger = logging.getLogger(__name__)
# Collects (LEVEL, msg) tuples captured by the monkeypatched Logger._log.
LOG_LOGGER = []
def version_file_restore(func):
    """
    Decorator that reads contents of the version file and restores it after
    calling ``func(orig_version='x.y', version_file='/path')``.

    If a version file doesn't exist, it calls ``func(... version_file=None)``

    This decorator is used for testing functions that trigger during upgrades
    without mocking more than necessary.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        version_file = cli.version_file()
        version_file_existed = os.path.isfile(version_file)
        orig_version = kolibri.__version__
        kwargs['orig_version'] = orig_version
        # Only pass the path when the file actually exists; the kwarg stays
        # at the test's own default (None) otherwise.
        if version_file_existed:
            kwargs['version_file'] = version_file
        func(*args, **kwargs)
        # Restore the original version so later tests see a clean state.
        if version_file_existed:
            open(version_file, "w").write(orig_version)
    return wrapper
def log_logger(logger_instance, LEVEL, msg, args, **kwargs):
    """
    Monkeypatching for logging.Logger._log to scoop up log messages if we wanna
    test something specific was logged.
    """
    LOG_LOGGER.append(
        (LEVEL, msg)
    )
    # Call the original function
    # (``__log`` is where activate_log_logger stashed the real _log; no name
    # mangling applies here because we are not inside a class body)
    logger_instance.__log(LEVEL, msg, args, **kwargs)
def activate_log_logger(monkeypatch):
    """
    Activates logging everything to ``LOG_LOGGER`` with the monkeypatch pattern
    of py.test (test accepts a ``monkeypatch`` argument)
    """
    # Save the real _log as __log, then divert _log through log_logger above.
    monkeypatch.setattr(logging.Logger, '__log', logging.Logger._log, raising=False)
    monkeypatch.setattr(logging.Logger, '_log', log_logger)
@pytest.fixture
def conf():
    # Snapshot the plugin configuration and restore it after the test so
    # enable/disable side effects do not leak between tests.
    from kolibri.utils import conf
    old_config = copy.deepcopy(conf.config)
    yield conf
    conf.update(old_config)
    conf.save()
def test_bogus_plugin_autoremove(conf):
    """An unimportable dotted-path plugin is dropped from INSTALLED_APPS."""
    bogus = "giraffe.horse"
    conf.config["INSTALLED_APPS"].append(bogus)
    conf.save()
    conf.autoremove_unavailable_plugins()
    assert bogus not in conf.config["INSTALLED_APPS"]
def test_bogus_plugin_autoremove_no_path(conf):
    """A bogus plugin without a dotted path is auto-removed as well."""
    bogus = "giraffehorse"
    conf.config["INSTALLED_APPS"].append(bogus)
    conf.save()
    conf.autoremove_unavailable_plugins()
    assert bogus not in conf.config["INSTALLED_APPS"]
def test_bogus_plugin_disable(conf):
    """Disabling a non-existent plugin leaves the configuration untouched."""
    before = conf.config["INSTALLED_APPS"][:]
    cli.plugin("i_do_not_exist", disable=True)
    assert conf.config["INSTALLED_APPS"] == before
def test_plugin_cannot_be_imported_disable(conf):
    """
    A plugin may be in conf.config['INSTALLED_APPS'] but broken or uninstalled
    """
    broken = "giraffe.horse"
    conf.config["INSTALLED_APPS"].append(broken)
    conf.save()
    cli.plugin(broken, disable=True)
    assert broken not in conf.config["INSTALLED_APPS"]
def test_real_plugin_disable(conf):
    """Disabling a real, enabled plugin removes it from INSTALLED_APPS."""
    installed_apps_before = conf.config["INSTALLED_APPS"][:]
    test_plugin = "kolibri.plugins.media_player"
    assert test_plugin in installed_apps_before
    # Because RIP example plugin
    cli.plugin(test_plugin, disable=True)
    assert test_plugin not in conf.config["INSTALLED_APPS"]
def test_real_plugin_disable_twice(conf):
    """Disabling an already-disabled plugin is a harmless no-op."""
    test_plugin = "kolibri.plugins.media_player"
    assert test_plugin in conf.config["INSTALLED_APPS"][:]
    # First disable removes the plugin...
    cli.plugin(test_plugin, disable=True)
    assert test_plugin not in conf.config["INSTALLED_APPS"]
    # ...and a second disable neither raises nor re-adds it.
    cli.plugin(test_plugin, disable=True)
    assert test_plugin not in conf.config["INSTALLED_APPS"]
def test_plugin_with_no_plugin_class(conf):
    """
    Expected behavior is that nothing blows up with exceptions, user just gets
    a warning and nothing is enabled or changed in the configuration.
    """
    # For fun, we pass in a system library
    installed_apps_before = conf.config["INSTALLED_APPS"][:]
    cli.plugin("os.path")
    assert installed_apps_before == conf.config["INSTALLED_APPS"]
@pytest.mark.django_db
def test_kolibri_listen_port_env(monkeypatch):
    """
    Starts and stops the server, mocking the actual server.start()
    Checks that the correct fallback port is used from the environment.
    """
    with patch('kolibri.core.content.utils.annotation.update_channel_metadata'):
        from kolibri.utils import server

        def start_mock(port, *args, **kwargs):
            # The asserted port comes from KOLIBRI_HTTP_PORT set below.
            assert port == test_port

        activate_log_logger(monkeypatch)
        monkeypatch.setattr(server, 'start', start_mock)

        test_port = 1234
        os.environ['KOLIBRI_HTTP_PORT'] = str(test_port)
        # force a reload of conf.OPTIONS so the environment variable will be read in
        from kolibri.utils import conf
        conf.OPTIONS.update(options.read_options_file(conf.KOLIBRI_HOME))

        server.start = start_mock
        cli.start(daemon=False)
        with pytest.raises(SystemExit) as excinfo:
            cli.stop()
            assert excinfo.code == 0

        # Stop the server AGAIN, asserting that we can call the stop command
        # on an already stopped server and will be gracefully informed about
        # it.
        with pytest.raises(SystemExit) as excinfo:
            cli.stop()
            assert excinfo.code == 0
        assert "Already stopped" in LOG_LOGGER[-1][1]

        def status_starting_up():
            raise server.NotRunning(server.STATUS_STARTING_UP)

        # Ensure that if a server is reported to be 'starting up', it doesn't
        # get killed while doing that.
        monkeypatch.setattr(server, 'get_status', status_starting_up)
        with pytest.raises(SystemExit) as excinfo:
            cli.stop()
            assert excinfo.code == server.STATUS_STARTING_UP
        assert "Not stopped" in LOG_LOGGER[-1][1]
@pytest.mark.django_db
@version_file_restore
@patch('kolibri.utils.cli.update')
@patch('kolibri.utils.cli.plugin')
@patch('kolibri.core.deviceadmin.utils.dbbackup')
def test_first_run(
        dbbackup, plugin, update, version_file=None, orig_version=None):
    """
    Tests that the first_run() function performs as expected
    """

    # Simulate a fresh install: no version file on disk.
    if version_file:
        os.unlink(version_file)

    cli.initialize()

    update.assert_called_once()
    # No previous version means nothing to back up.
    dbbackup.assert_not_called()

    # Check that it got called for each default plugin
    from kolibri.core.settings import DEFAULT_PLUGINS
    assert plugin.call_count == len(DEFAULT_PLUGINS)
@pytest.mark.django_db
@version_file_restore
@patch('kolibri.utils.cli.update')
def test_update(update, version_file=None, orig_version=None):
    """
    Tests that update() function performs as expected, creating a database
    backup automatically when version changes
    """

    # Write a different version so initialize() detects an upgrade.
    version_file = cli.version_file()
    open(version_file, "w").write(orig_version + "_test")

    if is_sqlite_settings():
        # Automatic backup only happens for sqlite-backed installs.
        with patch('kolibri.core.deviceadmin.utils.dbbackup') as dbbackup:
            cli.initialize()
            dbbackup.assert_called_once()
    else:
        cli.initialize()

    update.assert_called_once()
@pytest.mark.django_db
@patch('kolibri.utils.cli.update')
@patch('kolibri.core.deviceadmin.utils.dbbackup')
def test_update_no_version_change(dbbackup, update, orig_version=None):
    """
    Tests that when the version doesn't change, we are not doing things we
    shouldn't
    """

    cli.initialize()

    # Same version: neither the upgrade path nor the backup may run.
    update.assert_not_called()
    dbbackup.assert_not_called()
def test_cli_usage():
    """``kolibri -h`` and ``kolibri --version`` both exit cleanly with code 0."""
    for arg in ("-h", "--version"):
        with pytest.raises(SystemExit) as excinfo:
            cli.main(arg)
        assert excinfo.code == 0
def test_cli_parsing():
    """Table-driven check of argv parsing: docopt keys plus Django passthrough."""
    # Each entry: (argv, expected docopt key/values, expected args forwarded
    # verbatim to Django's management command machinery).
    test_patterns = (
        (['start'], {'start': True}, []),
        (['stop'], {'stop': True}, []),
        (['shell'], {'shell': True}, []),
        (['manage', 'shell'], {'manage': True, 'COMMAND': 'shell'}, []),
        (['manage', 'help'], {'manage': True, 'COMMAND': 'help'}, []),
        (['manage', 'blah'], {'manage': True, 'COMMAND': 'blah'}, []),
        (
            ['manage', 'blah', '--debug', '--', '--django-arg'],
            {'manage': True, 'COMMAND': 'blah', '--debug': True},
            ['--django-arg']
        ),
        (
            ['manage', 'blah', '--django-arg'],
            {'manage': True, 'COMMAND': 'blah'},
            ['--django-arg']
        ),
    )

    for p, docopt_expected, django_expected in test_patterns:
        docopt, django = cli.parse_args(p)

        for k, v in docopt_expected.items():
            assert docopt[k] == v

        assert django == django_expected
| DXCanas/kolibri | kolibri/utils/tests/test_cli.py | Python | mit | 9,291 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from derpconf.config import Config # NOQA
# Artificial latency knobs (milliseconds) for the fake OAuth endpoints;
# 0 disables the delay for the corresponding route.
Config.define('OAUTH_LATENCY', 100, 'Login route latency in MS. 0 to none.', 'General')
Config.define('TOKEN_LATENCY', 100, 'Token route latency in MS. 0 to none.', 'General')
Config.define('USERDATA_LATENCY', 100, 'User Data route latency in MS. 0 to none.', 'General')
def generate_config():
    """Print the default derpconf configuration file contents to stdout.

    Uses the function-call form of ``print`` so the module behaves
    identically under Python 2 (single parenthesised argument) and runs
    unmodified under Python 3, instead of the py2-only print statement.
    """
    print(Config.get_config_text())
# Allow running this module directly to dump a sample config file.
if __name__ == '__main__':
    generate_config()
| heynemann/fakebook | fakebook/config/__init__.py | Python | mit | 469 |
# Copyright 2007 Casey Durfee
# Copyright 2007 Gabriel Farrell
#
# This file is part of Kochief.
#
# Kochief is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Kochief is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kochief. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
# Old-style Django URLconf (pre-1.10 ``patterns`` syntax): routes record
# edit pages to the cataloging app's ``edit_record`` view; the regex group
# captures the record identifier.
urlpatterns = patterns('kochief.cataloging.views',
    url(r'^record/(.+)/edit$', 'edit_record', name='cataloging-edit-record'),
)
| edsu/lochief | kochief/cataloging/urls.py | Python | gpl-3.0 | 894 |
# coding=utf-8
from __future__ import absolute_import
__author__ = 'mazesoul'
import struct
from datetime import datetime
from pyfdfs.enums import IP_ADDRESS_SIZE, FDFS_STORAGE_ID_MAX_SIZE, FDFS_DOMAIN_NAME_MAX_SIZE, \
FDFS_VERSION_SIZE, FDFS_SPACE_SIZE_BASE_INDEX, FDFS_GROUP_NAME_MAX_LEN
class BaseAttr(object):
    """Descriptor that stores its value in the owner instance's ``_data`` dict.

    ``name`` is the key used inside ``_data`` (created by ``BaseMeta.__call__``);
    ``val`` is the default returned when nothing has been stored yet.
    """
    def __init__(self, name, val=None):
        if name is None:
            raise Exception("name is required! cannot be None")
        self.name = name
        self.val = val
    def __get__(self, obj, owner):
        # Accessed on the class itself -> return the descriptor, per protocol.
        if obj is None:
            return self
        return obj._data.get(self.name, self.val)
    def __set__(self, obj, val):
        obj._data[self.name] = val
    def __delete__(self, obj):
        del obj._data[self.name]
class IntAttr(BaseAttr):
    """Attribute descriptor whose missing/falsy default collapses to 0."""
    def __init__(self, name, val=None):
        super(IntAttr, self).__init__(name, val or 0)
class StrAttr(BaseAttr):
    """String attribute; stored values are stringified and NUL-stripped."""
    def __init__(self, name, val=None):
        super(StrAttr, self).__init__(name, val or "")
    def __set__(self, obj, val):
        # fastdfs packs fixed-width fields padded with NUL bytes; drop them.
        cleaned = str(val).strip("\x00")
        obj._data[self.name] = cleaned
class DatetimeAttr(BaseAttr):
    """Epoch-seconds attribute stored as a local-time ISO-8601 string."""
    def __init__(self, name, val=None):
        default = datetime.fromtimestamp(val or 0).isoformat()
        super(DatetimeAttr, self).__init__(name, default)
    def __set__(self, obj, val):
        # NOTE(review): fromtimestamp() uses the *local* timezone -- confirm
        # that is intended for tracker-reported epoch values.
        obj._data[self.name] = datetime.fromtimestamp(val).isoformat()
class SpaceAttr(StrAttr):
    """Byte-count attribute rendered as a human-readable size string.

    ``index`` is the offset into ``suffix`` of the unit the raw value is
    expressed in (e.g. ``FDFS_SPACE_SIZE_BASE_INDEX`` for the ``*_mb``
    fields of StorageInfo/GroupInfo).
    """
    def __init__(self, name, index=None):
        self.suffix = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB',)
        self.index = index or 0
        val = "0B"
        super(SpaceAttr, self).__init__(name, val)
    def __set__(self, obj, val):
        multiples = 1024.0
        # Values below one multiple keep the base unit unchanged.
        # NOTE(review): '{0:d}' assumes ``val`` is an integer here; a float
        # below 1024 would raise ValueError -- confirm callers assign ints.
        if val < multiples:
            obj._data[self.name] = '{0:d}{1}'.format(val, self.suffix[self.index])
            return
        # Repeatedly divide by 1024 until the value fits the current unit.
        for suffix in self.suffix[self.index:]:
            if val < multiples:
                obj._data[self.name] = '{0:.2f}{1}'.format(val, suffix)
                return
            val /= multiples
        # Fallthrough: value exceeded even the largest suffix; the raw
        # (repeatedly divided) float is stored without a unit string.
        obj._data[self.name] = val
class BaseInfo(object):
    """Shared behaviour for struct-backed info records.

    Subclasses provide ``desc`` (human-readable label), ``fmt`` (a ``struct``
    format string) and ``attributes`` (field names in unpack order).
    """
    def __str__(self):
        lines = ["%s:\n" % self.desc]
        lines.extend(
            "\t%s = %s\n" % (name.replace("_", " "), getattr(self, name))
            for name in self.attributes
        )
        return "".join(lines)
    def get_fmt_size(self):
        """Return the packed byte size of ``fmt`` (0 when no format is set)."""
        return struct.calcsize(getattr(self, "fmt", ""))
    def set_info(self, byte_stream):
        """Unpack ``byte_stream`` with ``fmt`` and assign fields in order."""
        for idx, value in enumerate(struct.unpack(self.fmt, byte_stream)):
            setattr(self, self.attributes[idx], value)
class BaseMeta(type):
    """Metaclass that turns declared field lists into attribute descriptors.

    For every name in the class's ``attributes`` tuple it installs a
    descriptor: StrAttr if listed in ``str_attrs``, DatetimeAttr if in
    ``date_attrs``, SpaceAttr if in ``space_attrs``, IntAttr otherwise.
    Explicitly defined class attributes (e.g. a hand-built SpaceAttr)
    take precedence over the generated ones.
    """
    def __new__(cls, name, bases, attrs):
        new_attrs = {}
        # The *_attrs marker tuples are consumed here and do not survive on
        # the final class.
        str_attrs = attrs.pop("str_attrs", [])
        date_attrs = attrs.pop("date_attrs", [])
        space_attrs = attrs.pop("space_attrs", [])
        attr_list = attrs.pop("attributes", [])
        new_attrs["attributes"] = attr_list
        for item in attr_list:
            attr_obj = attrs.get(item)
            if attr_obj is None:
                if item in str_attrs:
                    attr_obj = StrAttr(item)
                elif item in date_attrs:
                    attr_obj = DatetimeAttr(item)
                elif item in space_attrs:
                    attr_obj = SpaceAttr(item)
                else:
                    attr_obj = IntAttr(item)
            new_attrs[item] = attr_obj
        # Carry over everything else (docstring, fmt, desc, explicit attrs).
        for attr_name, attr in attrs.items():
            new_attrs[attr_name] = attr
        # BaseInfo is injected as the first base so every record gets
        # __str__/get_fmt_size/set_info for free.
        return super(BaseMeta, cls).__new__(cls, name, (BaseInfo,) + bases, new_attrs)
    def __call__(cls, *args, **kwargs):
        # Give each instance the per-object backing store the descriptors use.
        obj = type.__call__(cls, *args, **kwargs)
        obj._data = {}
        return obj
class StorageInfo(object):
    """
    @ 1 byte: status
    @ FDFS_STORAGE_ID_MAX_SIZE bytes: id
    @ IP_ADDRESS_SIZE bytes: ip_addr
    @ FDFS_DOMAIN_NAME_MAX_SIZE bytes: domain_name
    @ FDFS_STORAGE_ID_MAX_SIZE bytes: src_ip
    @ FDFS_VERSION_SIZE bytes: version
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: join_time
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: up_time
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_mb
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: free_mb
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: upload_priority
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: store_path_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: subdir_count_per_path
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: current_write_path
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage_port
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage_http_port
    @ 4 bytes: alloc_count
    @ 4 bytes: current_count
    @ 4 bytes: max_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_upload_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_upload_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_append_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_append_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_modify_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_modify_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_truncate_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_truncate_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_set_meta_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_set_meta_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_delete_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_delete_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_download_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_download_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_get_meta_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_get_meta_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_create_link_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_create_link_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_delete_link_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_delete_link_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_upload_bytes
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_upload_bytes
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_append_bytes
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_append_bytes
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_modify_bytes
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_modify_bytes
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_download_bytes
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_download_bytes
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_sync_in_bytes
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_sync_in_bytes
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_sync_out_bytes
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_sync_out_bytes
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_file_open_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_file_open_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_file_read_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_file_read_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_file_write_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: success_file_write_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: last_source_update
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: last_sync_update
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: last_synced_timestamp
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: last_heart_beat_time
    @ 1 byte: if_trunk_server
    """
    # NOTE: ``__metaclass__`` is Python-2 syntax; under Python 3 this
    # attribute is inert and BaseMeta would have to be passed as
    # ``metaclass=`` in the class header instead.
    __metaclass__ = BaseMeta
    desc = "Storage information"
    # Struct layout: B + 5 fixed-width strings + 10Q + 3L + 42Q + B = 62
    # values, matching the 62 names in ``attributes`` below.  Keep the
    # docstring field list, this format and ``attributes`` in sync.
    fmt = '!B %ds %ds %ds %ds %ds 10Q 3L 42Q B' % (FDFS_STORAGE_ID_MAX_SIZE, IP_ADDRESS_SIZE,
                                                   FDFS_DOMAIN_NAME_MAX_SIZE, FDFS_STORAGE_ID_MAX_SIZE,
                                                   FDFS_VERSION_SIZE)
    attributes = ("status", "id", "ip_addr", "domain_name", "src_ip", "version", "join_time", "up_time",
                  "total_mb", "free_mb", "upload_priority", "store_path_count", "subdir_count_per_path",
                  "current_write_path", "storage_port", "storage_http_port",
                  "alloc_count", "current_count", "max_count",
                  "total_upload_count", "success_upload_count", "total_append_count", "success_append_count",
                  "total_modify_count", "success_modify_count", "total_truncate_count", "success_truncate_count",
                  "total_set_meta_count", "success_set_meta_count", "total_delete_count", "success_delete_count",
                  "total_download_count", "success_download_count", "total_get_meta_count", "success_get_meta_count",
                  "total_create_link_count", "success_create_link_count", "total_delete_link_count",
                  "success_delete_link_count", "total_upload_bytes", "success_upload_bytes", "total_append_bytes",
                  "success_append_bytes", "total_modify_bytes", "success_modify_bytes", "total_download_bytes",
                  "success_download_bytes", "total_sync_in_bytes", "success_sync_in_bytes", "total_sync_out_bytes",
                  "success_sync_out_bytes", "total_file_open_count", "success_file_open_count", "total_file_read_count",
                  "success_file_read_count", "total_file_write_count", "success_file_write_count", "last_source_update",
                  "last_sync_update", "last_synced_timestamp", "last_heart_beat_time", "if_trunk_server",)
    # Marker tuples consumed by BaseMeta to pick descriptor types.
    str_attrs = ("id", "ip_addr", "domain_name", "src_ip", "version",)
    date_attrs = ("join_time", "up_time", "last_source_update", "last_sync_update",
                  "last_synced_timestamp", "last_heart_beat_time",)
    space_attrs = ("total_upload_bytes", "success_upload_bytes", "total_append_bytes",
                   "success_append_bytes", "total_modify_bytes", "success_modify_bytes", "total_download_bytes",
                   "success_download_bytes", "total_sync_in_bytes", "success_sync_in_bytes", "total_sync_out_bytes",
                   "success_sync_out_bytes",)
    # *_mb fields are reported in MB, so their SpaceAttr starts at the MB unit.
    total_mb = SpaceAttr("total_mb", FDFS_SPACE_SIZE_BASE_INDEX)
    free_mb = SpaceAttr("free_mb", FDFS_SPACE_SIZE_BASE_INDEX)
class BasicStorageInfo(object):
    """
    @ FDFS_GROUP_NAME_MAX_LEN bytes: group_name
    @ IP_ADDRESS_SIZE bytes: ip_addr
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: current_write_path
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage_port
    """
    # NOTE: ``__metaclass__`` is Python-2 syntax (inert on Python 3).
    __metaclass__ = BaseMeta
    desc = "BasicStorageInfo information"
    # NOTE(review): the docstring says storage_port is TRACKER_PROTO_PKG_LEN_SIZE
    # (8) bytes, but the format packs it as 'B' (1 byte) -- confirm which is
    # correct against the fastdfs tracker protocol.
    fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE)
    attributes = ("group_name", "ip_addr", "current_write_path", "storage_port",)
    str_attrs = ("group_name", "ip_addr",)
class GroupInfo(object):
    """
    @ FDFS_GROUP_NAME_MAX_LEN + 1 bytes: group_name
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: total_mb
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: free_mb
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: trunk_free_mb
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage_port
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage_http_port
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: active_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: current_write_server
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: store_path_count
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: subdir_count_per_path
    @ TRACKER_PROTO_PKG_LEN_SIZE bytes: current_trunk_file_id
    """
    # NOTE: ``__metaclass__`` is Python-2 syntax (inert on Python 3).
    __metaclass__ = BaseMeta
    desc = "Group information"
    # One string field plus eleven unsigned 64-bit counters (12 values),
    # matching the 12 names in ``attributes``.
    fmt = '!%ds 11Q' % (FDFS_GROUP_NAME_MAX_LEN + 1)
    attributes = ("group_name", "total_mb", "free_mb", "trunk_free_mb", "count", "storage_port", "storage_http_port",
                  "active_count", "current_write_server", "store_path_count", "subdir_count_per_path",
                  "current_trunk_file_id",)
    str_attrs = ("group_name",)
    # *_mb fields are reported in MB, so their SpaceAttr starts at the MB unit.
    total_mb = SpaceAttr("total_mb", FDFS_SPACE_SIZE_BASE_INDEX)
    free_mb = SpaceAttr("free_mb", FDFS_SPACE_SIZE_BASE_INDEX)
    trunk_free_mb = SpaceAttr("trunk_free_mb", FDFS_SPACE_SIZE_BASE_INDEX)
class StorageResponseInfo(object):
    """
    @ FDFS_GROUP_NAME_MAX_LEN bytes: group_name
    @ filename bytes: filename
    """
    # NOTE: ``__metaclass__`` is Python-2 syntax (inert on Python 3).
    __metaclass__ = BaseMeta
    desc = "StorageResponseInfo information"
    # No ``fmt`` is declared: the filename field is variable-length, so this
    # record cannot be filled via set_info(); get_fmt_size() falls back to 0.
    attributes = ("group_name", "filename",)
    str_attrs = ("group_name", "filename",)
| Forrest-Liu/pyfdfs | pyfdfs/structs.py | Python | gpl-2.0 | 12,010 |
import json
import time
import requests
URL_HEAD = 'http://0.0.0.0:8001'
def create_entry(word, language, pos, definition, def_language):
    """POST a new dictionary entry to the local API and assert it succeeded."""
    payload = {
        'definitions': [{
            'definition': definition,
            'definition_language': def_language
        }],
        'word': word,
        'part_of_speech': pos,
    }
    # NOTE(review): the payload is serialised twice (json.dumps inside the
    # ``json=`` kwarg), so the body is a JSON *string* -- the server
    # apparently expects that; confirm before simplifying.
    resp = requests.post(
        URL_HEAD + '/entry/%s/create' % language,
        json=json.dumps(payload)
    )
    assert resp.status_code == 200, resp.status_code
if __name__ == '__main__':
    # Smoke-load the API: create 1500 entries at roughly 20 requests/second.
    for i in range(1500):
        time.sleep(.05)
        create_entry('anoki%d' % i, 'kz', 'ana', 'fanandramana %d' % i, 'mg')
| radomd92/botjagwar | test_utils/live_test.py | Python | mit | 672 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import os
from itertools import chain
from setuptools import find_packages, setup
from celery_redis_sentinel import __author__, __version__
def read(fname):
    """Return the UTF-8 decoded contents of *fname*, relative to this file."""
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, 'rb') as fid:
        return fid.read().decode('utf-8')
def remove_section_from_rst(text, section):
    """Strip one titled section (heading, underline and body) out of *text*.

    *section* must be the exact heading line, followed by a uniform
    underline (e.g. ``----``).  Everything from the line before the heading
    up to (but not including) the line before the next same-style heading
    is dropped; another such heading must exist later in the document.
    """
    lines = text.splitlines()
    heading = lines.index(section)
    underline = lines[heading + 1]
    # The underline must consist of a single repeated character.
    assert set(underline) == {underline[0]}
    marker = underline[0]
    offset = next(
        idx for idx, candidate in enumerate(lines[heading + 2:])
        if set(candidate) == {marker}
    )
    kept = chain(lines[:heading - 1], lines[heading + offset:])
    return '\n'.join(kept)
# Load the metadata documents that are stitched into the PyPI long
# description below.
authors = read('AUTHORS.rst')
history = read('HISTORY.rst').replace('.. :changelog:', '')
licence = read('LICENSE.rst')
readme = read('README.rst')
requirements = read('requirements.txt').splitlines() + [
    'setuptools',
]
# Test requirements extend the runtime list; the first line of
# requirements-dev.txt is skipped -- presumably an include/header line
# (TODO confirm against the file).
test_requirements = (
    read('requirements.txt').splitlines() +
    read('requirements-dev.txt').splitlines()[1:]
)
# Unreleased changes are not meaningful on PyPI, so that section is removed.
long_description = remove_section_from_rst(
    '\n\n'.join([readme, history, authors, licence]),
    'Master (not yet on PyPI)'
)
setup(
    name='celery-redis-sentinel',
    version=__version__,
    author=__author__,
    description='Celery broker and results backend implementation for Redis Sentinel',
    long_description=long_description,
    url='https://github.com/dealertrack/celery-redis-sentinel',
    license='MIT',
    packages=find_packages(exclude=['tests', 'tests.*', 'test_tasks', 'test_tasks.*']),
    install_requires=requirements,
    test_suite='tests',
    tests_require=test_requirements,
    keywords=' '.join([
        'celery',
        'redis',
        'sentinel',
        'broker',
        'results',
    ]),
    classifiers=[
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Development Status :: 2 - Pre-Alpha',
    ],
)
| dealertrack/celery-redis-sentinel | setup.py | Python | mit | 2,408 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides functions for parsing and outputting Zulu time."""
import datetime
import pytz
from infra_libs.time_functions import timestamp
def parse_zulu_time(string):
  """Parses a Zulu time string, returning None if unparseable."""
  # strptime has no directive for an *optional* fractional-seconds part
  # (https://bugs.python.org/issue19475), so choose the format based on
  # whether the input contains a '.'.
  if '.' in string:
    fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
  else:
    fmt = "%Y-%m-%dT%H:%M:%SZ"
  try:
    return datetime.datetime.strptime(string, fmt)
  except ValueError:
    return None
def parse_zulu_ts(string):
  """Parses Zulu time and converts into a timestamp or None."""
  parsed = parse_zulu_time(string)
  return None if parsed is None else timestamp.utctimestamp(parsed)
def to_zulu_string(dt):
  """Returns a Zulu time string from a datetime.

  Assumes naive datetime objects are in UTC.
  Ensures the output always has a floating-point number of seconds.
  """
  is_naive = dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None
  if is_naive:
    # Naive datetimes are interpreted as UTC.
    dt = dt.replace(tzinfo=pytz.UTC)
  # Normalise to UTC and drop the '+00:00' offset suffix.
  isodate = dt.astimezone(pytz.UTC).isoformat().split('+')[0]
  if '.' not in isodate:
    # Force a fractional-seconds component so the output shape is uniform.
    isodate += '.0'
  return isodate + 'Z'
| endlessm/chromium-browser | tools/swarming_client/third_party/infra_libs/time_functions/zulu.py | Python | bsd-3-clause | 1,411 |
# -*- coding: utf-8 -*-
from multiprocessing import RawValue, RawArray, Semaphore, Lock
import ctypes
import numpy as np
import tensorflow as tf
class SharedCounter(object):
    """Process-shared integer counter.

    ``val`` holds the current count; ``last_step_update_target`` remembers
    the value at which :meth:`increment` last reported a target hit.
    """
    def __init__(self, initval=0):
        self.val = RawValue('i', initval)
        self.last_step_update_target = RawValue('i', initval)
        self.lock = Lock()

    def increment(self, elapsed_steps=None):
        """Atomically add 1; report whether ``elapsed_steps`` have passed.

        Returns ``(new_value, target_reached)``.  ``target_reached`` is True
        only when ``elapsed_steps`` is given and at least that many
        increments happened since the last True result.
        """
        # BUGFIX: the update now runs under the lock.  RawValue carries no
        # synchronisation of its own, so the previous unlocked "+= 1" could
        # lose increments when several processes raced here (set_value was
        # already lock-protected -- this makes the class self-consistent).
        with self.lock:
            self.val.value += 1
            current = self.val.value
            if (elapsed_steps is not None
                    and (current - self.last_step_update_target.value)
                    >= elapsed_steps):
                self.last_step_update_target.value = current
                return current, True
            return current, False

    def set_value(self, value):
        """Overwrite the counter atomically."""
        with self.lock:
            self.val.value = value

    def value(self):
        """Return the current count (unsynchronised read)."""
        return self.val.value
class Barrier:
    """Single-use turnstile barrier for ``n`` cooperating processes.

    Each caller of :meth:`wait` blocks until ``n`` calls have been made in
    total; the last arrival opens the turnstile and all waiters pass.
    """
    def __init__(self, n):
        self.n = n
        self.counter = SharedCounter(0)
        self.barrier = Semaphore(0)
    def wait(self):
        """Block until ``n`` processes (in total) have called ``wait``."""
        counter = self.counter
        with counter.lock:
            counter.val.value += 1
            arrived = counter.val.value
            if arrived == self.n:
                # Last arrival: open the turnstile.
                self.barrier.release()
        # Pass through and immediately re-open for the next waiter.
        self.barrier.acquire()
        self.barrier.release()
class SharedVars(object):
    """Process-shared flat float buffers sized to a list of TF variables.

    ``params`` is a sequence of objects exposing ``get_shape().as_list()``
    (e.g. ``tf.Variable``); buffers hold one float per scalar element
    across all variables.  Depending on ``opt_type`` the instance exposes
    either ``ms``/``vs``/``lr`` (adam, adamax) or ``vars`` (everything else).
    """
    def __init__(self, params, opt_type=None, lr=0, step=0):
        self.var_shapes = [
            var.get_shape().as_list()
            for var in params]
        # BUGFIX: cast to a plain int -- np.prod/sum yield numpy integers,
        # which multiprocessing.RawArray rejects as a size on Python 3.
        self.size = int(sum(np.prod(shape) for shape in self.var_shapes))
        self.step = RawValue(ctypes.c_int, step)
        if opt_type in ('adam', 'adamax'):
            # Adam-family optimizers need first/second moment buffers and a
            # mutable shared learning rate (branches were identical; merged).
            self.ms = self.malloc_contiguous(self.size)
            self.vs = self.malloc_contiguous(self.size)
            self.lr = RawValue(ctypes.c_float, lr)
        elif opt_type == 'rmsprop':
            # RMSProp accumulator starts at 1.0 (not 0) for stability.
            # BUGFIX: ``np.float`` was removed in NumPy 1.24; the builtin
            # ``float`` is the exact replacement (float64 dtype).
            self.vars = self.malloc_contiguous(
                self.size, np.ones(self.size, dtype=float))
        elif opt_type == 'momentum':
            self.vars = self.malloc_contiguous(self.size)
        else:
            self.vars = self.malloc_contiguous(self.size)
    def malloc_contiguous(self, size, initial_val=None):
        """Allocate a flat shared c_float array, zeroed or pre-filled."""
        if initial_val is None:
            return RawArray(ctypes.c_float, size)
        else:
            return RawArray(ctypes.c_float, initial_val)
class SharedFlags(object):
    """One zero-initialised process-shared int flag per actor."""
    def __init__(self, num_actors):
        # RawArray zero-fills its storage, so every flag starts cleared.
        self.updated = RawArray(ctypes.c_int, num_actors)
| steveKapturowski/tensorflow-rl | utils/shared_memory.py | Python | apache-2.0 | 2,714 |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
# Package metadata; the installable artifact is the single bin/rmate script.
setup(
    name='rmate',
    version='1.0.3',
    url='https://github.com/sclukey/rmate-python',
    description='Edit files over SSH.',
    long_description=long_description,
    author='Steven Clukey',
    author_email='ssclukey@gmail.com',
    license='MIT',
    classifiers=[
        'Topic :: Utilities',
        'Environment :: Console',
        'Operating System :: POSIX',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.4',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    scripts=['bin/rmate'],
)
| sclukey/rmate-python | setup.py | Python | mit | 1,337 |
# Copyright (C) 2011 Jason Anderson
#
#
# This file is part of PseudoTV.
#
# PseudoTV is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV. If not, see <http://www.gnu.org/licenses/>.
import xbmc
import os, struct
from resources.lib.FileAccess import FileAccess
class AVIChunk:
    """A RIFF data chunk: 4-byte little-endian size followed by payload."""
    def __init__(self):
        self.empty()
    def empty(self):
        """Reset to a pristine, contentless chunk."""
        self.size = 0
        self.fourcc = ''
        self.datatype = 1
        self.chunk = ''
    def read(self, thefile):
        """Read the chunk size and payload from ``thefile``.

        Corrupt sizes, or sizes outside (0, 10000), leave the chunk empty --
        the cap guards against reading huge payloads from corrupt files.
        """
        raw = thefile.read(4)
        try:
            size = struct.unpack('<i', raw)[0]
        except Exception:
            size = 0
        if 0 < size < 10000:
            self.size = size
            self.chunk = thefile.read(size)
        else:
            self.size = 0
            self.chunk = ''
class AVIList:
    """A RIFF LIST header: 4-byte little-endian size plus a FOURCC tag."""
    def __init__(self):
        self.empty()
    def empty(self):
        """Reset to an empty LIST header."""
        self.size = 0
        self.fourcc = ''
        self.datatype = 2
    def read(self, thefile):
        """Read the list size and FOURCC type from ``thefile``."""
        raw = thefile.read(4)
        try:
            self.size = struct.unpack('<i', raw)[0]
        except Exception:
            self.size = 0
        self.fourcc = thefile.read(4)
class AVIHeader:
    """Decoded fields of the AVI main header ('avih') chunk."""
    _FIELDS = ('dwMicroSecPerFrame', 'dwMaxBytesPerSec', 'dwPaddingGranularity',
               'dwFlags', 'dwTotalFrames', 'dwInitialFrames', 'dwStreams',
               'dwSuggestedBufferSize', 'dwWidth', 'dwHeight')
    def __init__(self):
        self.empty()
    def empty(self):
        """Zero every header field."""
        for field in self._FIELDS:
            setattr(self, field, 0)
class AVIStreamHeader:
    """Decoded fields of an AVI stream header ('strh') chunk."""
    def __init__(self):
        self.empty()
    def empty(self):
        """Reset every field: FOURCC/frame strings to '' and numerics to 0."""
        self.fccType = ''
        self.fccHandler = ''
        self.rcFrame = ''
        for field in ('dwFlags', 'wPriority', 'wLanguage', 'dwInitialFrame',
                      'dwScale', 'dwRate', 'dwStart', 'dwLength',
                      'dwSuggestedBuffer', 'dwQuality', 'dwSampleSize'):
            setattr(self, field, 0)
class AVIParser:
    """Minimal AVI (RIFF) parser that extracts a file's play duration.

    Only the headers needed to compute duration are read; stream payload
    data is skipped.  NOTE(review): this module targets Kodi's Python 2 --
    chunk tags are compared against *str* literals (e.g. ``"RIFF"``), which
    would never match the *bytes* returned by a binary read on Python 3.
    """
    def __init__(self):
        self.Header = AVIHeader()
        self.StreamHeader = AVIStreamHeader()
    def log(self, msg, level = xbmc.LOGDEBUG):
        # Route messages through Kodi's logger, tagged with the class name.
        xbmc.log('AVIParser: ' + msg, level)
    def determineLength(self, filename):
        """Return the duration of ``filename`` in seconds, or 0 on failure."""
        self.log("determineLength " + filename)
        try:
            self.File = FileAccess.open(filename, "rb")
        except:
            self.log("Unable to open the file")
            return 0
        dur = self.readHeader()
        self.File.close()
        self.log('Duration: ' + str(dur))
        return dur
    def readHeader(self):
        """Walk the RIFF structure to the video stream header; return duration.

        Returns 0 whenever the file deviates from the expected basic AVI
        layout (RIFF/'AVI ' -> 'hdrl' list -> 'avih' chunk -> stream lists).
        """
        # AVI Chunk
        data = self.getChunkOrList()
        if data.datatype != 2:
            self.log("Not an avi")
            return 0
        if data.fourcc[0:4] != "AVI ":
            self.log("Not a basic AVI: " + data.fourcc[:2])
            return 0
        # Header List
        data = self.getChunkOrList()
        if data.fourcc != "hdrl":
            self.log("Header not found: " + data.fourcc)
            return 0
        # Header chunk
        data = self.getChunkOrList()
        if data.fourcc != 'avih':
            self.log('Header chunk not found: ' + data.fourcc)
            return 0
        self.parseHeader(data)
        # Stream list
        data = self.getChunkOrList()
        # Sanity cap: a corrupt header cannot force an absurd stream count.
        if self.Header.dwStreams > 10:
            self.Header.dwStreams = 10
        for i in range(self.Header.dwStreams):
            if data.datatype != 2:
                self.log("Unable to find streams")
                return 0
            listsize = data.size
            # Stream chunk number 1, the stream header
            data = self.getChunkOrList()
            if data.datatype != 1:
                self.log("Broken stream header")
                return 0
            self.StreamHeader.empty()
            self.parseStreamHeader(data)
            # If this is the video header, determine the duration
            if self.StreamHeader.fccType == 'vids':
                return self.getStreamDuration()
            # If this isn't the video header, skip through the rest of these
            # stream chunks
            try:
                # Seek past the remaining chunks of this stream's LIST
                # (relative seek; 12 accounts for the header bytes read).
                if listsize - data.size - 12 > 0:
                    self.File.seek(listsize - data.size - 12, 1)
                data = self.getChunkOrList()
            except:
                self.log("Unable to seek")
        self.log("Video stream not found")
        return 0
    def getStreamDuration(self):
        """Duration in seconds = frame count / (rate / scale); 0 on error."""
        try:
            return int(self.StreamHeader.dwLength / (float(self.StreamHeader.dwRate) / float(self.StreamHeader.dwScale)))
        except:
            return 0
    def parseHeader(self, data):
        """Unpack the 'avih' chunk payload into ``self.Header`` fields."""
        try:
            header = struct.unpack('<iiiiiiiiiiiiii', data.chunk)
            self.Header.dwMicroSecPerFrame = header[0]
            self.Header.dwMaxBytesPerSec = header[1]
            self.Header.dwPaddingGranularity = header[2]
            self.Header.dwFlags = header[3]
            self.Header.dwTotalFrames = header[4]
            self.Header.dwInitialFrames = header[5]
            self.Header.dwStreams = header[6]
            self.Header.dwSuggestedBufferSize = header[7]
            self.Header.dwWidth = header[8]
            self.Header.dwHeight = header[9]
        except:
            # Any unpack failure leaves the header zeroed rather than partial.
            self.Header.empty()
            self.log('Unable to parse the header')
    def parseStreamHeader(self, data):
        """Unpack a 'strh' chunk payload into ``self.StreamHeader`` fields."""
        try:
            # First 8 bytes are the two FOURCC tags; the rest is numeric.
            self.StreamHeader.fccType = data.chunk[0:4]
            self.StreamHeader.fccHandler = data.chunk[4:8]
            header = struct.unpack('<ihhiiiiiiiid', data.chunk[8:])
            self.StreamHeader.dwFlags = header[0]
            self.StreamHeader.wPriority = header[1]
            self.StreamHeader.wLanguage = header[2]
            self.StreamHeader.dwInitialFrame = header[3]
            self.StreamHeader.dwScale = header[4]
            self.StreamHeader.dwRate = header[5]
            self.StreamHeader.dwStart = header[6]
            self.StreamHeader.dwLength = header[7]
            self.StreamHeader.dwSuggestedBuffer = header[8]
            self.StreamHeader.dwQuality = header[9]
            self.StreamHeader.dwSampleSize = header[10]
            self.StreamHeader.rcFrame = ''
        except:
            self.StreamHeader.empty()
            self.log("Error reading stream header")
    def getChunkOrList(self):
        """Read the next 4-byte tag and return a filled AVIList or AVIChunk.

        datatype 2 = LIST, 1 = chunk, 3 = end of file reached.
        """
        data = self.File.read(4)
        if data == "RIFF" or data == "LIST":
            dataclass = AVIList()
        elif len(data) == 0:
            dataclass = AVIChunk()
            dataclass.datatype = 3
        else:
            dataclass = AVIChunk()
            dataclass.fourcc = data
        # Fill in the chunk or list info
        dataclass.read(self.File)
        return dataclass
| Jasonra/XBMC-PseudoTV | resources/lib/parsers/AVIParser.py | Python | gpl-3.0 | 7,403 |
# Created By: Virgil Dupras
# Created On: 2005/12/16
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
from .. import ogg
from .util import TestData, eq_
def test_page_valid_on_test1():
    """Walk the first four Vorbis pages of test1.ogg and check their metadata."""
    fp = open(TestData.filepath('ogg/test1.ogg'), 'rb')
    page = ogg.VorbisPage(fp)
    assert page.valid
    eq_(0, page.page_number)
    eq_(0, page.position)
    eq_(30, page.size)
    # page.read() must return exactly the payload bytes that follow the header.
    fp.seek(page.start_offset + page.header_size)
    data = fp.read(page.size)
    eq_(data, page.read())
    # next(page) advances to the following page in the stream.
    page = next(page)
    assert page.valid
    eq_(1, page.page_number)
    eq_(0, page.position)
    eq_(0x10f1, page.size)
    page = next(page)
    assert page.valid
    eq_(2, page.page_number)
    eq_(0, page.position)
    eq_(0x91, page.size)
    page = next(page)
    assert page.valid
    eq_(3, page.page_number)
    eq_(0x2800, page.position)
    eq_(0x1019, page.size)
    fp.close()
def test_file_valid_on_test1():
    """Check stream properties and Vorbis comment tags parsed from test1.ogg."""
    o = ogg.Vorbis(TestData.filepath('ogg/test1.ogg'))
    eq_(o.size, 101785)
    eq_(o.bitrate, 160)
    eq_(o.sample_rate, 44100)
    eq_(o.sample_count, 0x6d3eae)
    eq_(o.duration, 162)
    eq_(o.artist, 'The White Stripes')
    eq_(o.album, 'The White Stripes')
    eq_(o.title, 'Astro')
    eq_(o.genre, '')
    eq_(o.comment, '')
    eq_(o.year, '1999')
    eq_(o.track, 8)
    # Audio payload = file size minus the header/comment prefix.
    eq_(o.audio_offset, 0x1158)
    eq_(o.audio_size, 101785 - 0x1158)
def test_file_valid_on_test2():
    """Same checks against test2.ogg, which carries non-ASCII tag values."""
    o = ogg.Vorbis(TestData.filepath('ogg/test2.ogg'))
    eq_(103168, o.size)
    eq_(199, o.bitrate)
    eq_(44100, o.sample_rate)
    eq_(0xb2a2c8, o.sample_count)
    eq_(265, o.duration)
    eq_('Ariane Moffatt', o.artist)
    eq_('Le coeur dans la t\u00eate', o.album)
    eq_('Le coeur dans la t\u00eate', o.title)
    eq_('Pop', o.genre)
    eq_('', o.comment)
    eq_('2005', o.year)
    eq_(3, o.track)
    eq_(0xf79, o.audio_offset)
    eq_(103168 - 0xf79, o.audio_size)
def test_lowercase_fieldnames():
    # Support ogg files with lowercase fieldnames (artist, album, etc.)
    o = ogg.Vorbis(TestData.filepath('ogg/lowercase.ogg'))
    eq_(o.artist, 'The White Stripes')
    eq_(o.album, 'The White Stripes')
    eq_(o.title, 'Astro')
def test_track_with_slash():
    # A track number field with a slash (for example, 1/20) is supported and will return the first
    # number of the field.
    # FILE NOTE: Because I had added 4 bytes to the TRACKNUMBER field in the test file and that I
    # wasn't sure where I had to adjust the vorbis comment offset other than just in front of the
    # field, I removed 4 bytes in the otherwise unused TRACKTOTAL (now TRACKT) field.
    o = ogg.Vorbis(TestData.filepath('ogg/track_with_slash.ogg'))
    eq_(o.track, 18)
def test_small():
    # Previously, a small (<64kb) OGG file couldn't be read due to a hardcoded 64kb offset. Tix #2.
    o = ogg.Vorbis(TestData.filepath('ogg/small.ogg'))
    eq_(o.bitrate, 60)
    eq_(o.duration, 4)
def verify_emptyness(o):
    """Assert that *o* parsed as a completely empty/invalid Vorbis stream."""
    eq_(0, o.bitrate)
    eq_(0, o.sample_rate)
    eq_(0, o.sample_count)
    eq_(0, o.duration)
    eq_('', o.artist)
    eq_('', o.album)
    eq_('', o.title)
    eq_('', o.genre)
    eq_('', o.comment)
    eq_('', o.year)
    eq_(0, o.track)
    eq_(0, o.audio_offset)
    eq_(0, o.audio_size)
# Feeding non-Vorbis data must degrade gracefully to empty metadata,
# never raise.
def test_invalid_zerofile():
    o = ogg.Vorbis(TestData.filepath('zerofile'))
    verify_emptyness(o)
def test_invalid_zerofill():
    o = ogg.Vorbis(TestData.filepath('zerofill'))
    verify_emptyness(o)
def test_invalid_randomfile():
    o = ogg.Vorbis(TestData.filepath('randomfile'))
    verify_emptyness(o)
def test_invalid_mp3():
    o = ogg.Vorbis(TestData.filepath('mpeg/test1.mp3'))
    verify_emptyness(o)
def test_invalid_wma():
    o = ogg.Vorbis(TestData.filepath('wma/test1.wma'))
    verify_emptyness(o)
def test_invalid_mp4():
    o = ogg.Vorbis(TestData.filepath('mp4/test1.m4a'))
    verify_emptyness(o)
| jmtchllrx/pyMuse | src/hsaudiotag/tests/ogg_test.py | Python | mit | 4,081 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
__all__ = ['jackknife_resampling', 'jackknife_stats']
__doctest_requires__ = {'jackknife_stats': ['scipy']}
def jackknife_resampling(data):
    """Return the ``n`` leave-one-out jackknife resamples of a 1-D sample.

    Row ``i`` of the result is ``data`` with its i-th measurement deleted,
    giving shape ``(n, n - 1)``.  The output is always float64, matching
    the historical ``np.empty``-based behaviour of this function.

    Parameters
    ----------
    data : ndarray
        Original sample (1-D array) from which the jackknife resamples
        will be generated.

    Returns
    -------
    resamples : ndarray
        The i-th row is the i-th jackknife sample, i.e., the original
        sample with the i-th measurement deleted.

    References
    ----------
    .. [1] McIntosh, Avery. "The Jackknife Estimation Method".
        <https://arxiv.org/abs/1606.00497>
    .. [2] Efron, Bradley. "The Jackknife, the Bootstrap, and other
        Resampling Plans". Technical Report No. 63, Division of
        Biostatistics, Stanford University, December, 1980.
    .. [3] Jackknife resampling
        <https://en.wikipedia.org/wiki/Jackknife_resampling>
    """
    n = data.shape[0]
    if n <= 0:
        raise ValueError("data must contain at least one measurement.")
    # dtype=float keeps the traditional float64 result for integer inputs.
    return np.asarray([np.delete(data, i) for i in range(n)], dtype=float)


def jackknife_stats(data, statistic, confidence_level=0.95):
    """Jackknife estimate, bias, standard error and confidence interval.

    This function requires `SciPy <https://www.scipy.org/>`_ to be installed.

    Parameters
    ----------
    data : ndarray
        Original sample (1-D array).
    statistic : function
        Any function (or vector of functions) of the measured data, e.g.
        sample mean or sample variance, whose jackknife estimate is wanted.
    confidence_level : float, optional
        Confidence level for the confidence interval of the jackknife
        estimate; must lie strictly between 0 and 1.  Default is 0.95.

    Returns
    -------
    estimate : float or `~numpy.ndarray`
        The bias-corrected "jackknifed" estimate (per statistic component).
    bias : float or `~numpy.ndarray`
        The jackknife bias (per component).
    std_err : float or `~numpy.ndarray`
        The jackknife standard error (per component).
    conf_interval : ndarray
        If ``statistic`` is single-valued, the first and second elements
        are the lower and upper bounds.  If ``statistic`` is vector-valued,
        each column holds the interval for one component: row 0 contains
        the lower bounds and row 1 the upper bounds.

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.stats import jackknife_stats
    >>> data = np.array([1,2,3,4,5,6,7,8,9,0])
    >>> estimate, bias, stderr, conf_interval = jackknife_stats(
    ...     data, np.mean, 0.95)
    >>> estimate
    4.5
    >>> conf_interval
    array([2.62347735, 6.37652265])
    """
    if not (0 < confidence_level < 1):
        raise ValueError("confidence level must be in (0, 1).")
    n = data.shape[0]
    if n <= 0:
        raise ValueError("data must contain at least one measurement.")
    # Import lazily so the module stays importable without SciPy.
    from scipy.special import erfinv
    resamples = jackknife_resampling(data)
    stat_data = statistic(data)
    jack_stat = np.apply_along_axis(statistic, 1, resamples)
    mean_jack_stat = np.mean(jack_stat, axis=0)
    # Jackknife bias: (n-1) times the mean deviation of the resample stats.
    bias = (n - 1) * (mean_jack_stat - stat_data)
    # Jackknife standard error.
    deviations = jack_stat - mean_jack_stat
    std_err = np.sqrt((n - 1) * np.mean(deviations * deviations, axis=0))
    # Bias-corrected "jackknifed" estimate.
    estimate = stat_data - bias
    # Two-sided normal-theory confidence interval.
    z_score = np.sqrt(2.0) * erfinv(confidence_level)
    conf_interval = estimate + z_score * np.array((-std_err, std_err))
    return estimate, bias, std_err, conf_interval
| larrybradley/astropy | astropy/stats/jackknife.py | Python | bsd-3-clause | 5,913 |
# encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KDescendantsProxyModel(__PyQt4_QtGui.QAbstractProxyModel):
    """Auto-generated API stub for KDE's ``KDescendantsProxyModel``.

    The real implementation lives in the compiled PyKDE4 extension
    module; every method body here is a placeholder whose signature was
    not recoverable by the stub generator (hence ``*args, **kwargs``).
    Do not call these stubs directly.
    """
    def ancestorSeparator(self, *args, **kwargs): # real signature unknown
        pass
    def columnCount(self, *args, **kwargs): # real signature unknown
        pass
    def data(self, *args, **kwargs): # real signature unknown
        pass
    def displayAncestorData(self, *args, **kwargs): # real signature unknown
        pass
    def flags(self, *args, **kwargs): # real signature unknown
        pass
    def hasChildren(self, *args, **kwargs): # real signature unknown
        pass
    def headerData(self, *args, **kwargs): # real signature unknown
        pass
    def index(self, *args, **kwargs): # real signature unknown
        pass
    def mapFromSource(self, *args, **kwargs): # real signature unknown
        pass
    def mapToSource(self, *args, **kwargs): # real signature unknown
        pass
    def match(self, *args, **kwargs): # real signature unknown
        pass
    def mimeData(self, *args, **kwargs): # real signature unknown
        pass
    def mimeTypes(self, *args, **kwargs): # real signature unknown
        pass
    def parent(self, *args, **kwargs): # real signature unknown
        pass
    def rowCount(self, *args, **kwargs): # real signature unknown
        pass
    def setAncestorSeparator(self, *args, **kwargs): # real signature unknown
        pass
    def setDisplayAncestorData(self, *args, **kwargs): # real signature unknown
        pass
    def setRootIndex(self, *args, **kwargs): # real signature unknown
        pass
    def setSourceModel(self, *args, **kwargs): # real signature unknown
        pass
    def supportedDropActions(self, *args, **kwargs): # real signature unknown
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdeui/KDescendantsProxyModel.py | Python | gpl-2.0 | 2,147 |
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements MongoDB the storage controller for messages.
Field Mappings:
In order to reduce the disk / memory space used,
field names will be, most of the time, the first
letter of their long name.
"""
import datetime
import time
import uuid
from bson import binary
from bson import objectid
from oslo_log import log as logging
from oslo_utils import timeutils
import pymongo.errors
import pymongo.read_preferences
from zaqar.i18n import _
from zaqar import storage
from zaqar.storage import errors
from zaqar.storage.mongodb import utils
from zaqar.storage import utils as s_utils
# Module-level logger, named after this module per oslo.log convention.
LOG = logging.getLogger(__name__)
# NOTE(kgriffs): This value, in seconds, should be at least less than the
# minimum allowed TTL for messages (60 seconds). Make it 45 to allow for
# some fudge room.
MAX_RETRY_POST_DURATION = 45
# NOTE(kgriffs): It is extremely unlikely that all workers would somehow hang
# for more than 5 seconds, without a single one being able to succeed in
# posting some messages and incrementing the counter, thus allowing the other
# producers to succeed in turn.
COUNTER_STALL_WINDOW = 5
# For hinting (lookups by message ObjectId)
ID_INDEX_FIELDS = [('_id', 1)]
# For removing expired messages (used as a TTL index on the 'e' field)
TTL_INDEX_FIELDS = [
    ('e', 1),
]
# to unify use of project/topic across mongodb
# storage impls.
PROJ_TOPIC = utils.PROJ_TOPIC_KEY
# NOTE(kgriffs): This index is for listing messages, usually
# filtering out claimed ones.
ACTIVE_INDEX_FIELDS = [
    (PROJ_TOPIC, 1),  # Project will be unique, so put first
    ('k', 1),  # Used for sorting and paging, must come before range queries
]
# For counting
COUNTING_INDEX_FIELDS = [
    (PROJ_TOPIC, 1),  # Project will be unique, so put first
]
# This index is meant to be used as a shard-key and to ensure
# uniqueness for markers.
#
# As for other compound indexes, order matters. The marker `k`
# gives enough cardinality to ensure chunks are evenly distributed,
# whereas the `p_q` field helps keeping chunks from the same project
# and queue together.
#
# In a sharded environment, uniqueness of this index is still guaranteed
# because it's used as a shard key.
MARKER_INDEX_FIELDS = [
    ('k', 1),
    (PROJ_TOPIC, 1),
]
# Used when finalizing two-phase batch inserts (see FIFO post()).
TRANSACTION_INDEX_FIELDS = [
    ('tx', 1),
]
class MessageController(storage.Message):
    """Implements message resource operations using MongoDB.

    Messages are scoped by project + topic.

    ::

        Messages:
            Name            Field
            -----------------------
            scope        ->   p_t
            ttl          ->     t
            expires      ->     e
            marker       ->     k
            body         ->     b
            client uuid  ->     u
            transaction  ->    tx
            delay        ->     d
            checksum     ->    cs
    """
    def __init__(self, *args, **kwargs):
        super(MessageController, self).__init__(*args, **kwargs)
        # Cache for convenience and performance
        self._num_partitions = self.driver.mongodb_conf.partitions
        self._topic_ctrl = self.driver.topic_controller
        self._retry_range = range(self.driver.mongodb_conf.max_attempts)
        # Create a list of 'messages' collections, one for each database
        # partition, ordered by partition number.
        #
        # NOTE(kgriffs): Order matters, since it is used to lookup the
        # collection by partition number. For example, self._collections[2]
        # would provide access to zaqar_p2.messages (partition numbers are
        # zero-based).
        self._collections = [db.messages
                             for db in self.driver.message_databases]
        # Ensure indexes are initialized before any queries are performed
        for collection in self._collections:
            self._ensure_indexes(collection)
    # ----------------------------------------------------------------------
    # Helpers
    # ----------------------------------------------------------------------
    def _ensure_indexes(self, collection):
        """Ensures that all indexes are created."""
        collection.create_index(TTL_INDEX_FIELDS,
                                name='ttl',
                                expireAfterSeconds=0,
                                background=True)
        collection.create_index(ACTIVE_INDEX_FIELDS,
                                name='active',
                                background=True)
        collection.create_index(COUNTING_INDEX_FIELDS,
                                name='counting',
                                background=True)
        collection.create_index(MARKER_INDEX_FIELDS,
                                name='queue_marker',
                                background=True)
        collection.create_index(TRANSACTION_INDEX_FIELDS,
                                name='transaction',
                                background=True)
    def _collection(self, topic_name, project=None):
        """Get a partitioned collection instance."""
        return self._collections[utils.get_partition(self._num_partitions,
                                                     topic_name, project)]
    def _backoff_sleep(self, attempt):
        """Sleep between retries using a jitter algorithm.

        Mitigates thrashing between multiple parallel requests, and
        creates backpressure on clients to slow down the rate
        at which they submit requests.

        :param attempt: current attempt number, zero-based
        """
        conf = self.driver.mongodb_conf
        seconds = utils.calculate_backoff(attempt, conf.max_attempts,
                                          conf.max_retry_sleep,
                                          conf.max_retry_jitter)
        time.sleep(seconds)
    def _purge_topic(self, topic_name, project=None):
        """Removes all messages from the queue.

        Warning: Only use this when deleting the queue; otherwise
        you can cause a side-effect of reseting the marker counter
        which can cause clients to miss tons of messages.

        If the queue does not exist, this method fails silently.

        :param topic_name: name of the queue to purge
        :param project: ID of the project to which the queue belongs
        """
        scope = utils.scope_queue_name(topic_name, project)
        collection = self._collection(topic_name, project)
        collection.delete_many({PROJ_TOPIC: scope})
    def _list(self, topic_name, project=None, marker=None,
              echo=False, client_uuid=None, projection=None,
              include_claimed=False, include_delayed=False,
              sort=1, limit=None, count=False):
        """Message document listing helper.

        :param topic_name: Name of the topic to list
        :param project: (Default None) Project `topic_name` belongs to. If
            not specified, queries the "global" namespace/project.
        :param marker: (Default None) Message marker from which to start
            iterating. If not specified, starts with the first message
            available in the topic.
        :param echo: (Default False) Whether to return messages that match
            client_uuid
        :param client_uuid: (Default None) UUID for the client that
            originated this request
        :param projection: (Default None) a list of field names that should be
            returned in the result set or a dict specifying the fields to
            include or exclude
        :param include_claimed: (Default False) Whether to include
            claimed messages, not just active ones
        :param include_delayed: (Default False) Whether to include
            delayed messages, not just active ones
        :param sort: (Default 1) Sort order for the listing. Pass 1 for
            ascending (oldest message first), or -1 for descending (newest
            message first).
        :param limit: (Default None) The maximum number of messages
            to list. The results may include fewer messages than the
            requested `limit` if not enough are available. If limit is
            not specified
        :param count: (Default False) If return the count number of cursor
        :returns: Generator yielding up to `limit` messages.
        """
        if sort not in (1, -1):
            raise ValueError(u'sort must be either 1 (ascending) '
                             u'or -1 (descending)')
        now = timeutils.utcnow_ts()
        query = {
            # Messages must belong to this topic and project.
            PROJ_TOPIC: utils.scope_queue_name(topic_name, project),
            # NOTE(kgriffs): Messages must be finalized (i.e., must not
            # be part of an unfinalized transaction).
            #
            # See also the note wrt 'tx' within the definition
            # of ACTIVE_INDEX_FIELDS.
            'tx': None,
        }
        if not echo:
            # Normalize the client UUID to its BSON binary form before
            # using it in the "not posted by me" filter.
            if (client_uuid is not None) and not isinstance(client_uuid,
                                                            uuid.UUID):
                client_uuid = uuid.UUID(client_uuid)
                client_uuid = binary.Binary.from_uuid(client_uuid)
            elif isinstance(client_uuid, uuid.UUID):
                client_uuid = binary.Binary.from_uuid(client_uuid)
            query['u'] = {'$ne': client_uuid}
        if marker is not None:
            query['k'] = {'$gt': marker}
        collection = self._collection(topic_name, project)
        if not include_delayed:
            # NOTE(cdyangzhenyu): Only include messages that are not
            # part of any delay, or are part of an expired delay. if
            # the message has no attribute 'd', it will also be obtained.
            # This is for compatibility with old data.
            query['$or'] = [{'d': {'$lte': now}},
                            {'d': {'$exists': False}}]
        # Construct the request
        cursor = collection.find(query,
                                 projection=projection,
                                 sort=[('k', sort)])
        ntotal = None
        if count:
            ntotal = collection.count_documents(query)
        if limit is not None:
            cursor.limit(limit)
            if count:
                ntotal = collection.count_documents(query, limit=limit)
        # NOTE(flaper87): Suggest the index to use for this query to
        # ensure the most performant one is chosen.
        if count:
            return cursor.hint(ACTIVE_INDEX_FIELDS), ntotal
        return cursor.hint(ACTIVE_INDEX_FIELDS)
    # ----------------------------------------------------------------------
    # "Friends" interface
    # ----------------------------------------------------------------------
    def _count(self, topic_name, project=None, include_claimed=False):
        """Return total number of messages in a topic.

        This method is designed to very quickly count the number
        of messages in a given topic. Expired messages are not
        counted, of course. If the queue does not exist, the
        count will always be 0.

        Note: Some expired messages may be included in the count if
        they haven't been GC'd yet. This is done for performance.

        NOTE(review): the ``include_claimed`` parameter is accepted but
        never referenced in the body — the count is the same either way;
        confirm whether claim filtering was intended here.
        """
        query = {
            # Messages must belong to this queue and project.
            PROJ_TOPIC: utils.scope_queue_name(topic_name, project),
            # NOTE(kgriffs): Messages must be finalized (i.e., must not
            # be part of an unfinalized transaction).
            #
            # See also the note wrt 'tx' within the definition
            # of ACTIVE_INDEX_FIELDS.
            'tx': None,
        }
        collection = self._collection(topic_name, project)
        return collection.count_documents(filter=query,
                                          hint=COUNTING_INDEX_FIELDS)
    def _active(self, topic_name, marker=None, echo=False,
                client_uuid=None, projection=None, project=None,
                limit=None, include_delayed=False):
        # Thin wrapper over _list() that always excludes claimed messages.
        return self._list(topic_name, project=project, marker=marker,
                          echo=echo, client_uuid=client_uuid,
                          projection=projection, include_claimed=False,
                          include_delayed=include_delayed, limit=limit)
    def _inc_counter(self, topic_name, project=None, amount=1, window=None):
        """Increments the message counter and returns the new value.

        :param topic_name: Name of the topic to which the counter is scoped
        :param project: Queue's project name
        :param amount: (Default 1) Amount by which to increment the counter
        :param window: (Default None) A time window, in seconds, that
            must have elapsed since the counter was last updated, in
            order to increment the counter.
        :returns: Updated message counter value, or None if window
            was specified, and the counter has already been updated
            within the specified time period.
        :raises QueueDoesNotExist: if not found
        """
        # NOTE(flaper87): If this `if` is True, it means we're
        # using a mongodb in the control plane. To avoid breaking
        # environments doing so already, we'll keep using the counter
        # in the mongodb topic_controller rather than the one in the
        # message_controller. This should go away, eventually
        if hasattr(self._topic_ctrl, '_inc_counter'):
            return self._topic_ctrl._inc_counter(topic_name, project,
                                                 amount, window)
        now = timeutils.utcnow_ts()
        update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}}
        query = _get_scoped_query(topic_name, project)
        if window is not None:
            threshold = now - window
            query['c.t'] = {'$lt': threshold}
        # Retry indefinitely on transient connection loss; break as soon
        # as the atomic find-and-update succeeds.
        while True:
            try:
                collection = self._collection(topic_name, project).stats
                doc = collection.find_one_and_update(
                    query, update,
                    return_document=pymongo.ReturnDocument.AFTER,
                    projection={'c.v': 1, '_id': 0})
                break
            except pymongo.errors.AutoReconnect:
                LOG.exception('Auto reconnect error.')
        if doc is None:
            if window is None:
                # NOTE(kgriffs): Since we did not filter by a time window,
                # the topic should have been found and updated. Perhaps
                # the topic has been deleted?
                message = (u'Failed to increment the message '
                           u'counter for topic %(name)s and '
                           u'project %(project)s')
                message %= dict(name=topic_name, project=project)
                LOG.warning(message)
                raise errors.TopicDoesNotExist(topic_name, project)
            # NOTE(kgriffs): Assume the queue existed, but the counter
            # was recently updated, causing the range query on 'c.t' to
            # exclude the record.
            return None
        return doc['c']['v']
    def _get_counter(self, topic_name, project=None):
        """Retrieves the current message counter value for a given topic.

        This helper is used to generate monotonic pagination
        markers that are saved as part of the message
        document.

        Note 1: Markers are scoped per-queue and so are *not*
            globally unique or globally ordered.

        Note 2: If two or more requests to this method are made
            in parallel, this method will return the same counter
            value. This is done intentionally so that the caller
            can detect a parallel message post, allowing it to
            mitigate race conditions between producer and
            observer clients.

        :param topic_name: Name of the topic to which the counter is scoped
        :param project: Topic's project
        :returns: current message counter as an integer
        """
        # NOTE(flaper87): If this `if` is True, it means we're
        # using a mongodb in the control plane. To avoid breaking
        # environments doing so already, we'll keep using the counter
        # in the mongodb queue_controller rather than the one in the
        # message_controller. This should go away, eventually
        if hasattr(self._topic_ctrl, '_get_counter'):
            return self._topic_ctrl._get_counter(topic_name, project)
        # A zero-increment upsert: creates the counter document when it
        # is missing and returns the current value without changing it.
        update = {'$inc': {'c.v': 0, 'c.t': 0}}
        query = _get_scoped_query(topic_name, project)
        try:
            collection = self._collection(topic_name, project).stats
            doc = collection.find_one_and_update(
                query, update, upsert=True,
                return_document=pymongo.ReturnDocument.AFTER,
                projection={'c.v': 1, '_id': 0})
            return doc['c']['v']
        except pymongo.errors.AutoReconnect:
            LOG.exception('Auto reconnect error.')
    # ----------------------------------------------------------------------
    # Public interface
    # ----------------------------------------------------------------------
    def list(self, topic_name, project=None, marker=None,
             limit=storage.DEFAULT_MESSAGES_PER_PAGE,
             echo=False, client_uuid=None, include_claimed=False,
             include_delayed=False):
        """Generator yielding a page of messages, then the next marker.

        First yield: an iterator (HookedCursor) over up to `limit`
        messages. Second yield: the marker (string) for the next page.
        """
        if marker is not None:
            try:
                marker = int(marker)
            except ValueError:
                yield iter([])
                # NOTE(review): there is no ``return`` after yielding the
                # empty page, so the generator falls through and queries
                # with the non-integer marker, producing a third yield —
                # confirm whether execution should stop here instead.
        messages, ntotal = self._list(topic_name, project=project,
                                      marker=marker,
                                      client_uuid=client_uuid, echo=echo,
                                      include_claimed=include_claimed,
                                      include_delayed=include_delayed,
                                      limit=limit, count=True)
        marker_id = {}
        now = timeutils.utcnow_ts()
        # NOTE (kgriffs) @utils.raises_conn_error not needed on this
        # function, since utils.HookedCursor already has it.
        def denormalizer(msg):
            # Track the highest marker seen so far; it becomes the
            # pagination marker yielded after the page is consumed.
            marker_id['next'] = msg['k']
            return _basic_message(msg, now)
        yield utils.HookedCursor(messages, denormalizer, ntotal=ntotal)
        yield str(marker_id['next'])
    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def first(self, topic_name, project=None, sort=1):
        """Return the first (sort=1) or last (sort=-1) message by marker.

        :raises TopicIsEmpty: if the topic contains no messages
        """
        cursor = self._list(topic_name, project=project,
                            include_claimed=True, sort=sort,
                            limit=1)
        try:
            message = next(cursor)
        except StopIteration:
            raise errors.TopicIsEmpty(topic_name, project)
        now = timeutils.utcnow_ts()
        return _basic_message(message, now)
    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def get(self, topic_name, message_id, project=None):
        """Return a single message in the public format.

        :raises MessageDoesNotExist: if the id is malformed or not found
        """
        mid = utils.to_oid(message_id)
        if mid is None:
            raise errors.MessageDoesNotExist(message_id, topic_name,
                                             project)
        now = timeutils.utcnow_ts()
        query = {
            '_id': mid,
            PROJ_TOPIC: utils.scope_queue_name(topic_name, project),
        }
        collection = self._collection(topic_name, project)
        message = list(collection.find(query).limit(1).hint(ID_INDEX_FIELDS))
        if not message:
            raise errors.MessageDoesNotExist(message_id, topic_name,
                                             project)
        return _basic_message(message[0], now)
    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def bulk_get(self, topic_name, message_ids, project=None):
        """Return an iterator over the messages matching the given ids.

        Malformed ids are silently dropped from the lookup.
        """
        message_ids = [mid for mid in map(utils.to_oid, message_ids) if mid]
        if not message_ids:
            return iter([])
        now = timeutils.utcnow_ts()
        # Base query, always check expire time
        query = {
            '_id': {'$in': message_ids},
            PROJ_TOPIC: utils.scope_queue_name(topic_name, project),
        }
        collection = self._collection(topic_name, project)
        # NOTE(flaper87): Should this query
        # be sorted?
        messages = collection.find(query).hint(ID_INDEX_FIELDS)
        ntotal = collection.count_documents(query)
        def denormalizer(msg):
            return _basic_message(msg, now)
        return utils.HookedCursor(messages, denormalizer, ntotal=ntotal)
    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def post(self, topic_name, messages, client_uuid, project=None):
        """Insert a batch of messages and return their ids (as strings).

        :raises TopicDoesNotExist: if the target topic is missing
        """
        # NOTE(flaper87): This method should be safe to retry on
        # autoreconnect, since we've a 2-step insert for messages.
        # The worst-case scenario is that we'll increase the counter
        # several times and we'd end up with some non-active messages.
        if not self._topic_ctrl.exists(topic_name, project):
            raise errors.TopicDoesNotExist(topic_name, project)
        # NOTE(flaper87): Make sure the counter exists. This method
        # is an upsert.
        self._get_counter(topic_name, project)
        now = timeutils.utcnow_ts()
        now_dt = datetime.datetime.utcfromtimestamp(now)
        collection = self._collection(topic_name, project)
        messages = list(messages)
        msgs_n = len(messages)
        # Reserve a contiguous block of markers up front; the first
        # message of this batch gets the value returned minus the batch
        # size.
        next_marker = self._inc_counter(topic_name,
                                        project,
                                        amount=msgs_n) - msgs_n
        if (client_uuid is not None) and not isinstance(client_uuid,
                                                        uuid.UUID):
            client_uuid = uuid.UUID(client_uuid)
            client_uuid = binary.Binary.from_uuid(client_uuid)
        elif isinstance(client_uuid, uuid.UUID):
            client_uuid = binary.Binary.from_uuid(client_uuid)
        prepared_messages = []
        for index, message in enumerate(messages):
            msg = {
                PROJ_TOPIC: utils.scope_queue_name(topic_name, project),
                't': message['ttl'],
                'e': now_dt + datetime.timedelta(seconds=message['ttl']),
                'u': client_uuid,
                'd': now + message.get('delay', 0),
                'b': message['body'] if 'body' in message else {},
                'k': next_marker + index,
                'tx': None
            }
            if self.driver.conf.enable_checksum:
                msg['cs'] = s_utils.get_checksum(message.get('body', None))
            prepared_messages.append(msg)
        res = collection.insert_many(prepared_messages,
                                     bypass_document_validation=True)
        return [str(id_) for id_ in res.inserted_ids]
    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def delete(self, topic_name, message_id, project=None, claim=None):
        """Delete a message, optionally verifying the claim that owns it.

        Silently returns when the message id is malformed or the
        message does not exist.
        """
        # NOTE(cpp-cabrera): return early - this is an invalid message
        # id so we won't be able to find it any way
        mid = utils.to_oid(message_id)
        if mid is None:
            return
        collection = self._collection(topic_name, project)
        query = {
            '_id': mid,
            PROJ_TOPIC: utils.scope_queue_name(topic_name, project),
        }
        cid = utils.to_oid(claim)
        if cid is None:
            raise errors.ClaimDoesNotExist(claim, topic_name, project)
        now = timeutils.utcnow_ts()
        cursor = collection.find(query).hint(ID_INDEX_FIELDS)
        try:
            message = next(cursor)
        except StopIteration:
            return
        # NOTE(review): post() in this controller never writes a 'c'
        # (claim) field to message documents, yet the claim checks below
        # read message['c'] — confirm whether these branches are
        # reachable for topics or are a carry-over from the queue
        # message controller.
        if claim is None:
            if _is_claimed(message, now):
                raise errors.MessageIsClaimed(message_id)
        else:
            if message['c']['id'] != cid:
                kwargs = {}
                # NOTE(flaper87): In pymongo 3.0 PRIMARY is the default and
                # `read_preference` is read only. We'd need to set it when the
                # client is created.
                # NOTE(kgriffs): Read from primary in case the message
                # was just barely claimed, and claim hasn't made it to
                # the secondary.
                message = collection.find_one(query, **kwargs)
                if message['c']['id'] != cid:
                    if _is_claimed(message, now):
                        raise errors.MessageNotClaimedBy(message_id, claim)
                    raise errors.MessageNotClaimed(message_id)
        collection.delete_one(query)
    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def bulk_delete(self, topic_name, message_ids, project=None,
                    claim_ids=None):
        """Delete several messages, optionally verifying their claim ids.

        :raises ClaimDoesNotExist: if a given claim id matches none of
            the messages being deleted
        """
        message_ids = [mid for mid in map(utils.to_oid, message_ids) if mid]
        if claim_ids:
            claim_ids = [cid for cid in map(utils.to_oid, claim_ids) if cid]
        query = {
            '_id': {'$in': message_ids},
            PROJ_TOPIC: utils.scope_queue_name(topic_name, project),
        }
        collection = self._collection(topic_name, project)
        if claim_ids:
            message_claim_ids = []
            messages = collection.find(query).hint(ID_INDEX_FIELDS)
            for message in messages:
                # NOTE(review): reads message['c'] — see the note in
                # delete() about the 'c' field never being written by
                # this controller's post().
                message_claim_ids.append(message['c']['id'])
            for cid in claim_ids:
                if cid not in message_claim_ids:
                    raise errors.ClaimDoesNotExist(cid, topic_name, project)
        collection.delete_many(query)
    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def pop(self, topic_name, limit, project=None):
        """Atomically remove and return up to `limit` unclaimed messages."""
        query = {
            PROJ_TOPIC: utils.scope_queue_name(topic_name, project),
        }
        # Only include messages that are not part of
        # any claim, or are part of an expired claim.
        #
        # NOTE(review): this matches on 'c.e', a field post() in this
        # controller never writes — confirm the filter behaves as
        # intended for topic messages.
        now = timeutils.utcnow_ts()
        query['c.e'] = {'$lte': now}
        collection = self._collection(topic_name, project)
        projection = {'_id': 1, 't': 1, 'b': 1, 'c.id': 1}
        messages = (collection.find_one_and_delete(query,
                                                   projection=projection)
                    for _ in range(limit))
        # find_one_and_delete returns None once no document matches;
        # filter those out of the final page.
        final_messages = [_basic_message(message, now)
                          for message in messages
                          if message]
        return final_messages
class FIFOMessageController(MessageController):
    """MessageController variant that preserves strict FIFO ordering.

    Differs from the base class in two ways: the marker index is made
    unique so concurrent producers cannot interleave markers, and
    post() retries with fresh markers on duplicate-key collisions.
    """
    def _ensure_indexes(self, collection):
        """Ensures that all indexes are created."""
        collection.create_index(TTL_INDEX_FIELDS,
                                name='ttl',
                                expireAfterSeconds=0,
                                background=True)
        collection.create_index(ACTIVE_INDEX_FIELDS,
                                name='active',
                                background=True)
        collection.create_index(COUNTING_INDEX_FIELDS,
                                name='counting',
                                background=True)
        # NOTE(kgriffs): This index must be unique so that
        # inserting a message with the same marker to the
        # same queue will fail; this is used to detect a
        # race condition which can cause an observer client
        # to miss a message when there is more than one
        # producer posting messages to the same queue, in
        # parallel.
        collection.create_index(MARKER_INDEX_FIELDS,
                                name='queue_marker',
                                unique=True,
                                background=True)
        collection.create_index(TRANSACTION_INDEX_FIELDS,
                                name='transaction',
                                background=True)
    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def post(self, topic_name, messages, client_uuid, project=None):
        """Post a batch of messages, retrying on marker collisions.

        :returns: list of inserted message ids (strings)
        :raises TopicDoesNotExist: if the target topic is missing
        :raises MessageConflict: if all insert attempts are exhausted
        """
        # NOTE(flaper87): This method should be safe to retry on
        # autoreconnect, since we've a 2-step insert for messages.
        # The worst-case scenario is that we'll increase the counter
        # several times and we'd end up with some non-active messages.
        if not self._topic_ctrl.exists(topic_name, project):
            raise errors.TopicDoesNotExist(topic_name, project)
        # NOTE(flaper87): Make sure the counter exists. This method
        # is an upsert.
        self._get_counter(topic_name, project)
        now = timeutils.utcnow_ts()
        now_dt = datetime.datetime.utcfromtimestamp(now)
        collection = self._collection(topic_name, project)
        # Set the next basis marker for the first attempt.
        #
        # Note that we don't increment the counter right away because
        # if 2 concurrent posts happen and the one with the higher counter
        # ends before the one with the lower counter, there's a window
        # where a client paging through the queue may get the messages
        # with the higher counter and skip the previous ones. This would
        # make our FIFO guarantee unsound.
        next_marker = self._get_counter(topic_name, project)
        # Unique transaction ID to facilitate atomic batch inserts
        transaction = objectid.ObjectId()
        if (client_uuid is not None) and not isinstance(client_uuid,
                                                        uuid.UUID):
            client_uuid = uuid.UUID(client_uuid)
            client_uuid = binary.Binary.from_uuid(client_uuid)
        elif isinstance(client_uuid, uuid.UUID):
            client_uuid = binary.Binary.from_uuid(client_uuid)
        prepared_messages = []
        for index, message in enumerate(messages):
            # NOTE(review): documents are created with 'tx': None rather
            # than the transaction ObjectId generated above, so the
            # finalizing update_many({'tx': transaction}, ...) below can
            # never match anything — confirm against the queue message
            # controller, which stamps 'tx' with the transaction id.
            msg = {
                PROJ_TOPIC: utils.scope_queue_name(topic_name, project),
                't': message['ttl'],
                'e': now_dt + datetime.timedelta(seconds=message['ttl']),
                'u': client_uuid,
                'd': now + message.get('delay', 0),
                'b': message['body'] if 'body' in message else {},
                'k': next_marker + index,
                'tx': None
            }
            if self.driver.conf.enable_checksum:
                msg['cs'] = s_utils.get_checksum(message.get('body', None))
            prepared_messages.append(msg)
        # NOTE(kgriffs): Don't take the time to do a 2-phase insert
        # if there is no way for it to partially succeed.
        if len(prepared_messages) == 1:
            transaction = None
            prepared_messages[0]['tx'] = None
        # Use a retry range for sanity, although we expect
        # to rarely, if ever, reach the maximum number of
        # retries.
        #
        # NOTE(kgriffs): With the default configuration (100 ms
        # max sleep, 1000 max attempts), the max stall time
        # before the operation is abandoned is 49.95 seconds.
        for attempt in self._retry_range:
            try:
                res = collection.insert_many(prepared_messages,
                                             bypass_document_validation=True)
                # Log a message if we retried, for debugging perf issues
                if attempt != 0:
                    msgtmpl = _(u'%(attempts)d attempt(s) required to post '
                                u'%(num_messages)d messages to queue '
                                u'"%(topic)s" under project %(project)s')
                    LOG.debug(msgtmpl,
                              dict(topic=topic_name,
                                   attempts=attempt + 1,
                                   num_messages=len(res.inserted_ids),
                                   project=project))
                # Update the counter in preparation for the next batch
                #
                # NOTE(kgriffs): Due to the unique index on the messages
                # collection, competing inserts will fail as a whole,
                # and keep retrying until the counter is incremented
                # such that the competing marker's will start at a
                # unique number, 1 past the max of the messages just
                # inserted above.
                self._inc_counter(topic_name, project,
                                  amount=len(res.inserted_ids))
                # NOTE(kgriffs): Finalize the insert once we can say that
                # all the messages made it. This makes bulk inserts
                # atomic, assuming queries filter out any non-finalized
                # messages.
                if transaction is not None:
                    collection.update_many({'tx': transaction},
                                           {'$set': {'tx': None}},
                                           upsert=False)
                return [str(id_) for id_ in res.inserted_ids]
            except (pymongo.errors.DuplicateKeyError,
                    pymongo.errors.BulkWriteError):
                # TODO(kgriffs): Record stats of how often retries happen,
                # and how many attempts, on average, are required to insert
                # messages.
                # NOTE(kgriffs): This can be used in conjunction with the
                # log line, above, that is emitted after all messages have
                # been posted, to gauge how long it is taking for messages
                # to be posted to a given topic, or overall.
                #
                # TODO(kgriffs): Add transaction ID to help match up loglines
                if attempt == 0:
                    msgtmpl = _(u'First attempt failed while '
                                u'adding messages to topic '
                                u'"%(topic)s" under project %(project)s')
                    LOG.debug(msgtmpl, dict(topic=topic_name, project=project))
                # NOTE(kgriffs): Never retry past the point that competing
                # messages expire and are GC'd, since once they are gone,
                # the unique index no longer protects us from getting out
                # of order, which could cause an observer to miss this
                # message. The code below provides a sanity-check to ensure
                # this situation can not happen.
                elapsed = timeutils.utcnow_ts() - now
                if elapsed > MAX_RETRY_POST_DURATION:
                    msgtmpl = (u'Exceeded maximum retry duration for topic '
                               u'"%(topic)s" under project %(project)s')
                    LOG.warning(msgtmpl,
                                dict(topic=topic_name, project=project))
                    break
                # Chill out for a moment to mitigate thrashing/thundering
                self._backoff_sleep(attempt)
                # NOTE(kgriffs): Perhaps we failed because a worker crashed
                # after inserting messages, but before incrementing the
                # counter; that would cause all future requests to stall,
                # since they would keep getting the same base marker that is
                # conflicting with existing messages, until the messages that
                # "won" expire, at which time we would end up reusing markers,
                # and that could make some messages invisible to an observer
                # that is querying with a marker that is large than the ones
                # being reused.
                #
                # To mitigate this, we apply a heuristic to determine whether
                # a counter has stalled. We attempt to increment the counter,
                # but only if it hasn't been updated for a few seconds, which
                # should mean that nobody is left to update it!
                #
                # Note that we increment one at a time until the logjam is
                # broken, since we don't know how many messages were posted
                # by the worker before it crashed.
                next_marker = self._inc_counter(
                    topic_name, project, window=COUNTER_STALL_WINDOW)
                # Retry the entire batch with a new sequence of markers.
                #
                # NOTE(kgriffs): Due to the unique index, and how
                # MongoDB works with batch requests, we will never
                # end up with a partially-successful update. The first
                # document in the batch will fail to insert, and the
                # remainder of the documents will not be attempted.
                if next_marker is None:
                    # NOTE(kgriffs): Usually we will end up here, since
                    # it should be rare that a counter becomes stalled.
                    next_marker = self._get_counter(
                        topic_name, project)
                else:
                    msgtmpl = (u'Detected a stalled message counter '
                               u'for topic "%(topic)s" under '
                               u'project %(project)s.'
                               u'The counter was incremented to %(value)d.')
                    LOG.warning(msgtmpl,
                                dict(topic=topic_name,
                                     project=project,
                                     value=next_marker))
                for index, message in enumerate(prepared_messages):
                    message['k'] = next_marker + index
            except Exception:
                LOG.exception('Error parsing document.')
                raise
        msgtmpl = (u'Hit maximum number of attempts (%(max)s) for topic '
                   u'"%(topic)s" under project %(project)s')
        LOG.warning(msgtmpl,
                    dict(max=self.driver.mongodb_conf.max_attempts,
                         topic=topic_name,
                         project=project))
        raise errors.MessageConflict(topic_name, project)
def _is_claimed(msg, now):
return (msg['c']['id'] is not None and
msg['c']['e'] > now)
def _basic_message(msg, now):
    """Map a raw message document onto the public message dict format."""
    oid = msg['_id']
    message = {
        'id': str(oid),
        'age': int(now - utils.oid_ts(oid)),
        'ttl': msg['t'],
        'body': msg['b'],
    }
    checksum = msg.get('cs')
    if checksum:
        message['checksum'] = checksum
    return message
class MessageTopicHandler(object):
    """Bridges topic lifecycle events to the message controller.

    Provides message purging when a topic is deleted, and per-topic
    message statistics.
    """
    def __init__(self, driver, control_driver):
        self.driver = driver
        self._cache = self.driver.cache
        self.topic_controller = self.driver.topic_controller
        self.message_controller = self.driver.message_controller

    def delete(self, topic_name, project=None):
        """Remove all messages belonging to the given topic.

        :param topic_name: name of the topic being deleted
        :param project: project the topic belongs to
        """
        # BUG FIX: this previously called the nonexistent
        # ``_purge_queue``; MessageController only defines
        # ``_purge_topic`` (copy-paste from the queue handler), so
        # delete() always raised AttributeError.
        self.message_controller._purge_topic(topic_name, project)

    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def stats(self, name, project=None):
        """Return message statistics for a topic.

        :returns: ``{'messages': {'total': N[, 'oldest': ...,
            'newest': ...]}}``; oldest/newest are omitted when the
            topic is empty
        :raises TopicDoesNotExist: if the topic is missing
        """
        if not self.topic_controller.exists(name, project=project):
            raise errors.TopicDoesNotExist(name, project)
        controller = self.message_controller
        total = controller._count(name, project=project,
                                  include_claimed=True)
        message_stats = {
            'total': total,
        }
        try:
            oldest = controller.first(name, project=project, sort=1)
            newest = controller.first(name, project=project, sort=-1)
        except errors.TopicIsEmpty:
            # BUG FIX: first() raises TopicIsEmpty for topics, but this
            # handler previously caught QueueIsEmpty, so stats() on an
            # empty topic propagated TopicIsEmpty instead of returning
            # the total-only stats.
            pass
        else:
            now = timeutils.utcnow_ts()
            message_stats['oldest'] = utils.stat_message(oldest, now)
            message_stats['newest'] = utils.stat_message(newest, now)
        return {'messages': message_stats}
def _get_scoped_query(name, project):
    """Build the MongoDB filter matching the project-scoped topic name."""
    scoped_name = utils.scope_queue_name(name, project)
    return {'p_t': scoped_name}
| openstack/zaqar | zaqar/storage/mongodb/topic_messages.py | Python | apache-2.0 | 40,290 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 theo crevon
#
# See the file LICENSE for copying permission.
# Version components; joined with '.' below to form __version__ ("0.5c").
version = (0, "5c")
__title__ = "py-elevator"
__author__ = "Oleiade"
__license__ = "MIT"
__version__ = '.'.join(map(str, version))
# Re-export the public API at package level.
from .client import Elevator
from .batch import WriteBatch
| oleiade/py-elevator | pyelevator/__init__.py | Python | mit | 301 |
# -*- coding: utf-8 -*- vim:encoding=utf-8:
# vim: tabstop=4:shiftwidth=4:softtabstop=4:expandtab
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import warnings
warnings.simplefilter("ignore", DeprecationWarning)
from django.core.management.base import BaseCommand
import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.core.mail.message import EmailMessage
class Command(BaseCommand):
    """Email a notification to every active user whose last login is older
    than ``settings.IDLE_ACCOUNT_NOTIFICATION_DAYS`` days."""
    args = ''
    help = 'Fetches the idle users; users that have not logged in for the last %s days' % (
        settings.IDLE_ACCOUNT_NOTIFICATION_DAYS
    )

    def handle(self, *args, **options):
        """Collect idle users and send each one an idle-account email."""
        cutoff = datetime.datetime.now() - datetime.timedelta(
            days=int(settings.IDLE_ACCOUNT_NOTIFICATION_DAYS)
        )
        # Only active users that actually have an email address.
        idle_users = [
            {'email': u.email, 'username': u.username}
            for u in User.objects.filter(is_active=True, last_login__lte=cutoff)
            if u.email
        ]
        if not idle_users:
            return
        # BUG FIX: previously "service" was only assigned when
        # settings.BRANDING existed, so rendering the template raised
        # NameError on installations without that setting.
        service = getattr(settings, 'BRANDING', None)
        for user in idle_users:
            email = render_to_string(
                "users/emails/idle_account.txt",
                {
                    "site": Site.objects.get_current(),
                    "days": settings.IDLE_ACCOUNT_NOTIFICATION_DAYS,
                    "service": service,
                    "user": user
                }
            )
            self.send_new_mail(
                _("%sIdle Account Notification") % settings.EMAIL_SUBJECT_PREFIX,
                email,
                settings.SERVER_EMAIL,
                [user.get('email')],
                [],
            )

    def send_new_mail(
        self,
        subject,
        message,
        from_email,
        recipient_list,
        bcc_list
    ):
        """Send one email; returns the number of messages delivered."""
        return EmailMessage(subject, message, from_email, recipient_list, bcc_list).send()
| irregulator/ganetimgr | accounts/management/commands/idle_accounts.py | Python | gpl-3.0 | 2,811 |
# Capture one full line of input from stdin.
user_line = input()
# Greet first, then echo back what was read.
print('Hello, World.')
print(user_line)
#-*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from crm import crm
from datetime import datetime
from osv import fields,osv
from tools.translate import _
import binascii
import time
import tools
from crm import wizard
# Let the CRM "compose mail" wizard operate on project issues as well.
wizard.mail_compose_message.SUPPORTED_MODELS.append('project.issue')
class project_issue_version(osv.osv):
    """Product/software version an issue can be reported against."""
    _name = "project.issue.version"
    _order = "name desc"
    _columns = {
        'name': fields.char('Version Number', size=32, required=True),
        # Inactive versions are hidden from selection but kept for history.
        'active': fields.boolean('Active', required=False),
    }
    _defaults = {
        'active': 1,
    }
# Instantiate to register the model with the ORM (pre-v7 convention).
project_issue_version()
class project_issue(crm.crm_case, osv.osv):
    """Project issue/bug tracker, combining CRM case behavior with the ORM
    and the mail thread mixin for message logging."""
    _name = "project.issue"
    _description = "Project Issue"
    _order = "priority, create_date desc"
    _inherit = ['mail.thread']
def write(self, cr, uid, ids, vals, context=None):
#Update last action date everytime the user change the stage, the state or send a new email
logged_fields = ['type_id', 'state', 'message_ids']
if any([field in vals for field in logged_fields]):
vals['date_action_last'] = time.strftime('%Y-%m-%d %H:%M:%S')
return super(project_issue, self).write(cr, uid, ids, vals, context)
def case_open(self, cr, uid, ids, *args):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of case's Ids
@param *args: Give Tuple Value
"""
res = super(project_issue, self).case_open(cr, uid, ids, *args)
self.write(cr, uid, ids, {'date_open': time.strftime('%Y-%m-%d %H:%M:%S'), 'user_id' : uid})
for (id, name) in self.name_get(cr, uid, ids):
message = _("Issue '%s' has been opened.") % name
self.log(cr, uid, id, message)
return res
def case_close(self, cr, uid, ids, *args):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of case's Ids
@param *args: Give Tuple Value
"""
res = super(project_issue, self).case_close(cr, uid, ids, *args)
for (id, name) in self.name_get(cr, uid, ids):
message = _("Issue '%s' has been closed.") % name
self.log(cr, uid, id, message)
return res
    def _compute_day(self, cr, uid, ids, fields, args, context=None):
        """Compute the date-derived functional fields of issues.

        For each issue id, depending on the requested field names, computes:
        days/working hours to open or close, days since creation, and days of
        inactivity. Working-hours figures use the project's resource calendar.

        :return: {issue_id: {field_name: value}}
        """
        cal_obj = self.pool.get('resource.calendar')
        res_obj = self.pool.get('resource.resource')
        res = {}
        for issue in self.browse(cr, uid, ids, context=context):
            res[issue.id] = {}
            for field in fields:
                duration = 0
                ans = False
                hours = 0
                date_create = datetime.strptime(issue.create_date, "%Y-%m-%d %H:%M:%S")
                if field in ['working_hours_open','day_open']:
                    if issue.date_open:
                        date_open = datetime.strptime(issue.date_open, "%Y-%m-%d %H:%M:%S")
                        ans = date_open - date_create
                        date_until = issue.date_open
                        #Calculating no. of working hours to open the issue
                        hours = cal_obj.interval_hours_get(cr, uid, issue.project_id.resource_calendar_id.id,
                                                           date_create,
                                                           date_open)
                elif field in ['working_hours_close','day_close']:
                    if issue.date_closed:
                        date_close = datetime.strptime(issue.date_closed, "%Y-%m-%d %H:%M:%S")
                        date_until = issue.date_closed
                        ans = date_close - date_create
                        #Calculating no. of working hours to close the issue
                        hours = cal_obj.interval_hours_get(cr, uid, issue.project_id.resource_calendar_id.id,
                                                           date_create,
                                                           date_close)
                elif field in ['days_since_creation']:
                    if issue.create_date:
                        days_since_creation = datetime.today() - datetime.strptime(issue.create_date, "%Y-%m-%d %H:%M:%S")
                        res[issue.id][field] = days_since_creation.days
                    continue
                elif field in ['inactivity_days']:
                    res[issue.id][field] = 0
                    if issue.date_action_last:
                        inactive_days = datetime.today() - datetime.strptime(issue.date_action_last, '%Y-%m-%d %H:%M:%S')
                        res[issue.id][field] = inactive_days.days
                    continue
                if ans:
                    # Map the assignee to a resource so calendar lookups can
                    # take individual working time into account.
                    resource_id = False
                    if issue.user_id:
                        resource_ids = res_obj.search(cr, uid, [('user_id','=',issue.user_id.id)])
                        if resource_ids and len(resource_ids):
                            resource_id = resource_ids[0]
                    duration = float(ans.days)
                    if issue.project_id and issue.project_id.resource_calendar_id:
                        duration = float(ans.days) * 24
                        new_dates = cal_obj.interval_min_get(cr, uid,
                                                             issue.project_id.resource_calendar_id.id,
                                                             date_create,
                                                             duration, resource=resource_id)
                        no_days = []
                        date_until = datetime.strptime(date_until, '%Y-%m-%d %H:%M:%S')
                        # NOTE(review): ``in_time.date`` is the bound method,
                        # not ``in_time.date()`` — likely intended to collect
                        # distinct calendar days; confirm before changing.
                        for in_time, out_time in new_dates:
                            if in_time.date not in no_days:
                                no_days.append(in_time.date)
                            if out_time > date_until:
                                break
                        duration = len(no_days)
                if field in ['working_hours_open','working_hours_close']:
                    res[issue.id][field] = hours
                else:
                    res[issue.id][field] = abs(float(duration))
        return res
def _get_issue_task(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for task in self.pool.get('project.task').browse(cr, uid, ids, context=context):
issues += issue_pool.search(cr, uid, [('task_id','=',task.id)])
return issues
def _get_issue_work(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id:
issues += issue_pool.search(cr, uid, [('task_id','=',work.task_id.id)])
return issues
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
task_pool = self.pool.get('project.task')
res = {}
for issue in self.browse(cr, uid, ids, context=context):
progress = 0.0
if issue.task_id:
progress = task_pool._hours_get(cr, uid, [issue.task_id.id], field_names, args, context=context)[issue.task_id.id]['progress']
res[issue.id] = {'progress' : progress}
return res
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Issue', size=128, required=True),
'active': fields.boolean('Active', required=False),
'create_date': fields.datetime('Creation Date', readonly=True,select=True),
'write_date': fields.datetime('Update Date', readonly=True),
'days_since_creation': fields.function(_compute_day, string='Days since creation date', \
multi='compute_day', type="integer", help="Difference in days between creation date and current date"),
'date_deadline': fields.date('Deadline'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Sales team to which Case belongs to.\
Define Responsible user and Email account for mail gateway.'),
'partner_id': fields.many2one('res.partner', 'Partner', select=1),
'partner_address_id': fields.many2one('res.partner.address', 'Partner Contact', \
domain="[('partner_id','=',partner_id)]"),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Description'),
'state': fields.selection([('draft', 'New'), ('open', 'In Progress'), ('cancel', 'Cancelled'), ('done', 'Done'),('pending', 'Pending'), ], 'State', size=16, readonly=True,
help='The state is set to \'Draft\', when a case is created.\
\nIf the case is in progress the state is set to \'Open\'.\
\nWhen the case is over, the state is set to \'Done\'.\
\nIf the case needs to be reviewed then the state is set to \'Pending\'.'),
'email_from': fields.char('Email', size=128, help="These people will receive email.", select=1),
'email_cc': fields.char('Watchers Emails', size=256, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'date_open': fields.datetime('Opened', readonly=True,select=True),
# Project Issue fields
'date_closed': fields.datetime('Closed', readonly=True,select=True),
'date': fields.datetime('Date'),
'channel_id': fields.many2one('crm.case.channel', 'Channel', help="Communication channel."),
'categ_id': fields.many2one('crm.case.categ', 'Category', domain="[('object_id.model', '=', 'crm.project.bug')]"),
'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority', select=True),
'version_id': fields.many2one('project.issue.version', 'Version'),
'type_id': fields.many2one ('project.task.type', 'Stages', domain="[('project_ids', '=', project_id)]"),
'project_id':fields.many2one('project.project', 'Project'),
'duration': fields.float('Duration'),
'task_id': fields.many2one('project.task', 'Task', domain="[('project_id','=',project_id)]"),
'day_open': fields.function(_compute_day, string='Days to Open', \
multi='compute_day', type="float", store=True),
'day_close': fields.function(_compute_day, string='Days to Close', \
multi='compute_day', type="float", store=True),
'user_id': fields.many2one('res.users', 'Assigned to', required=False, select=1),
'working_hours_open': fields.function(_compute_day, string='Working Hours to Open the Issue', \
multi='compute_day', type="float", store=True),
'working_hours_close': fields.function(_compute_day, string='Working Hours to Close the Issue', \
multi='compute_day', type="float", store=True),
'inactivity_days': fields.function(_compute_day, string='Days since last action', \
multi='compute_day', type="integer", help="Difference in days between last action and current date"),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'user_email', type='char', string='User Email', readonly=True),
'message_ids': fields.one2many('mail.message', 'res_id', 'Messages', domain=[('model','=',_name)]),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'progress': fields.function(_hours_get, string='Progress (%)', multi='hours', group_operator="avg", help="Computed as: Time Spent / Total Time.",
store = {
'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['task_id'], 10),
'project.task': (_get_issue_task, ['progress'], 10),
'project.task.work': (_get_issue_work, ['hours'], 10),
}),
}
def _get_project(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.context_project_id:
return user.context_project_id.id
return False
    def on_change_project(self, cr, uid, ids, project_id, context=None):
        """Onchange hook for the project field; currently updates nothing."""
        return {}
_defaults = {
'active': 1,
'partner_id': crm.crm_case._get_default_partner,
'partner_address_id': crm.crm_case._get_default_partner_address,
'email_from': crm.crm_case._get_default_email,
'state': 'draft',
'section_id': crm.crm_case._get_section,
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
'priority': crm.AVAILABLE_PRIORITIES[2][0],
'project_id':_get_project,
'categ_id' : lambda *a: False,
}
    def set_priority(self, cr, uid, ids, priority):
        """Write the given priority value on the issues."""
        return self.write(cr, uid, ids, {'priority' : priority})
    def set_high_priority(self, cr, uid, ids, *args):
        """Shortcut: set the issues' priority to '1' (high)."""
        return self.set_priority(cr, uid, ids, '1')
    def set_normal_priority(self, cr, uid, ids, *args):
        """Shortcut: set the issues' priority to '3' (normal)."""
        return self.set_priority(cr, uid, ids, '3')
    def convert_issue_task(self, cr, uid, ids, context=None):
        """Create a project task for each issue and open the task form.

        Each issue is linked to its new task and set to 'pending'.
        NOTE(review): the returned action points only at the task created
        for the *last* issue, and an empty ``ids`` would raise NameError on
        ``new_task_id`` — confirm callers always pass at least one id.
        """
        case_obj = self.pool.get('project.issue')
        data_obj = self.pool.get('ir.model.data')
        task_obj = self.pool.get('project.task')
        if context is None:
            context = {}
        # Resolve the task search/form/tree views used by the returned action.
        result = data_obj._get_id(cr, uid, 'project', 'view_task_search_form')
        res = data_obj.read(cr, uid, result, ['res_id'])
        id2 = data_obj._get_id(cr, uid, 'project', 'view_task_form2')
        id3 = data_obj._get_id(cr, uid, 'project', 'view_task_tree2')
        if id2:
            id2 = data_obj.browse(cr, uid, id2, context=context).res_id
        if id3:
            id3 = data_obj.browse(cr, uid, id3, context=context).res_id
        for bug in case_obj.browse(cr, uid, ids, context=context):
            new_task_id = task_obj.create(cr, uid, {
                'name': bug.name,
                'partner_id': bug.partner_id.id,
                'description':bug.description,
                'date_deadline': bug.date,
                'project_id': bug.project_id.id,
                # priority must be in ['0','1','2','3','4'], while bug.priority is in ['1','2','3','4','5']
                'priority': str(int(bug.priority) - 1),
                'user_id': bug.user_id.id,
                'planned_hours': 0.0,
            })
            vals = {
                'task_id': new_task_id,
                'state':'pending'
            }
            case_obj.write(cr, uid, [bug.id], vals)
        return {
            'name': _('Tasks'),
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'project.task',
            'res_id': int(new_task_id),
            'view_id': False,
            'views': [(id2,'form'),(id3,'tree'),(False,'calendar'),(False,'graph')],
            'type': 'ir.actions.act_window',
            'search_view_id': res['res_id'],
            'nodestroy': True
        }
def _convert(self, cr, uid, ids, xml_id, context=None):
data_obj = self.pool.get('ir.model.data')
id2 = data_obj._get_id(cr, uid, 'project_issue', xml_id)
categ_id = False
if id2:
categ_id = data_obj.browse(cr, uid, id2, context=context).res_id
if categ_id:
self.write(cr, uid, ids, {'categ_id': categ_id})
return True
    def convert_to_feature(self, cr, uid, ids, context=None):
        """Tag the issues with the 'feature request' category from XML data."""
        return self._convert(cr, uid, ids, 'feature_request_categ', context=context)
    def convert_to_bug(self, cr, uid, ids, context=None):
        """Tag the issues with the 'bug' category from XML data."""
        return self._convert(cr, uid, ids, 'bug_categ', context=context)
def next_type(self, cr, uid, ids, *args):
for task in self.browse(cr, uid, ids):
typeid = task.type_id.id
types = map(lambda x:x.id, task.project_id.type_ids or [])
if types:
if not typeid:
self.write(cr, uid, task.id, {'type_id': types[0]})
elif typeid and typeid in types and types.index(typeid) != len(types)-1 :
index = types.index(typeid)
self.write(cr, uid, task.id, {'type_id': types[index+1]})
return True
def prev_type(self, cr, uid, ids, *args):
for task in self.browse(cr, uid, ids):
typeid = task.type_id.id
types = map(lambda x:x.id, task.project_id and task.project_id.type_ids or [])
if types:
if typeid and typeid in types:
index = types.index(typeid)
self.write(cr, uid, task.id, {'type_id': index and types[index-1] or False})
return True
def onchange_task_id(self, cr, uid, ids, task_id, context=None):
result = {}
if not task_id:
return {'value':{}}
task = self.pool.get('project.task').browse(cr, uid, task_id, context=context)
return {'value':{'user_id': task.user_id.id,}}
    def case_escalate(self, cr, uid, ids, *args):
        """Escalate each issue to the project configured as its escalation
        target, reassigning the linked task and resetting the state to draft.

        :raises osv.except_osv: when the issue's project has no escalation
            project configured.
        """
        cases = self.browse(cr, uid, ids)
        for case in cases:
            data = {'state' : 'draft'}
            if case.project_id.project_escalation_id:
                data['project_id'] = case.project_id.project_escalation_id.id
                if case.project_id.project_escalation_id.user_id:
                    data['user_id'] = case.project_id.project_escalation_id.user_id.id
                if case.task_id:
                    # Move the linked task to the escalation project and
                    # clear its assignee.
                    self.pool.get('project.task').write(cr, uid, [case.task_id.id], {'project_id': data['project_id'], 'user_id': False})
            else:
                raise osv.except_osv(_('Warning !'), _('You cannot escalate this issue.\nThe relevant Project has not configured the Escalation Project!'))
            self.write(cr, uid, [case.id], data)
        self.message_append(cr, uid, cases, _('Escalate'))
        return True
def message_new(self, cr, uid, msg, custom_values=None, context=None):
"""Automatically called when new email message arrives"""
if context is None:
context = {}
subject = msg.get('subject') or _('No Title')
body = msg.get('body_text')
msg_from = msg.get('from')
priority = msg.get('priority')
vals = {
'name': subject,
'email_from': msg_from,
'email_cc': msg.get('cc'),
'description': body,
'user_id': False,
}
if priority:
vals['priority'] = priority
vals.update(self.message_partner_by_email(cr, uid, msg_from))
context.update({'state_to' : 'draft'})
if custom_values and isinstance(custom_values, dict):
vals.update(custom_values)
res_id = self.create(cr, uid, vals, context)
self.message_append_dict(cr, uid, [res_id], msg, context=context)
if 'categ_id' not in vals:
self.convert_to_bug(cr, uid, [res_id], context=context)
return res_id
    def message_update(self, cr, uid, ids, msg, vals=None, default_act='pending', context=None):
        """Mail-gateway hook: update existing issues from a follow-up email.

        Refreshes the description, re-opens pending/done issues, and parses
        "command" lines from the body (cost/revenue/probability) into field
        values via ``tools.misc.command_re``.
        """
        if vals is None:
            vals = {}
        if isinstance(ids, (str, int, long)):
            ids = [ids]
        vals.update({
            'description': msg['body_text']
        })
        if msg.get('priority', False):
            vals['priority'] = msg.get('priority')
        # Mapping of command keywords in the mail body to model fields.
        maps = {
            'cost': 'planned_cost',
            'revenue': 'planned_revenue',
            'probability': 'probability'
        }
        # Reassign the 'open' state to the case if this one is in pending or done
        for record in self.browse(cr, uid, ids, context=context):
            if record.state in ('pending', 'done'):
                record.write({'state' : 'open'})
        vls = { }
        for line in msg['body_text'].split('\n'):
            line = line.strip()
            res = tools.misc.command_re.match(line)
            if res and maps.get(res.group(1).lower(), False):
                key = maps.get(res.group(1).lower())
                vls[key] = res.group(2).lower()
        vals.update(vls)
        res = self.write(cr, uid, ids, vals)
        self.message_append_dict(cr, uid, ids, msg, context=context)
        return res
def copy(self, cr, uid, id, default=None, context=None):
issue = self.read(cr, uid, id, ['name'], context=context)
if not default:
default = {}
default = default.copy()
default['name'] = issue['name'] + _(' (copy)')
return super(project_issue, self).copy(cr, uid, id, default=default,
context=context)
project_issue()
class project(osv.osv):
    """Project extension: escalation target and reply-to address for issues."""
    _inherit = "project.project"
    _columns = {
        'project_escalation_id' : fields.many2one('project.project','Project Escalation', help='If any issue is escalated from the current Project, it will be listed under the project selected here.', states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
        'reply_to' : fields.char('Reply-To Email Address', size=256)
    }
    def _check_escalation(self, cr, uid, ids, context=None):
        """Constraint: a project must not escalate to itself.

        BUG FIX: the original only validated ``ids[0]``, so invalid records
        slipped through on multi-record writes; every record is checked now.
        """
        for project_obj in self.browse(cr, uid, ids, context=context):
            if (project_obj.project_escalation_id and
                    project_obj.project_escalation_id.id == project_obj.id):
                return False
        return True
    _constraints = [
        (_check_escalation, 'Error! You cannot assign escalation to the same project!', ['project_escalation_id'])
    ]
project()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ksrajkumar/openerp-6.1 | openerp/addons/project_issue/project_issue.py | Python | agpl-3.0 | 23,930 |
# coding: utf-8
__author__ = 'Junki Ishida'
from ._compat import str_types, int_types, int_or_float_types, raise_with_inner, PY2
from .exceptions import FormatError
from decimal import Decimal
from datetime import datetime, date
try:
import dateutil.parser
except ImportError:
pass
def str_to_str(value):
    """Pass a string value through unchanged; reject non-strings."""
    if not isinstance(value, str_types):
        raise FormatError()
    return value
def str_to_unicode(value):
    """Return *value* as unicode text, decoding bytes on Python 2."""
    if not isinstance(value, str_types):
        raise FormatError()
    if PY2 and isinstance(value, bytes):
        return value.decode()
    return value
def str_or_none_to_str(value):
    """Map None to '' and pass strings through; reject anything else."""
    if value is None:
        return ''
    if not isinstance(value, str_types):
        raise FormatError()
    return value
def str_or_none_to_unicode(value):
    """Map None to '' and return unicode text, decoding bytes on Python 2."""
    if value is None:
        value = ''
    if not isinstance(value, str_types):
        raise FormatError()
    if PY2 and isinstance(value, bytes):
        return value.decode()
    return value
def str_to_int(value):
    """Parse a decimal string into an int; reject non-strings."""
    if not isinstance(value, str_types):
        raise FormatError()
    return int(value)
def str_to_float(value):
    """Parse a numeric string into a float; reject non-strings."""
    if not isinstance(value, str_types):
        raise FormatError()
    return float(value)
def str_to_decimal(value):
    """Parse a numeric string into a Decimal; reject non-strings."""
    if not isinstance(value, str_types):
        raise FormatError()
    return Decimal(value)
def str_to_bool(value):
    """Parse 'true'/'false' (case-insensitive) into a bool."""
    if isinstance(value, str_types):
        lowered = value.lower()
        if lowered == 'true':
            return True
        if lowered == 'false':
            return False
    raise FormatError()
def int_to_str(value):
    """Render an integer as its str() form; reject other types."""
    if not isinstance(value, int_types):
        raise FormatError()
    return str(value)
def int_to_int(value):
    """Pass an integer through unchanged; reject other types."""
    if not isinstance(value, int_types):
        raise FormatError()
    return value
def float_to_float(value):
    """Pass a float through unchanged; reject other types."""
    if not isinstance(value, float):
        raise FormatError()
    return value
def int_or_float_to_float(value):
    """Coerce an int to float, pass a float through; reject other types."""
    if isinstance(value, int_types):
        return float(value)
    if isinstance(value, float):
        return value
    raise FormatError()
def int_or_float_to_str(value):
    """Render an int or float as its str() form; reject other types."""
    if not isinstance(value, int_or_float_types):
        raise FormatError()
    return str(value)
def float_to_decimal(value):
    """Convert a float to Decimal via its string form.

    Going through ``str(value)`` keeps the human-readable value rather than
    the binary-float expansion. BUG FIX: the original passed the *float type
    object* to ``str()`` (``Decimal(str(float))``), which made every call
    raise ``decimal.InvalidOperation``.
    """
    if not isinstance(value, float):
        raise FormatError()
    return Decimal(str(value))
def int_or_float_to_decimal(value):
    """Convert an int or float to Decimal (floats via their string form).

    BUG FIX: the float branch of the original passed the *float type object*
    to ``str()`` (``Decimal(str(float))``), which made every float input
    raise ``decimal.InvalidOperation``.
    """
    if isinstance(value, int_types):
        return Decimal(value)
    if isinstance(value, float):
        return Decimal(str(value))
    raise FormatError()
def bool_to_str(value):
    """Render True/False as the JSON-style 'true'/'false'."""
    if not isinstance(value, bool):
        raise FormatError()
    return 'true' if value else 'false'
def bool_to_bool(value):
    """Pass a bool through unchanged; reject other types."""
    if not isinstance(value, bool):
        raise FormatError()
    return value
def decimal_to_str(value):
    """Render a Decimal as its str() form; reject other types."""
    if not isinstance(value, Decimal):
        raise FormatError()
    return str(value)
def str_to_datetime(value, format, timezone, flexible):
    """Parse a datetime string; use dateutil when *flexible*, otherwise
    strptime with *format*, converting to *timezone* when one is given."""
    if not isinstance(value, str_types):
        raise FormatError()
    try:
        if flexible:
            parsed = dateutil.parser.parse(value)
        else:
            parsed = datetime.strptime(value, format)
        if timezone:
            return parsed.astimezone(timezone)
    except ValueError as e:
        raise_with_inner(FormatError, e)
    return parsed
def datetime_to_str(value, format, timezone, flexible):
    """Format a datetime with *format*, converting to *timezone* first when
    one is given. (*flexible* is accepted for interface symmetry.)"""
    if not isinstance(value, datetime):
        raise FormatError()
    localized = value
    if timezone:
        try:
            localized = value.astimezone(timezone)
        except ValueError as e:
            raise_with_inner(FormatError, e)
    return localized.strftime(format)
def str_to_date(value, format):
    """Parse a date string with *format* into a datetime.date."""
    if not isinstance(value, str_types):
        raise FormatError()
    try:
        parsed = datetime.strptime(value, format)
    except ValueError as e:
        raise_with_inner(FormatError, e)
    return date(parsed.year, parsed.month, parsed.day)
def date_to_str(value, format):
    """Format a date with *format*; reject non-date values."""
    if not isinstance(value, date):
        raise FormatError()
    return value.strftime(format)
def str_to_enum(value, values):
    """Accept *value* only when it is one of the allowed *values*."""
    if value in values:
        return value
    raise FormatError()
def enum_to_str(value, values):
    """Serialize *value* only when it is one of the allowed *values*."""
    if value in values:
        return value
    raise FormatError()
| gomafutofu/mbserializer | mbserializer/converters.py | Python | mit | 4,218 |
# cobra.flux_analysis.reaction.py
# functions for analyzing / creating objective functions
from ..core.Reaction import Reaction
from six import iteritems
def assess(model, reaction, flux_coefficient_cutoff=0.001):
    """Assess whether the model can supply the precursors and absorb the
    products of *reaction* while it runs at or above the cutoff flux.

    model: A :class:`~cobra.core.Model` object
    reaction: A :class:`~cobra.core.Reaction` object
    flux_coefficient_cutoff: Float. The minimum flux that reaction must carry
    to be considered active.

    returns: True when the reaction can reach the cutoff flux; otherwise a
    dict {'precursors': ..., 'products': ...} holding the results of
    assess_precursors and assess_products respectively.
    """
    reaction = model.reactions.get_by_id(reaction.id)
    model.optimize(new_objective={reaction: 1})
    if model.solution.f >= flux_coefficient_cutoff:
        return True
    return {
        'precursors': assess_precursors(
            model, reaction, flux_coefficient_cutoff),
        'products': assess_products(
            model, reaction, flux_coefficient_cutoff),
    }
def assess_precursors(model, reaction, flux_coefficient_cutoff=0.001):
    """Assesses the ability of the model to provide sufficient precursors for
    a reaction operating at, or beyond, the specified cutoff.

    model: A :class:`~cobra.core.Model` object
    reaction: A :class:`~cobra.core.Reaction` object
    flux_coefficient_cutoff: Float. The minimum flux that reaction must carry
    to be considered active.

    returns: True if the precursors can be simultaneously produced at the
    specified cutoff. False, if the model has the capacity to produce each
    individual precursor at the specified threshold but not all precursors at
    the required level simultaneously. Otherwise a dictionary of the required
    and the produced fluxes for each reactant that is not produced in
    sufficient quantities.
    """
    model = model.copy()
    reaction = model.reactions.get_by_id(reaction.id)
    model.optimize(new_objective={reaction: 1})
    if model.solution.f >= flux_coefficient_cutoff:
        return True
    simulation_results = {}
    # Build a sink reaction per precursor so its production can be maximized.
    sink_reactions = {}
    for the_component in reaction.get_reactants():
        sink_reaction = Reaction('test_sink_%s' % the_component.id)
        coefficient = reaction.get_coefficient(the_component)
        sink_reaction.add_metabolites({the_component: coefficient})
        sink_reaction.upper_bound = 1000
        sink_reactions[sink_reaction] = (the_component, coefficient)
    # First assess whether all precursors can be produced simultaneously.
    # (Loop variable renamed so it no longer shadows the `reaction` argument.)
    super_sink = Reaction("super_sink")
    for sink_reaction in sink_reactions:
        super_sink += sink_reaction
    super_sink.id = 'super_sink'
    # list(...) keeps this Python 3 compatible: dict.keys() is a view there
    # and cannot be concatenated to a list with "+".
    model.add_reactions(list(sink_reactions) + [super_sink])
    model.optimize(new_objective=super_sink)
    if flux_coefficient_cutoff <= model.solution.f:
        return True
    # Otherwise assess the ability of the model to produce each precursor
    # individually.
    for sink_reaction, (component, coefficient) in iteritems(sink_reactions):
        # Calculate the maximum amount of the metabolite that can be produced.
        model.optimize(new_objective=sink_reaction)
        if flux_coefficient_cutoff > model.solution.f:
            # Scale the results to a single unit of the metabolite.
            simulation_results[component] = {
                'required': flux_coefficient_cutoff / abs(coefficient),
                'produced': model.solution.f / abs(coefficient),
            }
    if not simulation_results:
        simulation_results = False
    return simulation_results
def assess_products(model, reaction, flux_coefficient_cutoff=0.001):
    """Assesses whether the model has the capacity to absorb the products of
    a reaction at a given flux rate. Useful for identifying which components
    might be blocking a reaction from achieving a specific flux rate.

    model: A :class:`~cobra.core.Model` object
    reaction: A :class:`~cobra.core.Reaction` object
    flux_coefficient_cutoff: Float. The minimum flux that reaction must carry
    to be considered active.

    returns: True if the model can absorb all reaction products
    simultaneously at the specified cutoff. False, if the model has the
    capacity to absorb each individual product but not all products at the
    required level simultaneously. Otherwise a dictionary of the required and
    the capacity fluxes for each product that is not absorbed in sufficient
    quantities.
    """
    model = model.copy()
    reaction = model.reactions.get_by_id(reaction.id)
    model.optimize(new_objective={reaction: 1})
    if model.solution.f >= flux_coefficient_cutoff:
        return True
    simulation_results = {}
    # Build a source reaction per product so its absorption can be maximized.
    source_reactions = {}
    for the_component in reaction.get_products():
        source_reaction = Reaction('test_source_%s' % the_component.id)
        coefficient = reaction.get_coefficient(the_component)
        source_reaction.add_metabolites({the_component: coefficient})
        source_reaction.upper_bound = 1000
        source_reactions[source_reaction] = (the_component, coefficient)
    # First assess whether all products can be absorbed simultaneously.
    # (Loop variable renamed so it no longer shadows the `reaction` argument.)
    super_source = Reaction('super_source')
    for source_reaction in source_reactions:
        super_source += source_reaction
    super_source.id = 'super_source'
    # list(...) keeps this Python 3 compatible: dict.keys() is a view there
    # and cannot be concatenated to a list with "+".
    model.add_reactions(list(source_reactions) + [super_source])
    model.optimize(new_objective=super_source)
    if flux_coefficient_cutoff <= model.solution.f:
        return True
    # Now assess the model's capacity for each product individually.
    for source_reaction, (component, coefficient) in \
            iteritems(source_reactions):
        # Calculate the maximum amount of the metabolite that can be absorbed.
        model.optimize(new_objective=source_reaction)
        if flux_coefficient_cutoff > model.solution.f:
            # Scale the results to a single unit of the metabolite.
            simulation_results[component] = {
                'required': flux_coefficient_cutoff / abs(coefficient),
                'capacity': model.solution.f / abs(coefficient),
            }
    if not simulation_results:
        simulation_results = False
    return simulation_results
| aebrahim/cobrapy | cobra/flux_analysis/reaction.py | Python | lgpl-2.1 | 7,333 |
# -*- coding: utf8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser
from django.db import models
from datetime import datetime
from utils.common_utils import *
# Create your models here.
class UserProfile(AbstractUser):
    """Custom user model extending Django's AbstractUser with profile fields."""
    # Display name shown instead of the login username.
    nickname = models.CharField(max_length=60, verbose_name='昵称')
    birthday = models.DateField(blank=True, null=True, verbose_name='生日')
    # Stored value is 'male'/'female'; the Chinese text is the admin label.
    gender = models.CharField(choices=(('male', '男'), ('female', '女')), max_length=10, verbose_name='性别')
    address = models.CharField(max_length=200, verbose_name='地址')
    mobile = models.CharField(max_length=20, verbose_name='移动电话', blank=True, null=True)
    # Avatar upload path is bucketed by year/month; ships with a default image.
    image = models.ImageField(max_length=200, upload_to='image/%Y/%m', default='image/default/avatar.png', verbose_name='头像')
    website = models.CharField(max_length=50, verbose_name='个人网站', blank=True, default='')
    # Counters for favourited topics / nodes / users (per the verbose names);
    # presumably kept in sync by the favouriting views — confirm against callers.
    fav_topic_nums = models.IntegerField(default=0, verbose_name='收藏主题数')
    fav_node_nums = models.IntegerField(default=0, verbose_name='收藏节点数')
    fav_user_nums = models.IntegerField(default=0, verbose_name='收藏用户数')

    class Meta:
        verbose_name = '用户信息'
        verbose_name_plural = verbose_name

    def __unicode__(self):
        # Python 2 string representation: the login name.
        return self.username
class EmailVerifyRecord(models.Model):
    """One-off e-mail verification codes sent for registration / password reset /
    e-mail change."""
    code = models.CharField(max_length=50, verbose_name='验证码')
    email = models.EmailField(max_length=50, verbose_name='邮箱')
    # Bug fix: Django `choices` expects (stored_value, human_readable_label)
    # pairs; the original tuples were reversed, so the Chinese label would have
    # been stored in the database. This also matches the (value, label) order
    # used by UserProfile.gender in this module.
    # NOTE(review): rows already saved with the reversed values need a data
    # migration.
    send_type = models.CharField(choices=(('register', '用户注册'), ('forgot_pwd', '忘记密码'),
                                          ('update_email', '更改注册邮箱')), verbose_name='验证码类型', max_length=50)
    # Timestamp of when the code was sent (callable default, evaluated per row).
    send_time = models.DateTimeField(default=datetime.now)

    class Meta:
        verbose_name = '邮箱验证码'
        verbose_name_plural = verbose_name

    def __unicode__(self):
        return self.send_type + self.email
| unknowfly/npa-bbs | NpaForum/apps/users/models.py | Python | mit | 1,981 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
import random
from fife import fife
from fife.extensions import pychan
from fife.extensions.pychan.tools import callbackWithArguments as cbwa
from fife.extensions.fife_timer import Timer
import scripts.test as test
class KeyListener(fife.IKeyListener):
    """Keyboard handler for the test: pressing 't' toggles the grid renderer."""

    def __init__(self, test):
        self._engine = test._engine
        self._test = test
        self._eventmanager = self._engine.getEventManager()
        fife.IKeyListener.__init__(self)

    def keyPressed(self, evt):
        # Fix: the original also fetched evt.getKey().getValue() into an unused
        # local; only the lower-cased string form is needed here.
        keystr = evt.getKey().getAsString().lower()
        if keystr == "t":
            # Toggle grid visibility.
            r = self._test._camera.getRenderer('GridRenderer')
            r.setEnabled(not r.isEnabled())

    def keyReleased(self, evt):
        pass
class MouseListener(fife.IMouseListener):
    """Mouse handler for the test: a click moves the player, the wheel zooms."""

    def __init__(self, test):
        self._test = test
        self._engine = test._engine
        self._eventmanager = self._engine.getEventManager()
        fife.IMouseListener.__init__(self)

    def mousePressed(self, event):
        # Clicks already handled by the GUI are ignored.
        if not event.isConsumedByWidgets():
            target = fife.ScreenPoint(event.getX(), event.getY())
            self._test.movePlayer(target)

    def mouseReleased(self, event):
        pass

    def mouseMoved(self, event):
        # Delegate hover handling (player outlining) to the test itself.
        self._test.mouseMoved(event)

    def mouseEntered(self, event):
        pass

    def mouseExited(self, event):
        pass

    def mouseClicked(self, event):
        pass

    def mouseWheelMovedUp(self, event):
        # Wheel up zooms in (negative delta).
        self._test.setZoom(-0.1)

    def mouseWheelMovedDown(self, event):
        # Wheel down zooms out (positive delta).
        self._test.setZoom(0.1)

    def mouseDragged(self, event):
        pass
class InstanceActionListener(fife.InstanceActionListener):
    """Keeps the ship instances wandering: when an action ends, walk somewhere new."""

    def __init__(self, test):
        self._test = test
        self._engine = test._engine
        fife.InstanceActionListener.__init__(self)

    def onInstanceActionFinished(self, instance, action):
        # Pick a fresh random destination and keep walking at the same speed.
        destination = self._test.createRandomTarget()
        instance.move('walk', destination, 4.0)

    def onInstanceActionCancelled(self, instance, action):
        pass

    def onInstanceActionFrame(self, instance, action, frame):
        pass
class MultiPathfinderTest(test.Test):
    """FIFE demo test exercising the multi-object pathfinder on a grassland map."""

    def create(self, engine, application):
        """Store engine handles and build the map loader (no map loaded yet)."""
        self._application = application
        self._engine = engine
        self._running = False

        self._loader = fife.MapLoader(self._engine.getModel(),
                                      self._engine.getVFS(),
                                      self._engine.getImageManager(),
                                      self._engine.getRenderBackend())

        self._eventmanager = self._engine.getEventManager()
        self._imagemanager = self._engine.getImageManager()

    def destroy(self):
        #any left over cleanup here
        pass

    def run(self):
        """Register input listeners, load the font and the demo map."""
        random.seed()

        self._running = True

        self._mouselistener = MouseListener(self)
        self._eventmanager.addMouseListener(self._mouselistener)

        self._keylistener = KeyListener(self)
        self._eventmanager.addKeyListener(self._keylistener)

        self._actionlistener = InstanceActionListener(self)

        font_path = "data/fonts/rpgfont.png"
        self._font = pychan.internal.get_manager().createFont(font_path)
        if self._font is None:
            # Bug fix: the original raised the undefined name
            # InitializationError with another undefined variable `name`,
            # which would itself have crashed with a NameError instead of
            # reporting the real problem.
            raise RuntimeError("Could not load font %s" % font_path)

        self.loadMap("data/maps/multipathfinder_grassland.xml")

    def stop(self):
        """Tear down the map, objects and input listeners."""
        self._running = False

        self._engine.getModel().deleteMap(self._map)
        self._engine.getModel().deleteObjects()

        self._eventmanager.removeMouseListener(self._mouselistener)
        self._eventmanager.removeKeyListener(self._keylistener)

        del self._mouselistener
        del self._keylistener

    def isRunning(self):
        return self._running

    def getName(self):
        return "MultiPathfinderTest"

    def getAuthor(self):
        return "helios"

    def getDescription(self):
        return "Use this as a template for more complicated tests."

    def getHelp(self):
        return open( 'data/help/MultiPathfinderTest.txt', 'r' ).read()

    def pump(self):
        """
        This gets called every frame that the test is running. We have nothing
        to do here for this test.
        """
        pass

    def loadMap(self, filename):
        """
        Simple function to load and display a map file. We could of course
        have passed in the map filename but I'll leave that up to you.

        @param filename The filename.
        """

        self._mapfilename = filename

        if self._loader.isLoadable(self._mapfilename):
            self._map = self._loader.load(self._mapfilename)
            self._mapLoaded = True

        self._camera = self._map.getCamera("camera1")
        self._actorlayer = self._map.getLayer("item_layer")
        self._groundlayer = self._map.getLayer("ground_layer")
        self._player = self._actorlayer.getInstance("player")

        # Both frigates idle initially; the action listener keeps them moving
        # once their first action finishes.
        self._frigate1 = self._actorlayer.getInstance("frigate1")
        self._frigate1.actOnce("stand", self._frigate1.getFacingLocation())
        self._frigate1.addActionListener(self._actionlistener)

        self._frigate2 = self._actorlayer.getInstance("frigate2")
        self._frigate2.actOnce("stand", self._frigate2.getFacingLocation())
        self._frigate2.addActionListener(self._actionlistener)

        # Keep the camera centred on the player.
        self._camera.setLocation(self._player.getLocation())
        self._camera.attach(self._player)

        self._instance_renderer = fife.InstanceRenderer.getInstance(self._camera)

        # Visualize blockers and computed paths for all three actors.
        cellrenderer = fife.CellRenderer.getInstance(self._camera)
        cellrenderer.addActiveLayer(self._actorlayer)
        cellrenderer.setEnabledBlocking(True)
        cellrenderer.setEnabledPathVisual(True)
        cellrenderer.addPathVisual(self._player)
        cellrenderer.addPathVisual(self._frigate1)
        cellrenderer.addPathVisual(self._frigate2)
        cellrenderer.setEnabled(True)

        coordinaterenderer = fife.CoordinateRenderer.getInstance(self._camera)
        coordinaterenderer.setFont(self._font)
        coordinaterenderer.addActiveLayer(self._actorlayer)
        #coordinaterenderer.setEnabled(True)

        gridrenderer = self._camera.getRenderer('GridRenderer')
        gridrenderer.activateAllLayers(self._map)

    def mouseMoved(self, event):
        """Outline the player instance while the cursor hovers over it."""
        self._instance_renderer.removeOutlined(self._player)

        pt = fife.ScreenPoint(event.getX(), event.getY())
        instances = self._camera.getMatchingInstances(pt, self._actorlayer)

        for i in instances:
            if i.getId() == "player":
                self._instance_renderer.addOutlined(i, 173, 255, 47, 2, 250)
                break

    def setZoom(self, zoom):
        """Adjust the camera zoom by a relative amount."""
        self._camera.setZoom(self._camera.getZoom() + zoom)

    def getLocationAt(self, screenpoint):
        """
        Query the main camera for the Map location (on the actor layer)
        that a screen point refers to.

        @param screenpoint A fife.ScreenPoint
        """

        target_mapcoord = self._camera.toMapCoordinates(screenpoint, False)
        target_mapcoord.z = 0
        location = fife.Location(self._actorlayer)
        location.setMapCoordinates(target_mapcoord)
        return location

    def createRandomTarget(self):
        """Return a random walkable location inside the map's bounds."""
        x = random.randint(-13, 22)
        y = random.randint(-28, 13)
        mc = fife.ModelCoordinate(x,y)
        location = fife.Location(self._actorlayer)
        location.setLayerCoordinates(mc)
        return location

    def movePlayer(self, screenpoint):
        """
        Simple function that moves the player instance to the given screenpoint.

        @param screenpoint A fife.ScreenPoint
        """

        self._player.move('walk', self.getLocationAt(screenpoint), 4.0)
"""
Prepare Sparse Matrix for Sparse Affinity Propagation Clustering (SAP)
"""
# Authors: Huojun Cao <bioinfocao at gmail.com>
# License: BSD 3 clause
import numpy as np
import pandas as pd
import sparseAP_cy # cython for calculation
############################################################################################
#
def copySym(rowBased_row_array, rowBased_col_array, rowBased_data_array, singleRowInds):
    """
    For single-row (or, via swapped arguments, single-column) items, copy a
    symmetric minimal value so each sample has at least two data points.

    For example, if for sample 'A' the only datapoint of
    [s(A,A), s(A,B), s(A,C), ...] is s(A,B), then we copy the minimal value of
    [s(A,A), s(C,A), s(D,A), ...] (excluding s(B,A), because copying s(B,A)
    would still leave 'A' with only one data point).

    Returns the extended (row, col, data) arrays.
    """
    copy_row_array, copy_col_array, copy_data_array = sparseAP_cy.copySingleRows(
        rowBased_row_array, rowBased_col_array, rowBased_data_array, singleRowInds)
    # Materialize the zip: pandas constructors do not accept a bare zip
    # iterator consistently across versions (Python 3 zip is lazy).
    df = pd.DataFrame(list(zip(copy_row_array, copy_col_array, copy_data_array)),
                      columns=['row', 'col', 'data'])
    copy_row_list, copy_col_list, copy_data_list = [], [], []
    for ind in singleRowInds:
        # Candidates: entries in column `ind` excluding the diagonal partner.
        copyData = df[(df.col == ind) & (df.row != ind)].sort_values(['data']).copy()
        # Keep only the single minimal entry (empty slice if no candidate).
        copyData_min = copyData[0:1]
        # Swap row/col to create the mirrored datapoint.
        copy_row_list += list(copyData_min.col)
        copy_col_list += list(copyData_min.row)
        copy_data_list += list(copyData_min.data)
    rowBased_row_array = np.concatenate((rowBased_row_array, copy_row_list))
    rowBased_col_array = np.concatenate((rowBased_col_array, copy_col_list))
    rowBased_data_array = np.concatenate((rowBased_data_array, copy_data_list))
    return rowBased_row_array, rowBased_col_array, rowBased_data_array
def rmSingleSamples(rowBased_row_array, rowBased_col_array, rowBased_data_array, nSamplesOri):
    """
    Affinity/similarity matrix does not need be symmetric, that is s(A,B) does not need be same as s(B,A).
    Also since Affinity/similarity matrix is sparse, it could be that s(A,B) exist but s(B,A) does not exist in the sparse matrix.

    For the FSAPC to work, specifically in computation of R and A matrix, each row/column of Affinity/similarity matrix should have at least two datapoints.
    So in FSAPC, we first remove samples that do not have affinity/similarity with other samples, that is samples that only have affinity/similarity with itself
    And we remove samples only have one symmetric datapoint, for example for sample 'B' only s(B,C) exist and for sample 'C' only s(C,B) exist
    In these two cases, these samples are removed from FSAPC computation and their examplers are set to themself.

    For samples that only have one data (affinity/similarity) with others, For example if for sample 'A', the only datapoint of [s(A,A),s(A,B),s(A,C)...] is s(A,B),
    and there exist at least one value in [s(A,A),s(C,A),s(D,A)...] (except s(B,A), because if we copy s(B,A), for 'A' we still only have one data point)
    then we copy the minimal value of [s(A,A),s(C,A),s(D,A)...]

    nSamplesOri is the number of samples of orignail input data

    Returns (row, col, data, rowLeftOriDict, singleSampleInds, nSamplesLeft).
    """
    # Find rows and cols that only have one datapoint.
    singleRowInds = set(sparseAP_cy.singleItems(rowBased_row_array))
    singleColInds = set(sparseAP_cy.singleItems(rowBased_col_array))
    # Samples with one datapoint in both row and col only relate to themselves.
    singleSampleInds = singleRowInds & singleColInds

    # In case every col/row has more than one datapoint, return original data.
    if len(singleRowInds) == 0 and len(singleColInds) == 0:
        return rowBased_row_array, rowBased_col_array, rowBased_data_array, None, None, nSamplesOri

    # Remove samples that only have affinity/similarity with themselves, or
    # only one symmetric datapoint (e.g. only s(B,C) and s(C,B) exist). Their
    # exemplars are set to themselves by the caller.
    if len(singleSampleInds) > 0:
        # Row indexes left after removing single samples.
        rowLeft = sorted(list(set(range(nSamplesOri)) - singleSampleInds))
        # Maps between original and compacted row indexes.
        rowOriLeftDict = {ori: left for left, ori in enumerate(rowLeft)}
        rowLeftOriDict = {left: ori for ori, left in rowOriLeftDict.items()}
        rowBased_row_array, rowBased_col_array, rowBased_data_array = sparseAP_cy.removeSingleSamples(
            rowBased_row_array, rowBased_col_array, rowBased_data_array, singleSampleInds)
    else:  # no samples are removed
        rowLeftOriDict = None

    # For samples whose row has a single datapoint, copy the minimal value of
    # that sample's column (and vice versa below).
    # NOTE: np.int (a deprecated alias of builtin int, removed in NumPy 1.24)
    # is replaced by int throughout; the resulting dtype is identical.
    singleRowInds = singleRowInds - singleSampleInds
    if len(singleRowInds) > 0:
        rowBased_row_array, rowBased_col_array, rowBased_data_array = copySym(
            rowBased_row_array.astype(int), rowBased_col_array.astype(int),
            rowBased_data_array, singleRowInds)

    # For samples whose col has a single datapoint, copy the minimal value of
    # that sample's row (arguments swapped so copySym works column-wise).
    singleColInds = singleColInds - singleSampleInds
    if len(singleColInds) > 0:
        rowBased_col_array, rowBased_row_array, rowBased_data_array = copySym(
            rowBased_col_array.astype(int), rowBased_row_array.astype(int),
            rowBased_data_array, singleColInds)

    # Re-map row/col indexes if any sample was removed.
    if len(singleSampleInds) > 0:
        changeIndV = np.vectorize(lambda x: rowOriLeftDict[x])
        rowBased_row_array = changeIndV(rowBased_row_array)
        rowBased_col_array = changeIndV(rowBased_col_array)

    # Rearrange entries by (row, col) so downstream code sees sorted input.
    sortedLeftOriInd = np.lexsort((rowBased_col_array, rowBased_row_array)).astype(int)
    rowBased_row_array = sparseAP_cy.npArrRearrange_int_para(rowBased_row_array.astype(int), sortedLeftOriInd)
    rowBased_col_array = sparseAP_cy.npArrRearrange_int_para(rowBased_col_array.astype(int), sortedLeftOriInd)
    rowBased_data_array = sparseAP_cy.npArrRearrange_float_para(rowBased_data_array, sortedLeftOriInd)

    return rowBased_row_array, rowBased_col_array, rowBased_data_array, rowLeftOriDict, singleSampleInds, nSamplesOri - len(singleSampleInds)
def preCompute(rowBased_row_array, rowBased_col_array, S_rowBased_data_array):
    """
    Format the affinity/similarity matrix: build row/col index pointers,
    row<->col permutations, the diagonal (k,k) index, and zero-initialized
    A and R arrays for the message-passing iterations.
    """
    # Get parameters
    data_len = len(S_rowBased_data_array)
    row_indptr = sparseAP_cy.getIndptr(rowBased_row_array)
    if row_indptr[-1] != data_len:
        row_indptr = np.concatenate((row_indptr, np.array([data_len])))
    row_to_col_ind_arr = np.lexsort((rowBased_row_array, rowBased_col_array))
    colBased_row_array = sparseAP_cy.npArrRearrange_int_para(rowBased_row_array, row_to_col_ind_arr)
    colBased_col_array = sparseAP_cy.npArrRearrange_int_para(rowBased_col_array, row_to_col_ind_arr)
    col_to_row_ind_arr = np.lexsort((colBased_col_array, colBased_row_array))
    col_indptr = sparseAP_cy.getIndptr(colBased_col_array)
    if col_indptr[-1] != data_len:
        col_indptr = np.concatenate((col_indptr, np.array([data_len])))
    kk_col_index = sparseAP_cy.getKKIndex(colBased_row_array, colBased_col_array)

    # Initialize matrices A and R (np.zeros yields the same float64 arrays as
    # the original list-multiplication, without the intermediate Python list).
    A_rowbased_data_array = np.zeros(data_len)
    R_rowbased_data_array = np.zeros(data_len)

    # Add a tiny amount of seeded random noise to remove degeneracies.
    random_state = np.random.RandomState(0)
    S_rowBased_data_array += 1e-12 * random_state.randn(data_len) * (np.amax(S_rowBased_data_array) - np.amin(S_rowBased_data_array))

    # Convert permutation arrays to the platform int dtype for the cython code.
    # np.int (deprecated alias of builtin int, removed in NumPy 1.24) replaced
    # by int; the resulting dtype is identical.
    row_to_col_ind_arr = row_to_col_ind_arr.astype(int)
    col_to_row_ind_arr = col_to_row_ind_arr.astype(int)

    return S_rowBased_data_array, A_rowbased_data_array, R_rowbased_data_array, col_indptr, row_indptr, row_to_col_ind_arr, col_to_row_ind_arr, kk_col_index
| bioinfocao/pysapc | pysapc/sparseMatrixPrepare.py | Python | bsd-3-clause | 8,514 |
import os, sys, time
from subprocess import check_call
import pj.api
from pyxc.util import parentOf
EXAMPLES_ROOT = parentOf(parentOf(os.path.abspath(__file__)))
PATH = [
'%s/colorflash/js' % EXAMPLES_ROOT,
'%s/mylib/js' % EXAMPLES_ROOT,
]
def main():
    """Compile the colorflash bundle into build/ in raw, pretty and simple
    Closure modes, timing each step and reusing the bundled JS."""
    check_call(['mkdir', '-p', 'build'])
    mode_to_filename = {
        '': 'colorflash.raw.js',
        'pretty': 'colorflash.pretty.js',
        'simple': 'colorflash.min.simple.js',
    }
    js = None
    for closureMode in ['', 'pretty', 'simple']:
        path = 'build/%s' % mode_to_filename[closureMode]
        sys.stderr.write('%s... ' % path)
        start = time.time()
        if not js:
            # Build the bundle only once; later modes recompile the same JS.
            js = pj.api.buildBundle('colorflash.colorflash', path=PATH)
        with open(path, 'wb') as f:
            f.write(pj.api.closureCompile(js, closureMode))
        ms = int((time.time() - start) * 1000)
        sys.stderr.write('done. (%d ms)\n' % ms)
if __name__ == '__main__':
main()
| andrewschaaf/pyxc-pj | pj-examples/colorflash/make.py | Python | mit | 1,046 |
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp import models, fields, api
from datetime import *
class clv_file(models.Model):
    """Workflow extension for clv_file: tracks a state machine plus the
    timestamp of the latest state change."""
    _inherit = 'clv_file'

    date = fields.Datetime("Status change date", required=True, readonly=True)
    state = fields.Selection([('new','New'),
                              ('getting','Getting'),
                              ('stored','Stored'),
                              ('checked','Checked'),
                              ('deleted','Deleted'),
                              ], string='Status', default='new', readonly=True, required=True, help="")

    _defaults = {
        'date': lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
    }

    def _change_state(self, new_state):
        """Stamp the current time and switch the record to ``new_state``."""
        self.date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        self.state = new_state

    @api.one
    def button_new(self):
        self._change_state('new')

    @api.one
    def button_getting(self):
        self._change_state('getting')

    @api.one
    def button_stored(self):
        self._change_state('stored')

    @api.one
    def button_checked(self):
        self._change_state('checked')

    @api.one
    def button_deleted(self):
        self._change_state('deleted')
| CLVsol/odoo_addons | clv_file/wkf/clv_file_wkf.py | Python | agpl-3.0 | 2,759 |
import os
from django.conf.urls.defaults import *
# URL routing table; view names are resolved against the 'debately.views'
# module prefix (old-style string views via the deprecated patterns() API).
urlpatterns = patterns('debately.views',
    (r'^debates/challenge/(\d+)', 'challenge_debate'),  # challenge an existing debate by id
    (r'^debates/create/$', 'create_debate'),            # start a new debate
    (r'^debates/(\d+)$', 'debate'),                     # debate detail page
    (r'^entries/(\d+)/comment/', 'create_comment'),     # comment on a debate entry
    (r'^messages$', 'usermessages'),                    # current user's messages
    (r'^users/(.*)', 'userpage'),                       # a user's profile page
    (r'^$', 'index'),                                   # front page
)
| SnacksOnAPlane/debately | urls.py | Python | bsd-3-clause | 399 |
import pandas as pd
import os
def process_order_data_dir(needed_map_dir):
    """Aggregate every order CSV sheet in ``needed_map_dir`` in place.

    Each file containing '.csv' in its name is read, aggregated through
    process_order_data, and written back to the same path.
    """
    if not os.path.isdir(needed_map_dir) or not os.path.exists(needed_map_dir):
        raise IOError("ERROR: " + needed_map_dir + " not existed or its not a dir")

    print("change order sheet... in " + needed_map_dir)
    for entry in os.listdir(needed_map_dir):
        if ".csv" not in entry:
            continue
        sheet_path = os.path.join(needed_map_dir, entry)
        print(entry)
        # Aggregate the raw orders and overwrite the original sheet.
        aggregated = process_order_data(pd.read_csv(sheet_path))
        aggregated.to_csv(sheet_path, index=True)
def process_order_data(data):
    """Aggregate raw order rows per (Time, start_district).

    For each group, computes:
      - order_count: number of orders (non-null dest_district rows)
      - null_count:  orders with no driver assigned (driver_id is null)
      - fee_sum:     total Price

    :param data: DataFrame with columns Time, start_district, dest_district,
                 Price and driver_id. The input is NOT modified.
    :return: DataFrame indexed by (Time, start_district).
    """
    # Fix: the original wrote a "NULL" column onto the caller's DataFrame;
    # assign() works on a copy, leaving the input untouched.
    frame = data.assign(NULL=data["driver_id"].isnull())
    grouped = frame.groupby(["Time", "start_district"], sort=True)
    df = pd.DataFrame(columns=["order_count", "null_count", "fee_sum"])
    # Select each column before aggregating: whole-frame count()/sum() would
    # choke on (or pointlessly aggregate) the object-dtype columns.
    df["order_count"] = grouped["dest_district"].count()
    df["null_count"] = grouped["NULL"].sum().astype(int)
    df["fee_sum"] = grouped["Price"].sum()
    return df
| Heipiao/didi_competition | operate_order_sheet.py | Python | mit | 1,067 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp.osv import osv
class invoice(osv.osv):
    """account.invoice extension that blanks out cancelled invoice numbers."""
    _inherit = 'account.invoice'

    def clean_internal_number(self, cr, uid, ids, context=None):
        """Clear the internal numbering fields of the given invoices.

        The AFIP document number is cleared as well, for compatibility with
        the Argentinian localization.
        """
        cleared = {'internal_number': False, 'afip_document_number': False}
        self.write(cr, uid, ids, cleared, context=context)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Malayalam language.
.. seealso:: http://en.wikipedia.org/wiki/Malayalam_language
"""
from translate.lang import common
class ml(common.Common):
    """This class represents Malayalam."""

    # Capitalisation checks are skipped: the Malayalam script has no
    # upper/lower case distinction, so Latin-style caps tests do not apply.
    ignoretests = ["startcaps", "simplecaps"]
| bluemini/kuma | vendor/packages/translate/lang/ml.py | Python | mpl-2.0 | 1,024 |
from ebu_tt_live.documents import EBUTT3Document, EBUTTAuthorsGroupControlRequest, EBUTT3DocumentSequence
from ebu_tt_live.node import SimpleConsumer
from ebu_tt_live.carriage import IConsumerCarriage
from ebu_tt_live.errors import UnexpectedSequenceIdentifierError
from mock import MagicMock
from unittest import TestCase
from datetime import timedelta
from ebu_tt_live.clocks.local import LocalMachineClock
class TestSimpleConsumerUnit(TestCase):
    """Unit-ish tests for SimpleConsumer with a mocked consumer carriage."""

    def setUp(self):
        mock_carriage = MagicMock(spec=IConsumerCarriage)
        mock_carriage.provides.return_value = EBUTT3Document
        self.consumer = SimpleConsumer(
            node_id='testConsumer',
            consumer_carriage=mock_carriage
        )

    def test_process_document(self):
        # This is not quite unit... this is integration test: processing a
        # first document should create the sequence and the reference clock.
        document = EBUTT3Document(
            time_base='clock',
            clock_mode='local',
            lang='en-GB',
            sequence_identifier='testSequenceEncoder01',
            sequence_number='1'
        )
        self.consumer.process_document(document=document, availability_time=timedelta())
        self.assertIsInstance(self.consumer._sequence, EBUTT3DocumentSequence)
        self.assertIsInstance(self.consumer.reference_clock, LocalMachineClock)

    def test_process_two_documents_ignore_second_sequence_id(self):
        # A document from a different sequence must be rejected once a
        # sequence has been adopted.
        doc_seq_one = EBUTT3Document(
            time_base='clock',
            clock_mode='local',
            lang='en-GB',
            sequence_identifier='testSequenceEncoder01',
            sequence_number='1'
        )
        doc_seq_two = EBUTT3Document(
            time_base='clock',
            clock_mode='local',
            lang='en-GB',
            sequence_identifier='testSequenceEncoder02',
            sequence_number='1'
        )
        self.consumer.process_document(document=doc_seq_one)
        self.assertIsInstance(self.consumer._sequence, EBUTT3DocumentSequence)
        self.assertIsInstance(self.consumer.reference_clock, LocalMachineClock)
        with self.assertRaises(UnexpectedSequenceIdentifierError) as context:
            self.consumer.process_document(document=doc_seq_two)
        self.assertTrue('Rejecting new sequence identifier' in context.exception.args[0])

    def test_control_request(self):
        # The message must be ignored: no sequence or clock is created.
        message = EBUTTAuthorsGroupControlRequest(
            sequence_identifier='TestSequence',
            sender='sender',
            recipient=['one', 'two'],
            payload='Test payload'
        )
        self.consumer.process_document(document=message)
        self.assertIsNone(self.consumer._sequence)
        self.assertIsNone(self.consumer.reference_clock)
| bbc/ebu-tt-live-toolkit | ebu_tt_live/node/test/test_consumer_unit.py | Python | bsd-3-clause | 2,719 |
#!/usr/bin/env python
from numarray import *
import sys
from PyQt4.Qwt3D import *
from PyQt4.Qt import *
# enable all tracing options of the SIP generated bindings (requires -r option)
if False:
import sip
sip.settracemask(0x3f)
def matrix2d(nx, ny, minx, maxx, miny, maxy, function):
    """Return a data matrix to test the interface to the C++ member function

    bool SurfacePlot::loadFromData(
        double **, unsigned int, unsigned int, double, double, double, double);
    """
    step_x = (maxx-minx)/(nx-1)
    step_y = (maxy-miny)/(ny-1)
    # x varies along the first axis (columns of constant x).
    xs = multiply.outer(minx + step_x*arange(nx), ones(ny, Float))
    # y varies along the second axis (rows of constant y).
    ys = multiply.outer(ones((nx,), Float), miny + step_y*arange(ny))
    # Sample the function over the whole grid at once.
    return function(xs, ys)

# matrix2d()
def matrix3d(nx, ny, minx, maxx, miny, maxy, function):
    """Return a data matrix to test the interface to the C++ member function

    bool SurfacePlot::loadFromData(
        Triple **, unsigned int, unsigned int, bool = false, bool = false);
    """
    xyzs = zeros((nx, ny, 3), Float)
    step_x = (maxx-minx)/(nx-1)
    step_y = (maxy-miny)/(ny-1)
    # x varies along the first axis (columns of constant x).
    xyzs[:,:,0] = multiply.outer(minx + step_x*arange(nx), ones(ny, Float))
    # y varies along the second axis (rows of constant y).
    xyzs[:,:,1] = multiply.outer(ones((nx,), Float), miny + step_y*arange(ny))
    # z holds the sampled function values.
    xyzs[:,:,2] = function(xyzs[:,:,0], xyzs[:,:,1])
    return xyzs

# matrix3d()
def saddle(x, y):
    """Saddle surface z = x*y (works elementwise on arrays, too)."""
    return y*x

# saddle()
class Plot(SurfacePlot):
def __init__(self, *args):
SurfacePlot.__init__(self, *args)
self.setBackgroundColor(RGBA(1.0, 1.0, 0.6))
self.setRotation(30, 0, 15)
self.setScale(1.0, 1.0, 1.0)
nx, ny, minx, maxx, miny, maxy = 3, 5, -1.0, 1.0, -1.0, 1.0
if True:
zs = matrix2d(nx, ny, minx, maxx, miny, maxy, saddle)
print type(zs)
print zs
self.loadFromData(zs, minx, maxx, miny, maxy)
else:
xyzs = matrix3d(nx, ny, minx, maxx, miny, maxy, saddle)
print type(zs)
print xyzs
self.loadFromData(xyzs)
axes = self.coordinates().axes # alias
for axis, label in ((X1, "x"), (Y1, "y"), (Z1, "z")):
axes[axis].setAutoScale(False)
axes[axis].setMajors(5) # 6 major ticks
axes[axis].setMinors(3) # 2 minor ticks
axes[axis].setLabelString(label)
self.setCoordinateStyle(BOX)
self.coordinates().setGridLines(True, True)
self.coordinates().setLineSmooth(True)
self.updateData()
self.updateGL()
# __init__()
# class Plot
def make():
    """Create, show and size the demo plot window; return the widget."""
    plot = Plot()
    plot.show()
    # Matrox cards on Linux work better with a resize() after show()
    plot.resize(600, 400)
    return plot

# make()
def main(args):
    """Start the Qt application and run the event loop around the demo plot."""
    app = QApplication(args)
    # Keep the returned widget in a local so it stays referenced for the
    # duration of the event loop (presumably avoids premature destruction).
    demo = make()
    app.exec_()

# main()
# Admire
if __name__ == '__main__':
main(sys.argv)
# Local Variables: ***
# mode: python ***
# End: ***
| PyQwt/PyQwt3D | qt4examples/TestNumarray.py | Python | gpl-2.0 | 2,972 |
import unittest
from aula4.pilha import Pilha, PilhaVaziaErro
def esta_balanceada(expressao):
    """
    Check whether an expression has balanced parentheses, brackets and braces.

    Uses only the Pilha (stack) built in the previous class, as required.
    Complexity: O(n) time and O(n) space, where n = len(expressao).

    Bug fix: the original popped the stack for EVERY non-opening character
    (letters, digits, operators included), and an empty-stack pop returned
    pilha.vazia() (True) instead of reporting the unmatched closer. Now only
    closing brackets pop, other characters are ignored.

    :param expressao: string with the expression to check
    :return: True if the expression is balanced, False otherwise
    """
    # Maps each closing character to its matching opening character.
    abertura_de = {')': '(', ']': '[', '}': '{'}
    pilha = Pilha()
    for caracter in expressao:
        if caracter in '([{':
            pilha.empilhar(caracter)
        elif caracter in abertura_de:
            try:
                if pilha.desempilhar() != abertura_de[caracter]:
                    # Mismatched pair, e.g. '[)'.
                    return False
            except PilhaVaziaErro:
                # Closing character with no corresponding opener.
                return False
    # Balanced only if no opener is left pending.
    return pilha.vazia()
class BalancearTestes(unittest.TestCase):
    """Specification tests for esta_balanceada: balanced pairs, unmatched or
    mis-ordered brackets, and a realistic mathematical expression."""

    def test_expressao_vazia(self):
        # An empty expression is trivially balanced.
        self.assertTrue(esta_balanceada(''))

    def test_parenteses(self):
        self.assertTrue(esta_balanceada('()'))

    def test_chaves(self):
        self.assertTrue(esta_balanceada('{}'))

    def test_colchetes(self):
        self.assertTrue(esta_balanceada('[]'))

    def test_todos_caracteres(self):
        # Every nesting order of the three bracket kinds must balance.
        self.assertTrue(esta_balanceada('({[]})'))
        self.assertTrue(esta_balanceada('[({})]'))
        self.assertTrue(esta_balanceada('{[()]}'))

    def test_chave_nao_fechada(self):
        self.assertFalse(esta_balanceada('{'))

    def test_colchete_nao_fechado(self):
        self.assertFalse(esta_balanceada('['))

    def test_parentese_nao_fechado(self):
        self.assertFalse(esta_balanceada('('))

    def test_chave_nao_aberta(self):
        # Closer before its opener must fail.
        self.assertFalse(esta_balanceada('}{'))

    def test_colchete_nao_aberto(self):
        self.assertFalse(esta_balanceada(']['))

    def test_parentese_nao_aberto(self):
        self.assertFalse(esta_balanceada(')('))

    def test_falta_de_caracter_de_fechamento(self):
        self.assertFalse(esta_balanceada('({[]}'))

    def test_falta_de_caracter_de_abertura(self):
        self.assertFalse(esta_balanceada('({]})'))

    def test_expressao_matematica_valida(self):
        # Non-bracket characters must not affect the result.
        self.assertTrue(esta_balanceada('({[1+3]*5}/7)+9'))

    def test_char_errado_fechando(self):
        # Mismatched pair: opens '[' but closes ')'.
        self.assertFalse(esta_balanceada('[)'))
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from collections import namedtuple
from functools import partial
import six
from cassandra import InvalidRequest
from cassandra.cluster import Cluster, UserTypeDoesNotExist
from cassandra.query import dict_factory
from cassandra.util import OrderedMap
from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, greaterthancass20, greaterthanorequalcass36
from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, PRIMITIVE_DATATYPES_KEYS, COLLECTION_TYPES, \
get_sample, get_collection_sample
nested_collection_udt = namedtuple('nested_collection_udt', ['m', 't', 'l', 's'])
nested_collection_udt_nested = namedtuple('nested_collection_udt_nested', ['m', 't', 'l', 's', 'u'])
def setup_module():
    # Bring up the single-DC test cluster and refresh the datatype lists
    # (PRIMITIVE_DATATYPES etc.) for the running Cassandra version.
    use_singledc()
    update_datatypes()
@greaterthancass20
class UDTTests(BasicSegregatedKeyspaceUnitTestCase):
    """Integration tests for Cassandra user-defined types (UDTs).

    Covers registered/unregistered and prepared/unprepared insert paths,
    nested UDTs, collection fields, non-alphanumeric identifiers, type
    alteration, and error handling for nonexistent types.

    Fix applied throughout: ``namedtuple(name, ('x'))`` passed a
    parenthesized *string*, not a tuple (missing comma).  It only worked
    because namedtuple also accepts a field-name string; the field specs
    are now real tuples, e.g. ``('value',)``.
    """

    @property
    def table_name(self):
        # Per-test table name derived from the running test method.
        return self._testMethodName.lower()

    def setUp(self):
        super(UDTTests, self).setUp()
        self.session.set_keyspace(self.keyspace_name)

    @greaterthanorequalcass36
    def test_non_frozen_udts(self):
        """
        Test to ensure that non frozen udt's work with C* >3.6.

        @since 3.7.0
        @jira_ticket PYTHON-498
        @expected_result Non frozen UDT's are supported

        @test_category data_types, udt
        """
        self.session.execute("USE {0}".format(self.keyspace_name))
        self.session.execute("CREATE TYPE user (state text, has_corn boolean)")
        self.session.execute("CREATE TABLE {0} (a int PRIMARY KEY, b user)".format(self.function_table_name))
        User = namedtuple('user', ('state', 'has_corn'))
        self.cluster.register_user_type(self.keyspace_name, "user", User)
        self.session.execute("INSERT INTO {0} (a, b) VALUES (%s, %s)".format(self.function_table_name), (0, User("Nebraska", True)))
        # Field-level update is only legal on non-frozen UDTs.
        self.session.execute("UPDATE {0} SET b.has_corn = False where a = 0".format(self.function_table_name))
        result = self.session.execute("SELECT * FROM {0}".format(self.function_table_name))
        self.assertFalse(result[0].b.has_corn)
        table_sql = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].as_cql_query()
        self.assertNotIn("<frozen>", table_sql)

    def test_can_insert_unprepared_registered_udts(self):
        """
        Test the insertion of unprepared, registered UDTs
        """
        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)

        s.execute("CREATE TYPE user (age int, name text)")
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")

        User = namedtuple('user', ('age', 'name'))
        c.register_user_type(self.keyspace_name, "user", User)

        s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User(42, 'bob')))
        result = s.execute("SELECT b FROM mytable WHERE a=0")
        row = result[0]
        self.assertEqual(42, row.b.age)
        self.assertEqual('bob', row.b.name)
        self.assertTrue(type(row.b) is User)

        # use the same UDT name in a different keyspace
        s.execute("""
            CREATE KEYSPACE udt_test_unprepared_registered2
            WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
            """)
        s.set_keyspace("udt_test_unprepared_registered2")
        s.execute("CREATE TYPE user (state text, is_cool boolean)")
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")

        User = namedtuple('user', ('state', 'is_cool'))
        c.register_user_type("udt_test_unprepared_registered2", "user", User)

        s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User('Texas', True)))
        result = s.execute("SELECT b FROM mytable WHERE a=0")
        row = result[0]
        self.assertEqual('Texas', row.b.state)
        self.assertEqual(True, row.b.is_cool)
        self.assertTrue(type(row.b) is User)

        s.execute("DROP KEYSPACE udt_test_unprepared_registered2")

        c.shutdown()

    def test_can_register_udt_before_connecting(self):
        """
        Test the registration of UDTs before session creation
        """
        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(wait_for_all_pools=True)

        s.execute("""
            CREATE KEYSPACE udt_test_register_before_connecting
            WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
            """)
        s.set_keyspace("udt_test_register_before_connecting")
        s.execute("CREATE TYPE user (age int, name text)")
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")

        s.execute("""
            CREATE KEYSPACE udt_test_register_before_connecting2
            WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
            """)
        s.set_keyspace("udt_test_register_before_connecting2")
        s.execute("CREATE TYPE user (state text, is_cool boolean)")
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")

        # now that types are defined, shutdown and re-create Cluster
        c.shutdown()
        c = Cluster(protocol_version=PROTOCOL_VERSION)

        User1 = namedtuple('user', ('age', 'name'))
        User2 = namedtuple('user', ('state', 'is_cool'))

        # Registration before connect exercises the deferred type-resolution path.
        c.register_user_type("udt_test_register_before_connecting", "user", User1)
        c.register_user_type("udt_test_register_before_connecting2", "user", User2)

        s = c.connect(wait_for_all_pools=True)

        s.set_keyspace("udt_test_register_before_connecting")
        s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User1(42, 'bob')))
        result = s.execute("SELECT b FROM mytable WHERE a=0")
        row = result[0]
        self.assertEqual(42, row.b.age)
        self.assertEqual('bob', row.b.name)
        self.assertTrue(type(row.b) is User1)

        # use the same UDT name in a different keyspace
        s.set_keyspace("udt_test_register_before_connecting2")
        s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User2('Texas', True)))
        result = s.execute("SELECT b FROM mytable WHERE a=0")
        row = result[0]
        self.assertEqual('Texas', row.b.state)
        self.assertEqual(True, row.b.is_cool)
        self.assertTrue(type(row.b) is User2)

        s.execute("DROP KEYSPACE udt_test_register_before_connecting")
        s.execute("DROP KEYSPACE udt_test_register_before_connecting2")

        c.shutdown()

    def test_can_insert_prepared_unregistered_udts(self):
        """
        Test the insertion of prepared, unregistered UDTs
        """
        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)

        s.execute("CREATE TYPE user (age int, name text)")
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")

        User = namedtuple('user', ('age', 'name'))

        insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
        s.execute(insert, (0, User(42, 'bob')))

        select = s.prepare("SELECT b FROM mytable WHERE a=?")
        result = s.execute(select, (0,))
        row = result[0]
        self.assertEqual(42, row.b.age)
        self.assertEqual('bob', row.b.name)

        # use the same UDT name in a different keyspace
        s.execute("""
            CREATE KEYSPACE udt_test_prepared_unregistered2
            WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
            """)
        s.set_keyspace("udt_test_prepared_unregistered2")
        s.execute("CREATE TYPE user (state text, is_cool boolean)")
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")

        User = namedtuple('user', ('state', 'is_cool'))

        insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
        s.execute(insert, (0, User('Texas', True)))

        select = s.prepare("SELECT b FROM mytable WHERE a=?")
        result = s.execute(select, (0,))
        row = result[0]
        self.assertEqual('Texas', row.b.state)
        self.assertEqual(True, row.b.is_cool)

        s.execute("DROP KEYSPACE udt_test_prepared_unregistered2")

        c.shutdown()

    def test_can_insert_prepared_registered_udts(self):
        """
        Test the insertion of prepared, registered UDTs
        """
        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)

        s.execute("CREATE TYPE user (age int, name text)")
        User = namedtuple('user', ('age', 'name'))
        c.register_user_type(self.keyspace_name, "user", User)

        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")

        insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
        s.execute(insert, (0, User(42, 'bob')))

        select = s.prepare("SELECT b FROM mytable WHERE a=?")
        result = s.execute(select, (0,))
        row = result[0]
        self.assertEqual(42, row.b.age)
        self.assertEqual('bob', row.b.name)
        self.assertTrue(type(row.b) is User)

        # use the same UDT name in a different keyspace
        s.execute("""
            CREATE KEYSPACE udt_test_prepared_registered2
            WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
            """)
        s.set_keyspace("udt_test_prepared_registered2")
        s.execute("CREATE TYPE user (state text, is_cool boolean)")
        User = namedtuple('user', ('state', 'is_cool'))
        c.register_user_type("udt_test_prepared_registered2", "user", User)

        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")

        insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
        s.execute(insert, (0, User('Texas', True)))

        select = s.prepare("SELECT b FROM mytable WHERE a=?")
        result = s.execute(select, (0,))
        row = result[0]
        self.assertEqual('Texas', row.b.state)
        self.assertEqual(True, row.b.is_cool)
        self.assertTrue(type(row.b) is User)

        s.execute("DROP KEYSPACE udt_test_prepared_registered2")

        c.shutdown()

    def test_can_insert_udts_with_nulls(self):
        """
        Test the insertion of UDTs with null and empty string fields
        """
        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)

        s.execute("CREATE TYPE user (a text, b int, c uuid, d blob)")
        User = namedtuple('user', ('a', 'b', 'c', 'd'))
        c.register_user_type(self.keyspace_name, "user", User)

        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")

        insert = s.prepare("INSERT INTO mytable (a, b) VALUES (0, ?)")
        s.execute(insert, [User(None, None, None, None)])

        results = s.execute("SELECT b FROM mytable WHERE a=0")
        self.assertEqual((None, None, None, None), results[0].b)

        select = s.prepare("SELECT b FROM mytable WHERE a=0")
        self.assertEqual((None, None, None, None), s.execute(select)[0].b)

        # also test empty strings; empty blob round-trips as empty bytes
        s.execute(insert, [User('', None, None, six.binary_type())])
        results = s.execute("SELECT b FROM mytable WHERE a=0")
        self.assertEqual(('', None, None, six.binary_type()), results[0].b)

        c.shutdown()

    def test_can_insert_udts_with_varying_lengths(self):
        """
        Test for ensuring extra-lengthy udts are properly inserted
        """
        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)

        MAX_TEST_LENGTH = 254

        # create the seed udt, increase timeout to avoid the query failure on slow systems
        s.execute("CREATE TYPE lengthy_udt ({0})"
                  .format(', '.join(['v_{0} int'.format(i)
                                     for i in range(MAX_TEST_LENGTH)])))

        # create a table with multiple sizes of nested udts
        # no need for all nested types, only a spot checked few and the largest one
        s.execute("CREATE TABLE mytable ("
                  "k int PRIMARY KEY, "
                  "v frozen<lengthy_udt>)")

        # create and register the seed udt type
        udt = namedtuple('lengthy_udt', tuple(['v_{0}'.format(i) for i in range(MAX_TEST_LENGTH)]))
        c.register_user_type(self.keyspace_name, "lengthy_udt", udt)

        # verify inserts and reads
        for i in (0, 1, 2, 3, MAX_TEST_LENGTH):
            # fill the first i fields; the remainder stay null
            params = [j for j in range(i)] + [None for j in range(MAX_TEST_LENGTH - i)]
            created_udt = udt(*params)

            # write udt
            s.execute("INSERT INTO mytable (k, v) VALUES (0, %s)", (created_udt,))

            # verify udt was written and read correctly, increase timeout to avoid the query failure on slow systems
            result = s.execute("SELECT v FROM mytable WHERE k=0")[0]
            self.assertEqual(created_udt, result.v)

        c.shutdown()

    def nested_udt_schema_helper(self, session, MAX_NESTING_DEPTH):
        # Build depth_0..depth_N UDTs where depth_k wraps depth_{k-1},
        # plus a table with columns for a few spot-checked depths.
        # create the seed udt
        execute_until_pass(session, "CREATE TYPE depth_0 (age int, name text)")

        # create the nested udts
        for i in range(MAX_NESTING_DEPTH):
            execute_until_pass(session, "CREATE TYPE depth_{0} (value frozen<depth_{1}>)".format(i + 1, i))

        # create a table with multiple sizes of nested udts
        # no need for all nested types, only a spot checked few and the largest one
        execute_until_pass(session, "CREATE TABLE mytable ("
                                    "k int PRIMARY KEY, "
                                    "v_0 frozen<depth_0>, "
                                    "v_1 frozen<depth_1>, "
                                    "v_2 frozen<depth_2>, "
                                    "v_3 frozen<depth_3>, "
                                    "v_{0} frozen<depth_{0}>)".format(MAX_NESTING_DEPTH))

    def nested_udt_creation_helper(self, udts, i):
        # Recursively wrap the seed value in i layers of nesting.
        if i == 0:
            return udts[0](42, 'Bob')
        else:
            return udts[i](self.nested_udt_creation_helper(udts, i - 1))

    def nested_udt_verification_helper(self, session, MAX_NESTING_DEPTH, udts):
        # Round-trip each spot-checked depth via both simple and prepared statements.
        for i in (0, 1, 2, 3, MAX_NESTING_DEPTH):
            # create udt
            udt = self.nested_udt_creation_helper(udts, i)

            # write udt via simple statement
            session.execute("INSERT INTO mytable (k, v_%s) VALUES (0, %s)", [i, udt])

            # verify udt was written and read correctly
            result = session.execute("SELECT v_{0} FROM mytable WHERE k=0".format(i))[0]
            self.assertEqual(udt, result["v_{0}".format(i)])

            # write udt via prepared statement
            insert = session.prepare("INSERT INTO mytable (k, v_{0}) VALUES (1, ?)".format(i))
            session.execute(insert, [udt])

            # verify udt was written and read correctly
            result = session.execute("SELECT v_{0} FROM mytable WHERE k=1".format(i))[0]
            self.assertEqual(udt, result["v_{0}".format(i)])

    def test_can_insert_nested_registered_udts(self):
        """
        Test for ensuring nested registered udts are properly inserted
        """
        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)
        s.row_factory = dict_factory

        MAX_NESTING_DEPTH = 16

        # create the schema
        self.nested_udt_schema_helper(s, MAX_NESTING_DEPTH)

        # create and register the seed udt type
        udts = []
        udt = namedtuple('depth_0', ('age', 'name'))
        udts.append(udt)
        c.register_user_type(self.keyspace_name, "depth_0", udts[0])

        # create and register the nested udt types
        for i in range(MAX_NESTING_DEPTH):
            udt = namedtuple('depth_{0}'.format(i + 1), ('value',))
            udts.append(udt)
            c.register_user_type(self.keyspace_name, "depth_{0}".format(i + 1), udts[i + 1])

        # insert udts and verify inserts with reads
        self.nested_udt_verification_helper(s, MAX_NESTING_DEPTH, udts)

        c.shutdown()

    def test_can_insert_nested_unregistered_udts(self):
        """
        Test for ensuring nested unregistered udts are properly inserted
        """
        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)
        s.row_factory = dict_factory

        MAX_NESTING_DEPTH = 16

        # create the schema
        self.nested_udt_schema_helper(s, MAX_NESTING_DEPTH)

        # create the seed udt type
        udts = []
        udt = namedtuple('depth_0', ('age', 'name'))
        udts.append(udt)

        # create the nested udt types
        for i in range(MAX_NESTING_DEPTH):
            udt = namedtuple('depth_{0}'.format(i + 1), ('value',))
            udts.append(udt)

        # insert udts via prepared statements and verify inserts with reads
        for i in (0, 1, 2, 3, MAX_NESTING_DEPTH):
            # create udt
            udt = self.nested_udt_creation_helper(udts, i)

            # write udt
            insert = s.prepare("INSERT INTO mytable (k, v_{0}) VALUES (0, ?)".format(i))
            s.execute(insert, [udt])

            # verify udt was written and read correctly
            result = s.execute("SELECT v_{0} FROM mytable WHERE k=0".format(i))[0]
            self.assertEqual(udt, result["v_{0}".format(i)])

        c.shutdown()

    def test_can_insert_nested_registered_udts_with_different_namedtuples(self):
        """
        Test for ensuring nested udts are inserted correctly when the
        created namedtuples are use names that are different the cql type.
        """
        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)
        s.row_factory = dict_factory

        MAX_NESTING_DEPTH = 16

        # create the schema
        self.nested_udt_schema_helper(s, MAX_NESTING_DEPTH)

        # create and register the seed udt type ("level_*" vs cql "depth_*")
        udts = []
        udt = namedtuple('level_0', ('age', 'name'))
        udts.append(udt)
        c.register_user_type(self.keyspace_name, "depth_0", udts[0])

        # create and register the nested udt types
        for i in range(MAX_NESTING_DEPTH):
            udt = namedtuple('level_{0}'.format(i + 1), ('value',))
            udts.append(udt)
            c.register_user_type(self.keyspace_name, "depth_{0}".format(i + 1), udts[i + 1])

        # insert udts and verify inserts with reads
        self.nested_udt_verification_helper(s, MAX_NESTING_DEPTH, udts)

        c.shutdown()

    def test_raise_error_on_nonexisting_udts(self):
        """
        Test for ensuring that an error is raised for operating on a nonexisting udt or an invalid keyspace
        """
        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)
        User = namedtuple('user', ('age', 'name'))

        with self.assertRaises(UserTypeDoesNotExist):
            c.register_user_type("some_bad_keyspace", "user", User)

        with self.assertRaises(UserTypeDoesNotExist):
            c.register_user_type("system", "user", User)

        with self.assertRaises(InvalidRequest):
            s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")

        c.shutdown()

    def test_can_insert_udt_all_datatypes(self):
        """
        Test for inserting various types of PRIMITIVE_DATATYPES into UDT's
        """
        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)

        # create UDT with one field per primitive type, named a, b, c, ...
        alpha_type_list = []
        start_index = ord('a')
        for i, datatype in enumerate(PRIMITIVE_DATATYPES):
            alpha_type_list.append("{0} {1}".format(chr(start_index + i), datatype))

        s.execute("""
            CREATE TYPE alldatatypes ({0})
        """.format(', '.join(alpha_type_list))
        )

        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<alldatatypes>)")

        # register UDT
        alphabet_list = []
        for i in range(ord('a'), ord('a') + len(PRIMITIVE_DATATYPES)):
            alphabet_list.append('{0}'.format(chr(i)))

        Alldatatypes = namedtuple("alldatatypes", alphabet_list)
        c.register_user_type(self.keyspace_name, "alldatatypes", Alldatatypes)

        # insert UDT data
        params = []
        for datatype in PRIMITIVE_DATATYPES:
            params.append((get_sample(datatype)))

        insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
        s.execute(insert, (0, Alldatatypes(*params)))

        # retrieve and verify data
        results = s.execute("SELECT * FROM mytable")

        row = results[0].b
        for expected, actual in zip(params, row):
            self.assertEqual(expected, actual)

        c.shutdown()

    def test_can_insert_udt_all_collection_datatypes(self):
        """
        Test for inserting various types of COLLECTION_TYPES into UDT's
        """
        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)

        # create UDT with one field per (collection, element-type) pair
        alpha_type_list = []
        start_index = ord('a')
        for i, collection_type in enumerate(COLLECTION_TYPES):
            for j, datatype in enumerate(PRIMITIVE_DATATYPES_KEYS):
                if collection_type == "map":
                    type_string = "{0}_{1} {2}<{3}, {3}>".format(chr(start_index + i), chr(start_index + j),
                                                                 collection_type, datatype)
                elif collection_type == "tuple":
                    type_string = "{0}_{1} frozen<{2}<{3}>>".format(chr(start_index + i), chr(start_index + j),
                                                                    collection_type, datatype)
                else:
                    type_string = "{0}_{1} {2}<{3}>".format(chr(start_index + i), chr(start_index + j),
                                                            collection_type, datatype)
                alpha_type_list.append(type_string)

        s.execute("""
            CREATE TYPE alldatatypes ({0})
        """.format(', '.join(alpha_type_list))
        )

        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<alldatatypes>)")

        # register UDT
        alphabet_list = []
        for i in range(ord('a'), ord('a') + len(COLLECTION_TYPES)):
            for j in range(ord('a'), ord('a') + len(PRIMITIVE_DATATYPES_KEYS)):
                alphabet_list.append('{0}_{1}'.format(chr(i), chr(j)))

        Alldatatypes = namedtuple("alldatatypes", alphabet_list)
        c.register_user_type(self.keyspace_name, "alldatatypes", Alldatatypes)

        # insert UDT data
        params = []
        for collection_type in COLLECTION_TYPES:
            for datatype in PRIMITIVE_DATATYPES_KEYS:
                params.append((get_collection_sample(collection_type, datatype)))

        insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
        s.execute(insert, (0, Alldatatypes(*params)))

        # retrieve and verify data
        results = s.execute("SELECT * FROM mytable")

        row = results[0].b
        for expected, actual in zip(params, row):
            self.assertEqual(expected, actual)

        c.shutdown()

    def insert_select_column(self, session, table_name, column_name, value):
        # Round-trip a single column value and assert equality.
        insert = session.prepare("INSERT INTO %s (k, %s) VALUES (?, ?)" % (table_name, column_name))
        session.execute(insert, (0, value))
        result = session.execute("SELECT %s FROM %s WHERE k=%%s" % (column_name, table_name), (0,))[0][0]
        self.assertEqual(result, value)

    def test_can_insert_nested_collections(self):
        """
        Test for inserting various types of nested COLLECTION_TYPES into tables and UDTs
        """

        if self.cass_version < (2, 1, 3):
            raise unittest.SkipTest("Support for nested collections was introduced in Cassandra 2.1.3")

        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)
        s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple

        name = self._testMethodName

        s.execute("""
            CREATE TYPE %s (
                m frozen<map<int,text>>,
                t tuple<int,text>,
                l frozen<list<int>>,
                s frozen<set<int>>
            )""" % name)
        s.execute("""
            CREATE TYPE %s_nested (
                m frozen<map<int,text>>,
                t tuple<int,text>,
                l frozen<list<int>>,
                s frozen<set<int>>,
                u frozen<%s>
            )""" % (name, name))
        s.execute("""
            CREATE TABLE %s (
                k int PRIMARY KEY,
                map_map map<frozen<map<int,int>>, frozen<map<int,int>>>,
                map_set map<frozen<set<int>>, frozen<set<int>>>,
                map_list map<frozen<list<int>>, frozen<list<int>>>,
                map_tuple map<frozen<tuple<int, int>>, frozen<tuple<int>>>,
                map_udt map<frozen<%s_nested>, frozen<%s>>,
            )""" % (name, name, name))

        validate = partial(self.insert_select_column, s, name)
        validate('map_map', OrderedMap([({1: 1, 2: 2}, {3: 3, 4: 4}), ({5: 5, 6: 6}, {7: 7, 8: 8})]))
        validate('map_set', OrderedMap([(set((1, 2)), set((3, 4))), (set((5, 6)), set((7, 8)))]))
        validate('map_list', OrderedMap([([1, 2], [3, 4]), ([5, 6], [7, 8])]))
        validate('map_tuple', OrderedMap([((1, 2), (3,)), ((4, 5), (6,))]))

        value = nested_collection_udt({1: 'v1', 2: 'v2'}, (3, 'v3'), [4, 5, 6, 7], set((8, 9, 10)))
        key = nested_collection_udt_nested(value.m, value.t, value.l, value.s, value)
        key2 = nested_collection_udt_nested({3: 'v3'}, value.t, value.l, value.s, value)
        validate('map_udt', OrderedMap([(key, value), (key2, value)]))

        c.shutdown()

    def test_non_alphanum_identifiers(self):
        """
        PYTHON-413
        """
        s = self.session
        non_alphanum_name = 'test.field@#$%@%#!'
        type_name = 'type2'
        s.execute('CREATE TYPE "%s" ("%s" text)' % (non_alphanum_name, non_alphanum_name))
        s.execute('CREATE TYPE %s ("%s" text)' % (type_name, non_alphanum_name))
        # table with types as map keys to make sure the tuple lookup works
        s.execute('CREATE TABLE %s (k int PRIMARY KEY, non_alphanum_type_map map<frozen<"%s">, int>, alphanum_type_map map<frozen<%s>, int>)' % (self.table_name, non_alphanum_name, type_name))
        s.execute('INSERT INTO %s (k, non_alphanum_type_map, alphanum_type_map) VALUES (%s, {{"%s": \'nonalphanum\'}: 0}, {{"%s": \'alphanum\'}: 1})' % (self.table_name, 0, non_alphanum_name, non_alphanum_name))
        row = s.execute('SELECT * FROM %s' % (self.table_name,))[0]

        # a type whose *name* is non-alphanumeric cannot get a namedtuple class
        k, v = row.non_alphanum_type_map.popitem()
        self.assertEqual(v, 0)
        self.assertEqual(k.__class__, tuple)
        self.assertEqual(k[0], 'nonalphanum')

        k, v = row.alphanum_type_map.popitem()
        self.assertEqual(v, 1)
        self.assertNotEqual(k.__class__, tuple)  # should be the namedtuple type
        self.assertEqual(k[0], 'alphanum')
        self.assertEqual(k.field_0_, 'alphanum')  # named tuple with positional field name

    def test_type_alteration(self):
        # Verify reads keep working as the UDT gains a field and a field
        # changes type, using a plain dict mapping for decoding.
        s = self.session
        type_name = "type_name"
        self.assertNotIn(type_name, s.cluster.metadata.keyspaces['udttests'].user_types)
        s.execute('CREATE TYPE %s (v0 int)' % (type_name,))
        self.assertIn(type_name, s.cluster.metadata.keyspaces['udttests'].user_types)
        s.execute('CREATE TABLE %s (k int PRIMARY KEY, v frozen<%s>)' % (self.table_name, type_name))

        s.execute('INSERT INTO %s (k, v) VALUES (0, {v0 : 1})' % (self.table_name,))

        s.cluster.register_user_type('udttests', type_name, dict)

        val = s.execute('SELECT v FROM %s' % self.table_name)[0][0]
        self.assertEqual(val['v0'], 1)

        # add field
        s.execute('ALTER TYPE %s ADD v1 text' % (type_name,))
        val = s.execute('SELECT v FROM %s' % self.table_name)[0][0]
        self.assertEqual(val['v0'], 1)
        self.assertIsNone(val['v1'])
        s.execute("INSERT INTO %s (k, v) VALUES (0, {v0 : 2, v1 : 'sometext'})" % (self.table_name,))
        val = s.execute('SELECT v FROM %s' % self.table_name)[0][0]
        self.assertEqual(val['v0'], 2)
        self.assertEqual(val['v1'], 'sometext')

        # alter field type
        s.execute('ALTER TYPE %s ALTER v1 TYPE blob' % (type_name,))
        s.execute("INSERT INTO %s (k, v) VALUES (0, {v0 : 3, v1 : 0xdeadbeef})" % (self.table_name,))
        val = s.execute('SELECT v FROM %s' % self.table_name)[0][0]
        self.assertEqual(val['v0'], 3)
        self.assertEqual(val['v1'], six.b('\xde\xad\xbe\xef'))

    def test_alter_udt(self):
        """
        Test to ensure that altered UDT's are properly surfaced without needing to restart the underlying session.

        @since 3.0.0
        @jira_ticket PYTHON-226
        @expected_result UDT's will reflect added columns without a session restart.

        @test_category data_types, udt
        """

        # Create udt ensure it has the proper column names.
        self.session.set_keyspace(self.keyspace_name)
        self.session.execute("CREATE TYPE typetoalter (a int)")
        typetoalter = namedtuple('typetoalter', ('a',))
        self.session.execute("CREATE TABLE {0} (pk int primary key, typetoalter frozen<typetoalter>)".format(self.function_table_name))
        insert_statement = self.session.prepare("INSERT INTO {0} (pk, typetoalter) VALUES (?, ?)".format(self.function_table_name))
        self.session.execute(insert_statement, [1, typetoalter(1)])
        results = self.session.execute("SELECT * from {0}".format(self.function_table_name))
        for result in results:
            self.assertTrue(hasattr(result.typetoalter, 'a'))
            self.assertFalse(hasattr(result.typetoalter, 'b'))

        # Alter UDT and ensure the alter is honored in results
        self.session.execute("ALTER TYPE typetoalter add b int")
        typetoalter = namedtuple('typetoalter', ('a', 'b'))
        self.session.execute(insert_statement, [2, typetoalter(2, 2)])
        results = self.session.execute("SELECT * from {0}".format(self.function_table_name))
        for result in results:
            self.assertTrue(hasattr(result.typetoalter, 'a'))
            self.assertTrue(hasattr(result.typetoalter, 'b'))
| vipjml/python-driver | tests/integration/standard/test_udts.py | Python | apache-2.0 | 31,540 |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# home
#
#
# vim:fileencoding=utf-8:sw=4:et -*- coding: utf-8 -*-
#
# 测试 pyalgotrade 回测
#
import _index
from energy.libs.MongoStock import Feed
from energy.libs.eAlgoLib import eAlgoLib as eal
from pyalgotrade import strategy
from pyalgotrade import bar
from pyalgotrade.technical import ma
import pandas as pd
import sys
class pyAlgoWMA(strategy.BacktestingStrategy):
    """Long-only WMA crossover backtest.

    Enters a 10-share long position when price closes above the weighted
    moving average and exits when it closes below; each completed
    round-trip is appended to a pandas DataFrame trade log.
    """
    def __init__(self, feed, instrument, bBandsPeriod):
        strategy.BacktestingStrategy.__init__(self, feed)
        self.setDebugMode(False)
        self.__instrument = instrument
        self.__feed = feed
        self.__position = None
        # Fix: honour the caller-supplied period.  It was previously
        # ignored (WMA hard-coded to 15), so parameter sweeps in main()
        # had no effect on the strategy.
        self.__sma = ma.WMA(feed[instrument].getCloseDataSeries(), bBandsPeriod)
        # Trade-log columns: entry/exit price and date plus absolute return.
        self.__col = ["buyPrice","buyTime","sellPrice","sellTime", "returns"]
        self.__msdf = pd.DataFrame(columns=self.__col)
        self.__buyPrice = 0
        self.__buyTime = None
        self.setUseAdjustedValues(True)

    def EchoDF(self):
        # Serialize the accumulated trade log as JSON (orient="split").
        return self.__msdf.to_json(orient="split")

    def onEnterOk(self, position):
        # Entry order filled: remember price/time for the round-trip record.
        execInfo = position.getEntryOrder().getExecutionInfo()
        #self.info("BUY at $%.2f"%(execInfo.getPrice()))
        self.__buyPrice = execInfo.getPrice()
        self.__buyTime = execInfo.getDateTime()

    def onEnterCanceled(self, position):
        # Entry never filled; allow a new entry on later bars.
        #self.info("onEnterCanceled")
        self.__position = None

    def onExitOk(self, position):
        # Exit filled: log the completed trade and go flat.
        execInfo = position.getExitOrder().getExecutionInfo()
        #self.info("SELL at $%.2f"%(execInfo.getPrice()))
        self.__position = None
        pdser = pd.Series([self.__buyPrice, str(self.__buyTime)[:10],
                execInfo.getPrice(),str(execInfo.getDateTime())[:10], (execInfo.getPrice() -self.__buyPrice)],index=self.__col )
        self.__msdf = self.__msdf.append(pdser,ignore_index=True)
        self.__buyPrice = 0
        self.__buyTime = None

    def onExitCanceled(self, position):
        # Exit order failed; resubmit a market exit.
        self.info("onExitCanceled")
        self.__position.exitMarket()

    def onBars(self, bars):
        # Skip bars until the WMA window is full.
        if self.__sma[-1] is None:
            return

        bar = bars[self.__instrument]
        #self.info("close:%s sma:%s rsi:%s" % (bar.getClose(), self.__sma[-1], self.__rsi[-1]))
        if self.__position is None:
            if bar.getPrice() > self.__sma[-1]:
                # Enter a buy market order for 10 shares. The order is good till canceled.
                self.__position = self.enterLong(self.__instrument, 10, True)
                #print dir(self.__position)
        # Check if we have to exit the position.
        elif bar.getPrice() < self.__sma[-1] and not self.__position.exitActive():
            self.__position.exitMarket()
def main(i, code):
    # Backtest instrument *code* with a WMA strategy of period *i*,
    # loading up to 1024 daily bars from MongoDB.
    #code = "000592"
    dbfeed = Feed(code, bar.Frequency.DAY, 1024)
    dbfeed.loadBars()
    myStrategy = pyAlgoWMA(dbfeed, code, bBandsPeriod=i)
    ms = eal()
    # NOTE(review): "protfolio" looks like a typo for "portfolio", but the
    # name comes from the eAlgoLib API -- confirm before renaming.
    ms.protfolio(myStrategy)
if __name__ == "__main__":
    # Usage: python pyAlgoWMA.py <stock_code>; runs one backtest with
    # a fixed WMA period (the sweep loop is left commented out).
    code = sys.argv[1]
    #for m in range(10,60,5):
    m = 40
    main(m, code)
| vyouzhis/energy | epyalgo/pyAlgoWMA.py | Python | apache-2.0 | 3,057 |
import collections
import yaml
class Group(collections.OrderedDict):
    # Ordered YAML mapping that remembers where it started in the source
    # stream (start_mark) for error reporting.
    __slots__ = ('start_mark',)
class ConfigLoader(yaml.Loader):
    """Config loader with yaml tags"""
    def construct_yaml_map(self, node):
        # Yield the (initially empty) Group first so anchors/recursive
        # references can resolve to it, then fill it in afterwards.
        data = Group()
        data.start_mark = node.start_mark
        yield data
        value = self.construct_mapping(node)
        data.update(value)
    def construct_mapping(self, node, deep=False):
        # Preserve key order by returning an OrderedDict instead of a dict.
        if isinstance(node, yaml.MappingNode):
            self.flatten_mapping(node)
        return collections.OrderedDict(self.construct_pairs(node, deep=deep))
# Route every plain YAML mapping through the Group-building constructor.
yaml.add_constructor('tag:yaml.org,2002:map',
    ConfigLoader.construct_yaml_map, Loader=ConfigLoader)
class YamlyType(yaml.YAMLObject):
    """Base class for scalar schema tags (!Int, !String, ...).

    A scalar node becomes the type's default value; a mapping node is
    unpacked into attributes via __setstate__, with '=' as the shorthand
    key for the default.
    """
    def __init__(self, default=None):
        self.default_ = default
        self.inheritance = None
    @classmethod
    def from_yaml(cls, Loader, node):
        if isinstance(node, yaml.ScalarNode):
            # e.g. "port: !Int 80" -- the scalar is the default value.
            self = cls(default=Loader.construct_scalar(node))
        else:
            # Mapping form: bypass __init__ and populate from the mapping.
            self = cls.__new__(cls)
            self.__setstate__(Loader.construct_mapping(node))
        self.start_mark = node.start_mark
        return self
    def __setstate__(self, state):
        self.inheritance = None
        # '=' is the shorthand key for the default value.
        default = state.pop('=', None)
        for k, v in state.items():
            setattr(self, varname(k), v)
        if default is not None:
            self.default_ = default
class Int(YamlyType):
    # Signed integer config value.
    yaml_tag = '!Int'
    yaml_loader = ConfigLoader
class UInt(YamlyType):
    # Unsigned integer config value.
    yaml_tag = '!UInt'
    yaml_loader = ConfigLoader
class Bool(YamlyType):
    # Boolean config value.
    yaml_tag = '!Bool'
    yaml_loader = ConfigLoader
class Float(YamlyType):
    # Floating-point config value.
    yaml_tag = '!Float'
    yaml_loader = ConfigLoader
class String(YamlyType):
    # Free-form string config value.
    yaml_tag = '!String'
    yaml_loader = ConfigLoader
class File(YamlyType):
    # Filesystem path expected to name a file.
    yaml_tag = '!File'
    yaml_loader = ConfigLoader
class Dir(YamlyType):
    # Filesystem path expected to name a directory.
    yaml_tag = '!Dir'
    yaml_loader = ConfigLoader
class Struct(yaml.YAMLObject):
    """!Struct tag: a reference to a user-defined structure type.

    Scalar form names the target type directly; mapping form uses the
    '=' key for the type name plus extra attributes.
    """
    yaml_tag = '!Struct'
    yaml_loader = ConfigLoader
    def __init__(self, type):
        self.type = type
    @classmethod
    def from_yaml(cls, Loader, node):
        if isinstance(node, yaml.ScalarNode):
            self = cls(type=Loader.construct_scalar(node))
        else:
            self = cls.__new__(cls)
            self.__setstate__(Loader.construct_mapping(node))
        self.start_mark = node.start_mark
        return self
    def __setstate__(self, state):
        # '=' holds the referenced type name in mapping form.
        typ = state.pop('=', None)
        for k, v in state.items():
            setattr(self, varname(k), v)
        if typ is not None:
            self.type = typ
class Mapping(YamlyType):
    # Key/value mapping config value.
    yaml_tag = '!Mapping'
    yaml_loader = ConfigLoader
class Array(YamlyType):
    # Sequence config value.
    yaml_tag = '!Array'
    yaml_loader = ConfigLoader
class Convert(yaml.YAMLObject):
    # !Convert <name>: post-process the value with the named function.
    yaml_tag = '!Convert'
    yaml_loader = ConfigLoader
    def __init__(self, fun):
        self.fun = fun
    @classmethod
    def from_yaml(cls, Loader, node):
        # Only the scalar form (a function name) is supported.
        return cls(Loader.construct_scalar(node))
class VoidPtr(YamlyType):
    # Internal tag: an opaque (void*) value.
    yaml_tag = '!_VoidPtr'
    yaml_loader = ConfigLoader
class CStruct(YamlyType):
    """!CStruct tag: reference to a C struct type by name."""
    yaml_tag = '!CStruct'
    yaml_loader = ConfigLoader
    def __init__(self, type):
        # NOTE(review): stored as 'structname' while the mapping form below
        # (cls(**mapping)) still expects a 'type' key -- confirm intended.
        # Also, YamlyType.__init__ is not called, so default_/inheritance
        # are never set on CStruct instances.
        self.structname = type
    @classmethod
    def from_yaml(cls, Loader, node):
        if isinstance(node, yaml.ScalarNode):
            self = cls(type=Loader.construct_scalar(node))
        else:
            self = cls(**Loader.construct_mapping(node))
        return self
class CType(YamlyType):
    """!CType tag: reference to an arbitrary C type by name."""
    yaml_tag = '!CType'
    yaml_loader = ConfigLoader
    def __init__(self, type):
        # NOTE(review): YamlyType.__init__ is bypassed, so default_ and
        # inheritance are not initialized here.
        self.type = type
    @classmethod
    def from_yaml(cls, Loader, node):
        if isinstance(node, yaml.ScalarNode):
            self = cls(type=Loader.construct_scalar(node))
        else:
            self = cls(**Loader.construct_mapping(node))
        return self
from .core import Config, Usertype # sorry, circular dependency
from .util import varname
def load(input, config):
    """Parse YAML from *input* and populate *config*.

    Consumes the special top-level '__meta__' and '__types__' sections
    (registering each user type) before handing the remaining mappings
    to the config object.
    """
    data = yaml.load(input, Loader=ConfigLoader)
    config.fill_meta(data.pop('__meta__', {}))
    for k, v in data.pop('__types__', {}).items():
        typ = Usertype(k, v, start_mark=v.start_mark)
        config.add_type(typ)
    config.fill_data(data)
def main():
    # CLI entry point: parse arguments, load the config file, and
    # optionally pretty-print the resulting config.
    from .cli import simple
    cfg, inp, opt = simple()
    with inp:
        load(inp, cfg)
    if opt.print:
        cfg.print()

if __name__ == '__main__':
    main()
| tailhook/coyaml | coyaml/load.py | Python | mit | 4,518 |
#!/usr/bin/env python
#--coding:utf-8--
"""Minimal TCP client: read the server greeting, send three names and
print each reply, then tell the server to exit."""
import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to the local test server.
s.connect(("127.0.0.1", 9999))
# Receive the welcome message.
print(s.recv(1024))
for data in ["Michael", "Tracy", "Sarah"]:
    # Fix: sockets transmit bytes -- encode str payloads so this also
    # runs on Python 3 (an ASCII-safe no-op round-trip on Python 2).
    s.send(data.encode('utf-8'))
    print(s.recv(1024))
s.send("exit".encode('utf-8'))
s.close()
"""
Assignment 1 Coursera 2013 - Introduction to Data Science
Computes the ten most frequently occurring hash tags from a tweet file.
Example:
$ python top_ten.py output.txt
gameinsight 77.0
TFBJP 65.0
RT 53.0
5DebilidadesMias 51.0
...
"""
import sys
import json
from collections import Counter
def get_top_ten(tweet_file):
    """Return the 10 most frequent hashtag texts in *tweet_file* as
    (hashtag, count) pairs, most frequent first.

    Each line of the file must be a JSON-encoded tweet.  Lines without an
    'entities' object (e.g. delete notices) are skipped, as are tweets
    whose 'entities' lack a 'hashtags' list.
    """
    with open(tweet_file) as f:
        entities = (json.loads(line).get('entities', None) for line in f)
        # BUGFIX: default to [] -- .get('hashtags') returned None for
        # tweets without hashtags, crashing the inner generator below.
        tweet_hashtags = (entity.get('hashtags', []) for entity in entities if entity)
        texts = (tag['text'] for hashtags in tweet_hashtags for tag in hashtags)
        return Counter(texts).most_common(10)
if __name__ == '__main__':
    # CLI entry: argv[1] is the tweet file; print "<tag> <count>.0" per line.
    top_ten = get_top_ten(tweet_file=sys.argv[1])
    sys.stdout.writelines('{0} {1}.0\n'.format(*pair) for pair in top_ten)
| elyase/twitter-sentiment | top_ten.py | Python | mit | 798 |
"""Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import sys
import stat
import genericpath
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime","islink","exists","lexists","isdir","isfile",
"ismount", "expanduser","expandvars","normpath","abspath",
"samefile","sameopenfile","samestat",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames","relpath"]
# Strings representing various path-related bits and pieces.
# These are primarily for export; internally, they are hardcoded.
curdir = '.'
pardir = '..'
extsep = '.'
sep = '/'
pathsep = ':'
defpath = ':/bin:/usr/bin'
altsep = None
devnull = '/dev/null'
def _get_sep(path):
if isinstance(path, bytes):
return b'/'
else:
return '/'
# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).
def normcase(s):
    """Normalize case of pathname.  Has no effect under Posix"""
    # POSIX filesystems are case-sensitive, so the path is returned verbatim.
    # TODO: on Mac OS X, this should really return s.lower().
    return s
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
def isabs(s):
    """Test whether a path is absolute"""
    # A POSIX path is absolute iff its first character is the separator.
    return s[:1] == _get_sep(s)
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.
def join(a, *p):
    """Join two or more pathname components, inserting '/' as needed.
    If any component is an absolute path, all previous path components
    will be discarded."""
    sep = _get_sep(a)
    result = a
    for part in p:
        if part.startswith(sep):
            # absolute component: everything accumulated so far is dropped
            result = part
        elif not result or result.endswith(sep):
            # nothing to separate from, or a separator is already there
            result += part
        else:
            result = result + sep + part
    return result
# Split a path in head (everything up to the last '/') and tail (the
# rest). If the path ends in '/', tail will be empty. If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.
def split(p):
    """Split a pathname.  Returns tuple "(head, tail)" where "tail" is
    everything after the final slash.  Either part may be empty."""
    sep = _get_sep(p)
    cut = p.rfind(sep) + 1
    head, tail = p[:cut], p[cut:]
    # Trim trailing separators from head -- unless head is nothing but
    # separators (the filesystem root), which must stay intact.
    if head and head != sep * len(head):
        head = head.rstrip(sep)
    return head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    # Split p into (root, ext); the actual work is delegated to
    # genericpath._splitext.  altsep is passed as None because POSIX has
    # no alternative separator.
    if isinstance(p, bytes):
        sep = b'/'
        extsep = b'.'
    else:
        sep = '/'
        extsep = '.'
    return genericpath._splitext(p, sep, None, extsep)
# Reuse the shared implementation's docstring verbatim.
splitext.__doc__ = genericpath._splitext.__doc__
# Split a pathname into a drive specification and the rest of the
# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
def splitdrive(p):
    """Split a pathname into drive and path. On Posix, drive is always
    empty."""
    # p[:0] yields '' or b'' so the empty drive matches the argument's type.
    return p[:0], p
# Return the tail (basename) part of a path, same as split(path)[1].
def basename(p):
    """Returns the final component of a pathname"""
    sep = _get_sep(p)
    # Everything after the last separator; the whole string if none present.
    return p[p.rfind(sep) + 1:]
# Return the head (dirname) part of a path, same as split(path)[0].
def dirname(p):
    """Returns the directory component of a pathname"""
    sep = _get_sep(p)
    head = p[:p.rfind(sep) + 1]
    # Strip trailing separators unless head is only separators (the root).
    if head and head != sep * len(head):
        head = head.rstrip(sep)
    return head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.
def islink(path):
    """Test whether a path is a symbolic link"""
    try:
        mode = os.lstat(path).st_mode
    except (os.error, AttributeError):
        # nonexistent path, or a platform whose os lacks lstat
        return False
    return stat.S_ISLNK(mode)
# Being true for dangling symbolic links is also useful.
def lexists(path):
    """Test whether a path exists.  Returns True for broken symbolic links"""
    # lstat does not follow symlinks, so a dangling link still succeeds.
    try:
        os.lstat(path)
    except os.error:
        return False
    return True
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
    """Test whether two pathnames reference the same actual file"""
    # Stat both paths and compare device/inode via samestat.
    return samestat(os.stat(f1), os.stat(f2))
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
    """Test whether two open file objects reference the same file"""
    # fstat works on file descriptors rather than pathnames.
    return samestat(os.fstat(fp1), os.fstat(fp2))
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
    """Test whether two stat buffers reference the same file"""
    # Same file <=> same inode on the same device.
    return (s1.st_ino, s1.st_dev) == (s2.st_ino, s2.st_dev)
# Is a path a mount point?
# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
def ismount(path):
    """Test whether a path is a mount point"""
    # A mount point is detected by comparing path with path/'..': they sit
    # on different devices, or (for the root itself) are the same inode.
    try:
        s1 = os.lstat(path)
        if isinstance(path, bytes):
            parent = join(path, b'..')
        else:
            parent = join(path, '..')
        s2 = os.lstat(parent)
    except os.error:
        return False # It doesn't exist -- so not a mount point :-)
    dev1 = s1.st_dev
    dev2 = s2.st_dev
    if dev1 != dev2:
        return True     # path/.. on a different device as path
    ino1 = s1.st_ino
    ino2 = s2.st_ino
    if ino1 == ino2:
        return True     # path/.. is the same i-node as path
    return False
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructions.  If user or $HOME is unknown,
    do nothing."""
    if isinstance(path, bytes):
        tilde = b'~'
    else:
        tilde = '~'
    if not path.startswith(tilde):
        return path
    sep = _get_sep(path)
    # i marks the end of the '~' or '~user' prefix
    i = path.find(sep, 1)
    if i < 0:
        i = len(path)
    if i == 1:
        # bare '~': prefer $HOME, fall back to the password database
        if 'HOME' not in os.environ:
            import pwd
            userhome = pwd.getpwuid(os.getuid()).pw_dir
        else:
            userhome = os.environ['HOME']
    else:
        # '~user': look the named user up in the password database
        import pwd
        name = path[1:i]
        if isinstance(name, bytes):
            name = str(name, 'ASCII')
        try:
            pwent = pwd.getpwnam(name)
        except KeyError:
            # unknown user -- return the path unchanged
            return path
        userhome = pwent.pw_dir
    if isinstance(path, bytes):
        userhome = userhome.encode(sys.getfilesystemencoding())
    userhome = userhome.rstrip(sep)
    return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.
# Lazily compiled regexes for str and bytes paths, cached at module level.
_varprog = None
_varprogb = None

def expandvars(path):
    """Expand shell variables of form $var and ${var}.  Unknown variables
    are left unchanged."""
    global _varprog, _varprogb
    if isinstance(path, bytes):
        if b'$' not in path:
            return path
        if not _varprogb:
            import re
            _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII)
        search = _varprogb.search
        start = b'{'
        end = b'}'
    else:
        if '$' not in path:
            return path
        if not _varprog:
            import re
            _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII)
        search = _varprog.search
        start = '{'
        end = '}'
    i = 0
    while True:
        m = search(path, i)
        if not m:
            break
        i, j = m.span(0)
        name = m.group(1)
        if name.startswith(start) and name.endswith(end):
            # strip the braces of the ${...} form
            name = name[1:-1]
        if isinstance(name, bytes):
            name = str(name, 'ASCII')
        if name in os.environ:
            # splice the value in, then continue scanning after it
            tail = path[j:]
            value = os.environ[name]
            if isinstance(path, bytes):
                value = value.encode('ASCII')
            path = path[:i] + value
            i = len(path)
            path += tail
        else:
            # unknown variable: leave it as-is and move past it
            i = j
    return path
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    if isinstance(path, bytes):
        sep, empty, dot, dotdot = b'/', b'', b'.', b'..'
    else:
        sep, empty, dot, dotdot = '/', '', '.', '..'
    if path == empty:
        return dot
    # POSIX: exactly two leading slashes are significant; one, or three
    # or more, collapse to a single slash.
    initial_slashes = path.startswith(sep)
    if (initial_slashes and
        path.startswith(sep*2) and not path.startswith(sep*3)):
        initial_slashes = 2
    stack = []
    for comp in path.split(sep):
        if comp in (empty, dot):
            continue                    # drop empty parts and '.'
        keep = (comp != dotdot or
                (not initial_slashes and not stack) or
                (stack and stack[-1] == dotdot))
        if keep:
            stack.append(comp)
        elif stack:
            stack.pop()                 # '..' cancels the previous component
    path = sep.join(stack)
    if initial_slashes:
        path = sep * initial_slashes + path
    return path or dot
def abspath(path):
    """Return an absolute path."""
    if not isabs(path):
        # anchor relative paths at the current working directory,
        # matching the argument's type (bytes vs str)
        cwd = os.getcwdb() if isinstance(path, bytes) else os.getcwd()
        path = join(cwd, path)
    return normpath(path)
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).
def realpath(filename):
    """Return the canonical path of the specified filename, eliminating any
    symbolic links encountered in the path."""
    if isinstance(filename, bytes):
        sep = b'/'
        empty = b''
    else:
        sep = '/'
        empty = ''
    if isabs(filename):
        bits = [sep] + filename.split(sep)[1:]
    else:
        bits = [empty] + filename.split(sep)
    # Walk the path prefix by prefix, resolving each symlink encountered.
    for i in range(2, len(bits)+1):
        component = join(*bits[0:i])
        # Resolve symbolic links.
        if islink(component):
            resolved = _resolve_link(component)
            if resolved is None:
                # Infinite loop -- return original component + rest of the path
                return abspath(join(*([component] + bits[i:])))
            else:
                # restart resolution on the substituted path
                newpath = join(*([resolved] + bits[i:]))
                return realpath(newpath)
    return abspath(filename)

def _resolve_link(path):
    """Internal helper function.  Takes a path and follows symlinks
    until we either arrive at something that isn't a symlink, or
    encounter a path we've seen before (meaning that there's a loop).
    """
    paths_seen = []
    while islink(path):
        if path in paths_seen:
            # Already seen this path, so we must have a symlink loop
            return None
        paths_seen.append(path)
        # Resolve where the link points to
        resolved = os.readlink(path)
        if not isabs(resolved):
            # relative link targets are interpreted from the link's directory
            dir = dirname(path)
            path = normpath(join(dir, resolved))
        else:
            path = normpath(resolved)
    return path
supports_unicode_filenames = False

def relpath(path, start=None):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    if isinstance(path, bytes):
        curdir = b'.'
        sep = b'/'
        pardir = b'..'
    else:
        curdir = '.'
        sep = '/'
        pardir = '..'
    if start is None:
        start = curdir
    start_list = abspath(start).split(sep)
    path_list = abspath(path).split(sep)
    # Work out how much of the filepath is shared by start and path.
    i = len(commonprefix([start_list, path_list]))
    # climb out of the unshared part of start, then descend into path
    rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/posixpath.py | Python | mit | 13,199 |
#!/usr/bin/python
import sys
import shutil
import os
import fileinput
import time
if len(sys.argv) < 3:
print 'arg 1 = Unity folder name'
print 'arg 2 = Unity project path'
print 'arg 3 = Unity package path'
print 'Example:'
print 'python buildTarget.py Unity <TapGearProjectPath> <UnityPackagePath>'
sys.exit(1)
# the import will only succeed if there are no compilation errors when the Unity project is opened, prior to the import
unityFolderName = sys.argv[1] #name of the unity folder in applications
unityProjectPath = sys.argv[2] #this is our project that we are building from
unityPackagePath = sys.argv[3] #this is the path to the unity package to import
print 'Unity Folder Name : ' + unityFolderName
print 'Unity Project Path : ' + unityProjectPath
print 'Unity Package Path : ' + unityPackagePath
print 'importing...'
os.system("\"/Applications/" + unityFolderName + "/Unity.app/Contents/MacOS/Unity\" -quit -batchmode -logFile /dev/stdout -projectPath \"" + unityProjectPath + "\" -importPackage \"" + unityPackagePath + "\"")
print 'done.'
| Grantoo/FuelTapGear-Sample-Unity | scripts/importPackage.py | Python | mit | 1,102 |
## Automatically adapted for numpy.oldnumeric Mar 26, 2007 by alter_code1.py
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
## Contributions: olivier PERIN
##
## last $Author$
## last $Date$
## $Revision$
"""
Analyze model quality
"""
import Biskit.tools as T
from Biskit.PDBModel import PDBModel
from Biskit.Mod.Benchmark import Benchmark
from Biskit.Mod.ValidationSetup import ValidationSetup as VS
from Biskit.Mod.CheckIdentities import CheckIdentities as CI
from Biskit.Mod.Modeller import Modeller
import os
from string import *
import numpy.oldnumeric as N
class Analyse:
"""
Create a folder named analyse in the project root folder
that contains the following results:
GLOBAL: global rmsd: all atoms, c-alpha only, percentage of
identities, Modeller score, and the number of templates
LOCAL: results of the cross validation, rmsd per residue c-alpha
only for each templates and the mean rmsd
3D structure: pickle down the final.pdb that is the best model of
the project and the mean rmsd is set inside (temperature_factor)
"""
F_RESULT_FOLDER = '/analyse'
F_TEMPLATE_FOLDER = VS.F_RESULT_FOLDER
F_PDBModels = Benchmark.F_PDBModels_OUT
F_MODELS = Modeller.F_RESULT_FOLDER + Modeller.F_PDBModels
F_INPUT_ALNS= '/t_coffee/final.pir_aln'
F_INPUT_RMSD = Benchmark.F_RESULT_FOLDER
F_RMSD_AA = Benchmark.F_RMSD_AA
F_RMSD_CA = Benchmark.F_RMSD_CA
F_OUTPUT_VALUES = F_RESULT_FOLDER + '/global_results.out'
F_CROSS_VAL = F_RESULT_FOLDER + '/local_results.out'
F_FINAL_PDB = F_RESULT_FOLDER + '/final.pdb'
    def __init__( self, outFolder, log=None ):
        """
        @param outFolder: base folder for output
        @type outFolder: str
        @param log: None reports to STDOUT
        @type log: LogFile instance or None
        """
        self.outFolder = T.absfile( outFolder )
        self.log = log
        ## create the /analyse result folder right away
        self.prepareFolders()
def prepareFolders( self ):
"""
Create folders needed by this class.
"""
if not os.path.exists( self.outFolder + self.F_RESULT_FOLDER ):
os.mkdir( self.outFolder + self.F_RESULT_FOLDER )
def parseFile( self, name ):
"""
Parse a identity matrix file
@param name: file to parse
@type name: str
@return: contents of parsed file
@rtype: [[str]]
"""
f = open( name, 'r')
result = []
lines = f.readlines()
for l in lines:
if not l[0] == '#':
r =[]
for s in l.split():
try:
r += [float(s)]
except:
pass
if len(r)>=1:
result += [ r ]
f.close()
return result
def __listDir( self, path ):
"""
List all the files and folders in a directory
with the exceprion of ...
@param path: dir to list
@type path: str
@return: list of files
@rtype: [str]
"""
files = os.listdir( path )
if 'CVS' in files:
files.remove('CVS')
return files
######################################################################
#### GLOBAL RESTULTS: RMSD_AA, RMSD_CA,
#### %ID(mean of the templates),
#### Nb of Templates
    def global_rmsd_aa(self, validation_folder= None):
        """
        Global RMSD values.

        @param validation_folder: folder with validation data
                         (default: None S{->} outFolder/L{F_TEMPLATE_FOLDER})
        @type validation_folder: str

        @return: two dictionaries:
                 - rmsd_aa_wo_if: global all atom rmsd for each
                   template without iterative fitting
                 - rmsd_aa_if: global all atom rmsd for each
                   templates with iterative fitting
        @rtype: dict, dict
        """
        validation_folder = validation_folder or self.outFolder + \
                            self.F_TEMPLATE_FOLDER
        folders = self.__listDir(validation_folder)
        rmsd_aa_wo_if = {}
        rmsd_aa_if = {}
        for folder in folders:
            file = "%s/%s"%(validation_folder, folder + self.F_RMSD_AA)
            lst = self.parseFile( file )
            ## first row: [rmsd_no_fit, rmsd_iter_fit, fraction_discarded]
            ## -- presumably; confirm against Benchmark's rmsd_aa.out format
            rmsd_aa_wo_if[folder] = [ lst[0][0] ]
            rmsd_aa_if[folder] = [ lst[0][1], lst[0][2]*100.]
        return rmsd_aa_wo_if, rmsd_aa_if

    def global_rmsd_ca(self, validation_folder= None):
        """
        Global RMSD CA values.

        @param validation_folder: folder with validation data
                         (default: None S{->} outFolder/L{F_TEMPLATE_FOLDER})
        @type validation_folder: str

        @return: two dictionaries:
                 - rmsd_ca_wo_if: global CA rmsd for each template
                   without iterative fitting
                 - rmsd_ca_if: global CA rmsd for each template
                   with iterative fitting
        @rtype: dict, dict
        """
        validation_folder = validation_folder or self.outFolder + \
                            self.F_TEMPLATE_FOLDER
        folders = self.__listDir(validation_folder)
        rmsd_ca_wo_if = {}
        rmsd_ca_if = {}
        for folder in folders:
            file = "%s/%s"%(validation_folder, folder + self.F_RMSD_CA)
            lst = self.parseFile( file )
            ## same column layout as global_rmsd_aa, but for CA atoms only
            rmsd_ca_wo_if[folder] = [ lst[0][0] ]
            rmsd_ca_if[folder] = [ lst[0][1], lst[0][2]*100.]
        return rmsd_ca_wo_if, rmsd_ca_if
    def get_identities(self, nb_templates, validation_folder = None):
        """
        Calculate the mean of the percentage of identities for each
        template with the others.

        @param nb_templates: number of templates used in the cross-validation
        @type nb_templates: int
        @param validation_folder: folder with validation data
                         (default: None S{->} outFolder/L{F_TEMPLATE_FOLDER})
        @type validation_folder: str

        @return: dictionary with mean percent identities for each template
        @rtype: {str:float}
        """
        validation_folder = validation_folder or self.outFolder + \
                            self.F_TEMPLATE_FOLDER
        folders = self.__listDir(validation_folder)
        identities = {}
        for folder in folders:
            file = "%s/%s"%(validation_folder, folder + \
                            CI.F_OUTPUT_IDENTITIES_COV)
            lst = self.parseFile( file )
            ## identity to mean template
            ## (first column is presumably self-identity -- skipped)
            identities[folder] = N.sum(lst[0][1:])/nb_templates
        return identities
def get_score(self, validation_folder = None):
"""
Get the best global modeller score for each template re-modeled
@param validation_folder: folder vith validation data
(defult: None S{->} outFolder/L{F_TEMPLATE_FOLDER})
@type validation_folder: str
@return: dictionary with modeller score for each template
@rtype: {str:float}
"""
validation_folder = validation_folder or self.outFolder + \
self.F_TEMPLATE_FOLDER
folders = self.__listDir(validation_folder)
score = {}
for folder in folders:
file = "%s/%s"%(validation_folder, folder + Modeller.F_SCORE_OUT)
file = open(file, 'r')
string_lines = file.readlines()[3]
score[folder] = float( split(string_lines)[1] )
return score
def output_values(self, rmsd_aa_wo_if, rmsd_aa_if, rmsd_ca_wo_if,
rmsd_ca_if, identities, score, nb_templates,
output_file = None):
"""
Write result to file.
@param rmsd_aa_wo_if: Rmsd for heavy atoms and normal fit.
Data should be a dictionary mapping pdb
codes to a list containing the rmsd value
and the percent of discharded atoms in
the rmsd calculation.
@type rmsd_aa_wo_if: {str:[float,float]}
@param rmsd_aa_if: Rmsd for heavy atoms, iterative fit.
@type rmsd_aa_if: {str:[float,float]}
@param rmsd_ca_wo_if: rmsd for only CA, normal fit.
@type rmsd_ca_wo_if: {str:[float,float]}
@param rmsd_ca_if: Rmsd for only CA, iterative fit.
@type rmsd_ca_if: {str:[float,float]}
@param identities: mean identity to template, dictionary
mapping pdb codes to identity values
@type identities: {str:float}
@param score: score calculated by Modeller
@type score: float
@param nb_templates: number of templates used for re-modeling
@type nb_templates: int
@param output_file: file to write
(default: None S{->} outFolder/L{F_OUTPUT_VALUES})
@type output_file: str
"""
output_file = output_file or self.outFolder + self.F_OUTPUT_VALUES
file = open( output_file, 'w' )
file.write("PROPERTIES OF RE-MODELED TEMPLATES:\n\n")
file.write(" | NORMAL FIT | ITERATIVE FIT | IDENTITY | SCORE | NR\n" )
file.write("PDB | heavy CA | heavy percent CA percent | mean to | mod8 | of\n")
file.write("code | rmsd rmsd | rmsd discarded rmsd discarded | templates | | templates\n")
for key, value in rmsd_aa_wo_if.items():
file.write("%4s %6.2f %6.2f %6.2f %8.1f %7.2f %7.1f %10.1f %9i %6i\n"%\
(key, value[0], rmsd_ca_wo_if[key][0], rmsd_aa_if[key][0],
rmsd_aa_if[key][1], rmsd_ca_if[key][0], rmsd_ca_if[key][1],
identities[key], score[key], nb_templates))
file.close()
########################################################################
######### LOCAL RESULTS: Cross Validation ---- RMSD / Res
    def get_aln_info(self, output_folder = None):
        """
        Collect alignment information.

        @param output_folder: output folder (default: None S{->} outFolder)
        @type output_folder: str

        @return: aln_dictionary, contains information from the alignment
                 between the target and its templates
                 e.g. {'name':'target, 'seq': 'sequence of the target'}
        @rtype: dict
        """
        output_folder = output_folder or self.outFolder
        ## delegate parsing of the t_coffee .pir alignment to CheckIdentities
        ci = CI(outFolder=output_folder)
        string_lines = ci.get_lines()
        aln_length = ci.search_length(string_lines)
        aln_dictionnary = ci.get_aln_sequences(string_lines, aln_length)
        aln_dictionnary = ci.get_aln_templates(string_lines, aln_dictionnary,
                                               aln_length)
        aln_dictionnary = ci.identities(aln_dictionnary)
        return aln_dictionnary

    def get_templates_rmsd(self, templates):
        """
        Collect RMSD values between all the templates.

        @param templates: name of the different templates
        @type templates: [str]

        @return: template_rmsd_dic, contains all the rmsd per residues
                 of all the templates
        @rtype: dict
        """
        template_rmsd_dic = {}
        for template in templates:
            ## per-template pickled PDBModel list from the benchmark step
            pdb_list = self.outFolder + self.F_TEMPLATE_FOLDER \
                       + "/%s"%template + self.F_PDBModels
            pdb_list = T.load(pdb_list)
            ## keep only CA atoms; 'rmsd2ref_if' is the iterative-fit
            ## rmsd-to-reference profile computed by Benchmark
            template_rmsd_dic[template] = \
                pdb_list[0].compress(pdb_list[0].maskCA()).atoms["rmsd2ref_if"]
        return template_rmsd_dic
def templates_profiles(self, templates, aln_dic, template_rmsd_dic):
"""
Collect RMSD profiles of each template with the target and their %ID.
@param templates: name of the different templates
@type templates: [str]
@param aln_dic: contains all the informations between the
target and its templates from the alignment
@type aln_dic: dict
@param template_rmsd_dic: contains all the rmsd per residues of all
the templates
@type template_rmsd_dic: dict
@return: template_profiles, contains all the profile rmsd of
each template with the target and their %ID
@rtype: dict
"""
templates_profiles = {}
target_aln = aln_dic["target"]["seq"]
for template in templates:
template_aln = []
template_profile = []
template_info = {}
template_rmsd = template_rmsd_dic[template]
for key in aln_dic:
if(key[:4] == template):
template_aln = aln_dic[key]["seq"]
no_res = -1
for i in range(len(target_aln)):
if(template_aln[i] is not '-'):
no_res += 1
if(target_aln[i] != '-' and template_aln[i] != '-'):
template_profile.append(template_rmsd[no_res])
if(target_aln[i] != '-' and template_aln[i] == '-'):
template_profile.append(-1)
template_info["rProfile"] = template_profile
for key in aln_dic["target"]["cov_ID"]:
if(key[:4] == template):
template_info["cov_ID"] = \
aln_dic["target"]["cov_ID"][key]
templates_profiles[template] = template_info
return templates_profiles
    def output_cross_val(self, aln_dic, templates_profiles,
                         templates, model, output_file=None):
        """
        Calculates the mean rmsd of the model to the templates and
        write the result to a file.

        @param aln_dic: contains all the informations between the
                        target and its templates from the alignment
        @type aln_dic: dict
        @param templates_profiles: contains all the profile rmsd of
                                   each template with the target and their %ID
        @type templates_profiles: dict
        @param templates: name of the different templates
        @type templates: [str]
        @param model: model
        @type model: PDBModel
        @param output_file: output file
                            (default: None S{->} outFolder/L{F_CROSS_VAL})
        @type output_file: str

        @return: mean_rmsd, dictionary with the mean rmsd of the model
                 to the templates.
        @rtype: dict
        """
        output_file = output_file or self.outFolder + self.F_CROSS_VAL
        mean, sum, values = 0, 0, 0
        mean_rmsd = []

        ## accumulate mean coverage-identity of target to templates
        ## (result currently unused -- see commented line below)
        for k,v in aln_dic["target"]["cov_ID"].items():
            if (k != "target"):
                sum += aln_dic["target"]["cov_ID"][k]
                values +=1

##        cov_id_target = float(sum/values)

        ## per alignment position: average rmsd over all templates that
        ## have a residue there (-1 marks a gap and is skipped)
        for i in range(len(templates_profiles[templates[0]]["rProfile"])):
            mean = 0
            sum = 0
            n_values = 0
            for k in templates_profiles:
                if(templates_profiles[k]["rProfile"][i] != -1):
                    sum += templates_profiles[k]["rProfile"][i]
                    n_values += 1
            if(n_values != 0):
                mean = float(sum) / float(n_values)
            else: mean = -1
            mean_rmsd.append(mean)

        ## write header
        file = open (output_file, 'w')
        file.write("Mean rmsd of model to templates and the residue rmsd.\n")

        ## write pdb code
        file.write(" "*7)
        for k in templates_profiles.keys():
            file.write("%6s"%k)
        file.write(" mean\n")

        ## write mean rmsd
        file.write(" "*7)
        for k in templates_profiles.keys():
            file.write("%6.2f"%templates_profiles[k]["cov_ID"])
        file.write("\n%s\n"%('='*70))

        ## write rmsd residue profiles
        res_nr = model.compress( model.maskCA()).atoms['residue_number']
        res_na = model.compress( model.maskCA()).atoms['residue_name']
        for i in range(len(templates_profiles[templates[0]]["rProfile"])):
            file.write("%3i %3s"%(res_nr[i],
                                  res_na[i]))
            for k in templates_profiles:
                file.write("%6.2f"%(templates_profiles[k]["rProfile"][i]))
            file.write("%6.2f\n"%(mean_rmsd[i]))
        file.close()

        return mean_rmsd
#################################################
###### 3D Structure: mean RMSD
    def updatePDBs_charge(self, mean_rmsd_atoms, model):
        """
        pickle down the final.pdb which is judged to be the best model
        of the project. The mean rmsd to the templates is written to the
        temperature_factor column.

        @param mean_rmsd_atoms: mean rmsd for each atom of the
                                target's model
        @type mean_rmsd_atoms: [int]
        @param model: target's model with the highest modeller score
        @type model: PDBModel
        """
        ## abuse the B-factor column to carry the per-atom mean rmsd
        model['temperature_factor'] = mean_rmsd_atoms
        model.writePdb(self.outFolder + self.F_FINAL_PDB)
######################
### LAUNCH FUNCTION ##
######################
    def go(self, output_folder = None, template_folder = None):
        """
        Run analysis of models.

        @param output_folder: folder for result files
                              (default: None S{->} outFolder/L{F_RESULT_FOLDER})
        @type output_folder: str
        @param template_folder: folder with template structures
                                (default: None S{->} outFolder/L{VS.F_RESULT_FOLDER})
        @type template_folder: str
        """
        ## best Modeller model of the main project
        pdb_list = T.load(self.outFolder + self.F_MODELS)
        model = PDBModel(pdb_list[0])
        ##
        output_folder = output_folder or self.outFolder + self.F_RESULT_FOLDER
        template_folder = template_folder or self.outFolder +VS.F_RESULT_FOLDER
        templates = self.__listDir(template_folder)
        ## global statistics (rmsd, identity, score) -> global_results.out
        global_rmsd_aa_wo_if, global_rmsd_aa_if = self.global_rmsd_aa()
        global_rmsd_ca_wo_if, global_rmsd_ca_if = self.global_rmsd_ca()
        nb_templates = len(templates)-1
        identities = self.get_identities(nb_templates)
        score = self.get_score()
        self.output_values(global_rmsd_aa_wo_if, global_rmsd_aa_if,
                           global_rmsd_ca_wo_if, global_rmsd_ca_if,
                           identities, score, nb_templates)
        ## local (per-residue) cross-validation -> local_results.out
        aln_dic = self.get_aln_info(output_folder=self.outFolder)
        template_rmsd_dic = self.get_templates_rmsd(templates)
        templates_profiles = self.templates_profiles(templates,
                                                     aln_dic,
                                                     template_rmsd_dic)
        mean_rmsd = self.output_cross_val(aln_dic, templates_profiles,
                                          templates, model)
        ## write final.pdb with mean rmsd stored in the B-factor column
        mean_rmsd_atoms = model.res2atomProfile(mean_rmsd)
        self.updatePDBs_charge(mean_rmsd_atoms, model)
#############
## TESTING
#############
import Biskit.test as BT
class Test( BT.BiskitTest ):
    """
    Test class: builds a throw-away project folder from canned test data,
    then runs the full Analyse pipeline on it.
    """

    def prepare(self):
        import tempfile
        import shutil

        ## collect the input files needed
        self.outfolder = tempfile.mkdtemp( '_test_Analyse' )

        ## data from validation sub-projects
        dir = '/Mod/project/validation/'
        for v in ['1DT7', '1J55']:
            os.makedirs( self.outfolder +'/validation/%s/modeller'%v )
            shutil.copy( T.testRoot() +dir+'/%s/modeller/Modeller_Score.out'%v,
                         self.outfolder + '/validation/%s/modeller'%v)
            shutil.copy( T.testRoot() + dir + '/%s/identities_cov.out'%v,
                         self.outfolder + '/validation/%s'%v )
            os.mkdir( self.outfolder +'/validation/%s/benchmark'%v )
            for f in [ 'rmsd_aa.out', 'rmsd_ca.out', 'PDBModels.list' ]:
                shutil.copy( T.testRoot() + dir + '/%s/benchmark/%s'%(v,f),
                             self.outfolder + '/validation/%s/benchmark'%v )

        ## data from main project
        os.mkdir( self.outfolder +'/modeller' )
        shutil.copy( T.testRoot() + '/Mod/project/modeller/PDBModels.list',
                     self.outfolder + '/modeller' )

        os.mkdir( self.outfolder +'/t_coffee' )
        shutil.copy( T.testRoot() + '/Mod/project/t_coffee/final.pir_aln',
                     self.outfolder + '/t_coffee' )

    def test_Analyzer(self):
        """Mod.Analyzer test"""
        self.a = Analyse( outFolder = self.outfolder )
        self.a.go()

        if self.local and self.DEBUG:
            self.log.add(
                'The result from the analysis is in %s/analyse'%self.outfolder)

    def cleanUp(self):
        ## remove the temporary project tree
        T.tryRemove( self.outfolder, tree=1 )
class ProjectAnalyzeTest( BT.BiskitTest ):
"""
Test case for analyzing a complete modeling project in test/Mod/project
"""
## rename to test_Analyze to include this test
def t_Analyze( self ):
"""
Mod.Analyze full test/Mod/project test
"""
self.outfolder = T.testRoot() + '/Mod/project'
self.a = Analyse( outFolder = self.outfolder )
self.a.go()
if self.local:
print 'The result from the analysis can be found in %s/analyse'%outfolder
if __name__ == '__main__':
BT.localTest()
| ostrokach/biskit | Biskit/Mod/Analyse.py | Python | gpl-3.0 | 22,678 |
from distutils.core import setup
PACKAGE = "inmembrane"
DESCRIPTION = "A bioinformatic pipeline for proteome annotation \
to predict if a protein is exposed on the surface of a bacteria."
AUTHOR = "Andrew Perry & Bosco Ho"
AUTHOR_EMAIL = "ajperry@pansapiens.com"
URL = "http://github.com/boscoh/inmembrane"
# Must be a semantic version number. Also update inmembrane/__init__.py
VERSION = "0.95.0" # __import__(PACKAGE).__version__
try:
    extra_requires = []
    # OrderedDict is stdlib from Python 2.7; older interpreters need the
    # 'ordereddict' backport package instead.
    from collections import OrderedDict
except ImportError:
    # only an ImportError means we are on <2.7; a bare 'except' (as before)
    # would have swallowed unrelated failures too
    extra_requires.append("ordereddict")
# Standard distutils metadata; README, examples & docs ship via MANIFEST.in.
setup(
    name=PACKAGE,
    version=VERSION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    url=URL,
    description=DESCRIPTION,
    packages=['inmembrane', 'inmembrane.plugins',
              'inmembrane.protocols', 'inmembrane.tests'],
    # NOTE: some packaging filters are also in MANIFEST.in
    package_data={'inmembrane': ['protocols/*/*',
                                 'tests/*/*',
                                 'plugins/*/*'], },
    scripts=['inmembrane_scan'],
    # README, examples & docs are included via MANIFEST.in
    license='BSD',
    long_description=open('README.rst', 'rt').read(),
    install_requires=["BeautifulSoup >= 3.2.1",
                      "bs4",
                      "cssselect",
                      "lxml",
                      "requests >= 2.0.0",
                      "semantic_version",
                      "suds >= 0.4",
                      # NOTE(review): exact pin to twill 0.9.1 -- confirm newer
                      # releases break the web-scraping plugins before relaxing
                      "twill == 0.9.1",
                      ] + extra_requires,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Scientific/Engineering :: Bio-Informatics",
    ],
    zip_safe=False,
)
| boscoh/inmembrane | setup.py | Python | bsd-2-clause | 1,904 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.