| code (stringlengths 3–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import peewee
from flask import current_app, abort
from flask_login import AnonymousUserMixin, UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from peewee import Model, IntegerField, CharField, PrimaryKeyField
from website.app import db_wrapper, login_manager
from website.http.main_exception import MainException
from werkzeug.security import check_password_hash, generate_password_hash
class User(UserMixin, db_wrapper.Model):
id = PrimaryKeyField()
email = CharField(index=True)
username = CharField(index=True)
password_hash = CharField()
role_id = IntegerField()
confirmed = IntegerField()
class Meta:
db_table = 'users'
    def register(self, email, password, username):
user = User(email=email, username=username, password_hash=generate_password_hash(password))
try:
user.save()
except peewee.IntegrityError as err:
print(err.args)
if err.args[0] == 1062:
if 'ix_users_email' in err.args[1]:
raise MainException.DUPLICATE_EMAIL
if 'ix_users_username' in err.args[1]:
raise MainException.DUPLICATE_USERNAME
return user
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
"""生成验证邮箱的token"""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
"""验证邮箱"""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
print(data)
        except Exception:
return False
if data.get('confirm') != self.id:
return False
        # Verification succeeded; write it to the database.
self.confirmed = True
self.save()
return True
def generate_reset_token(self, expiration=3600):
"""生成重置密码的token"""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
"""重置密码"""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
        except Exception:
return False
if data.get('reset') != self.id:
return False
        # Verification succeeded; store the new password hash (the model has
        # no plain `password` field, only `password_hash`).
        self.password_hash = generate_password_hash(new_password)
self.save()
return True
"""
匿名用户
"""
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
    # peewee's Model.get raises DoesNotExist rather than returning None,
    # so catch the exception instead of testing the result.
    try:
        return User.get(User.id == int(user_id))
    except User.DoesNotExist:
        abort(404)
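A minimal usage sketch for the model above (hypothetical: it assumes a Flask application context with SECRET_KEY configured; the credentials are illustrative):
# Hypothetical usage sketch, not part of the original module.
user = User().register('alice@example.com', 's3cret', 'alice')
token = user.generate_confirmation_token()
assert user.confirm(token)              # marks the account as confirmed
assert user.verify_password('s3cret')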
| alexli0707/pyforum | website/models/user.py | Python | apache-2.0 | 3,011 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
class _Getch(object):
"""
Gets a single character from standard input. Does not echo to
the screen (reference: http://code.activestate.com/recipes/134892/)
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
try:
self.impl = _GetchMacCarbon()
            except (AttributeError, ImportError):
self.impl = _GetchUnix()
def __call__(self):
return self.impl()
class _GetchUnix(object):
    def __init__(self):
        import tty  # probe at construction time: fail fast if tty/termios is unavailable
def __call__(self):
import sys
import termios
import tty
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows(object):
    def __init__(self):
        import msvcrt  # probe at construction time: raises ImportError off Windows
def __call__(self):
import msvcrt
return msvcrt.getch()
class _GetchMacCarbon(object):
"""
A function which returns the current ASCII key that is down;
if no ASCII key is down, the null string is returned. The
page http://www.mactech.com/macintosh-c/chap02-1.html was
very helpful in figuring out how to do this.
"""
def __init__(self):
import Carbon
Carbon.Evt # see if it has this (in Unix, it doesn't)
def __call__(self):
import Carbon
if Carbon.Evt.EventAvail(0x0008)[0] == 0: # 0x0008 is the keyDownMask
return ''
else:
#
# The event contains the following info:
# (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
#
# The message (msg) contains the ASCII char which is
# extracted with the 0x000000FF charCodeMask; this
# number is converted to an ASCII character with chr() and
# returned
#
(what, msg, when, where, mod) = Carbon.Evt.GetNextEvent(0x0008)[1]
return chr(msg & 0x000000FF)
getch = _Getch()
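A minimal usage sketch for the getch helper above (hypothetical read loop; the quit keys are illustrative):
# Hypothetical usage sketch, not part of the original module.
while True:
    ch = getch()                # blocks for a single keypress, no echo
    if ch in ('q', '\x03'):     # 'q' or Ctrl-C
        break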
| glaudsonml/kurgan-ai | tools/sqlmap/lib/utils/getch.py | Python | apache-2.0 | 2,305 |
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="colorsrc",
parent_name="sankey.node.hoverlabel.font",
**kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/sankey/node/hoverlabel/font/_colorsrc.py | Python | mit | 454 |
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Ken Conley/kwc@willowgarage.com
import os
import yaml
rep122_install_command = """#!/bin/bash
set -o errexit
mkdir -p build
cd build
cmake ..
make
echo "About to run checkinstall make install"
sudo checkinstall -y --nodoc --pkgname=yaml-cpp-sourcedep make install
"""
rep122_check_presence_command = """#!/bin/bash
dpkg-query -W -f='${Package} ${Status}\\n' yaml-cpp-sourcedep | awk '{\\
if ($4 =="installed")
exit 0
else
print "yaml-cpp-sourcedep not installed"
exit 1}'
"""
REP112_MD5SUM = '77040d44b0e620c092bce918ac7b4180'
def get_test_dir():
return os.path.abspath(os.path.join(os.path.dirname(__file__), 'source'))
def _subtest_rep112_rdmanifest(resolved):
test_dir = get_test_dir()
path = os.path.join(test_dir, 'rep112-example.rdmanifest')
    with open(path) as f:
        manifest = yaml.safe_load(f)
assert resolved.manifest == manifest
assert resolved.manifest_url == path
assert resolved.install_command == rep122_install_command
assert resolved.check_presence_command == rep122_check_presence_command
assert len(resolved.check_presence_command) == len(rep122_check_presence_command), "%s %s"%(len(resolved.check_presence_command), len(rep122_check_presence_command))
assert resolved.exec_path == 'yaml-cpp-0.2.5'
assert resolved.tarball == 'https://yaml-cpp.googlecode.com/files/yaml-cpp-0.2.5.tar.gz'
    assert resolved.alternate_tarball is None
assert resolved.tarball_md5sum == 'b17dc36055cd2259c88b2602601415d9'
def test_SourceInstall():
from rosdep2.platforms.source import InvalidRdmanifest, SourceInstall
#tripwire
SourceInstall()
# test unpacking of dict
manifest = {
'md5sum': 'fake-md5',
'exec-path': '/path',
'install-script': 'echo hello',
'check-presence-script': 'hello there',
'uri': 'http://ros.org/',
'alternate-uri': 'http://turtlebot.com/',
'depends': ['foo', 'bar'],
}
resolved = SourceInstall.from_manifest(manifest, 'fake-url')
assert resolved.manifest == manifest
assert resolved.manifest_url == 'fake-url'
assert resolved.install_command == 'echo hello'
assert resolved.check_presence_command == 'hello there'
assert resolved.exec_path == '/path'
assert resolved.tarball == 'http://ros.org/'
assert resolved.alternate_tarball == 'http://turtlebot.com/'
assert resolved.tarball_md5sum == 'fake-md5'
test_dir = get_test_dir()
path = os.path.join(test_dir, 'rep112-example.rdmanifest')
    with open(path) as f:
        manifest = yaml.safe_load(f)
resolved = SourceInstall.from_manifest(manifest, path)
_subtest_rep112_rdmanifest(resolved)
#TODO: test depends
# test with bad dicts
manifest = {
'md5sum': 'fake-md5',
'exec-path': '/path',
'install-script': 'echo hello',
'check-presence-script': 'hello there',
'alternate-uri': 'http://turtlebot.com/',
'depends': ['foo', 'bar'],
}
# uri is required
try:
SourceInstall.from_manifest(manifest, 'foo')
assert False, "should have raised"
except InvalidRdmanifest as e:
pass
# test defaults
manifest = dict(uri='http://ros.org/')
resolved = SourceInstall.from_manifest(manifest, 'foo')
assert resolved.exec_path == '.'
assert resolved.install_command == ''
assert resolved.check_presence_command == ''
assert resolved.alternate_tarball is None
assert resolved.tarball_md5sum is None
def test_is_installed():
from rosdep2.platforms.source import SourceInstaller, SourceInstall
resolved = SourceInstall()
resolved.check_presence_command = """#!/bin/bash
exit 0
"""
installer = SourceInstaller()
assert installer.is_installed(resolved)
def test_source_detect():
from rosdep2.platforms.source import source_detect, SourceInstall
resolved = SourceInstall()
resolved.check_presence_command = """#!/bin/bash
exit 0
"""
assert [] == source_detect([])
assert [resolved] == source_detect([resolved])
def yes(*args, **kwds): return 0
def no(*args, **kwds): return 1
resolved = [SourceInstall(), SourceInstall(), SourceInstall(), SourceInstall()]
for r in resolved:
r.check_presence_command = ''
retval = source_detect(resolved, exec_fn=yes)
assert resolved == retval, retval
assert [] == source_detect(resolved, exec_fn=no)
def test_SourceInstaller_get_install_command():
from rosdep2.platforms.source import SourceInstaller, SourceInstall
installer = SourceInstaller()
resolved = SourceInstall()
resolved.manifest_url = 'http://fake/foo'
resolved.check_presence_command = """#!/bin/bash
exit 1
"""
commands = installer.get_install_command([resolved])
assert len(commands) == 1
assert commands[0] == ['rosdep-source', 'install', 'http://fake/foo']
resolved = SourceInstall()
resolved.manifest_url = 'http://fake/foo'
resolved.check_presence_command = """#!/bin/bash
exit 0
"""
commands = installer.get_install_command([resolved])
assert not(commands)
def test_SourceInstaller_resolve():
from rosdep2.platforms.source import SourceInstaller, InvalidData
test_dir = get_test_dir()
url = 'file://%s' % os.path.join(test_dir, 'rep112-example.rdmanifest')
md5sum_good = REP112_MD5SUM
md5sum_bad = 'fake'
installer = SourceInstaller()
try:
installer.resolve({})
assert False, "should have raised"
except InvalidData:
pass
try:
installer.resolve(dict(uri=url, md5sum=md5sum_bad))
assert False, "should have raised"
except InvalidData:
pass
resolved = installer.resolve(dict(uri=url, md5sum=md5sum_good))
assert type(resolved) == list
assert len(resolved) == 1
# test for reinstall (to check the depends in rdmanifest)
dependencies = installer.get_depends(dict(uri=url, md5sum=md5sum_good))
assert dependencies == ['checkinstall'], "Dependencies should resolve to checkinstall listed in the rdmanifest."
resolved = resolved[0]
assert resolved.install_command == rep122_install_command
assert resolved.check_presence_command == rep122_check_presence_command
# test again to activate caching
resolved = installer.resolve(dict(uri=url, md5sum=md5sum_good))
assert type(resolved) == list, "Cache should also return a list"
assert len(resolved) == 1
resolved = resolved[0]
assert resolved.install_command == rep122_install_command
assert resolved.check_presence_command == rep122_check_presence_command
def test_load_rdmanifest():
from rosdep2.platforms.source import load_rdmanifest, InvalidRdmanifest
# load_rdmanifest is just a YAML unmarshaller with an exception change
assert 'str' == load_rdmanifest('str')
assert {'a': 'b'} == load_rdmanifest('{a: b}')
try:
load_rdmanifest(';lkajsdf;klj ;l: a;kljdf;: asdf\n ;asdfl;kj')
assert False, "should have raised"
except InvalidRdmanifest as e:
pass
def test_get_file_hash():
from rosdep2.platforms.source import get_file_hash
path = os.path.join(get_test_dir(), 'rep112-example.rdmanifest')
assert REP112_MD5SUM == get_file_hash(path)
def test_fetch_file():
test_dir = get_test_dir()
with open(os.path.join(test_dir, 'rep112-example.rdmanifest')) as f:
expected = f.read()
from rosdep2.platforms.source import fetch_file
url = 'file://%s' % os.path.join(test_dir, 'rep112-example.rdmanifest')
contents, error = fetch_file(url, REP112_MD5SUM)
assert not error
assert contents == expected
contents, error = fetch_file(url, 'badmd5')
assert bool(error), "should have errored"
assert not contents
contents, error = fetch_file('http://badhostname.willowgarage.com', 'md5sum')
assert not contents
assert bool(error), "should have errored"
def test_download_rdmanifest():
test_dir = get_test_dir()
with open(os.path.join(test_dir, 'rep112-example.rdmanifest')) as f:
        expected = yaml.safe_load(f)
from rosdep2.platforms.source import download_rdmanifest, DownloadFailed
url = 'file://%s' % os.path.join(test_dir, 'rep112-example.rdmanifest')
contents, download_url = download_rdmanifest(url, REP112_MD5SUM)
assert contents == expected
assert download_url == url
# test alt_url
contents, download_url = download_rdmanifest('http://badhostname.willowgarage.com/', REP112_MD5SUM, alt_url=url)
assert contents == expected
assert download_url == url
# test md5sum validate
try:
contents, error = download_rdmanifest(url, 'badmd5')
assert False, "should have errored"
except DownloadFailed:
pass
# test download verify
try:
contents, error = download_rdmanifest('http://badhostname.willowgarage.com', 'fakemd5')
assert False, "should have errored"
except DownloadFailed:
pass
def test_install_from_file():
from rosdep2.platforms.source import install_from_file
f = os.path.join(get_test_dir(), 'noop-not-installed.rdmanifest')
install_from_file(f)
def test_install_source():
from rosdep2.platforms.source import install_source, SourceInstall
resolved = SourceInstall()
resolved.tarball = 'https://github.com/ros-infrastructure/rosdep/raw/master/test/source/foo.tar.gz'
resolved.tarball_md5sum = 'fd34dc39f8f192b97fcc191fe0a6befc'
resolved.install_command = """#!/bin/sh
exit 0
"""
resolved.exec_path = ''
install_source(resolved)
| sorki/rosdep | test/test_rosdep_source.py | Python | bsd-3-clause | 11,143 |
# -*- Mode: Python; test-case-name: flumotion.test.test_ui_fgtk -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import gobject
from flumotion.common import testsuite
try:
import gtk
from flumotion.ui.fvumeter import FVUMeter
except RuntimeError:
import os
os._exit(0)
attr = testsuite.attr
INTERVAL = 100 # in ms
class VUTest(testsuite.TestCase):
def testScale(self):
w = FVUMeter()
self.assertEquals(w.iec_scale(-80.0), 0.0)
self.assertEquals(w.iec_scale(-70.0), 0.0)
self.assertEquals(w.iec_scale(-60.0), 2.5)
self.assertEquals(w.iec_scale(-50.0), 7.5)
self.assertEquals(w.iec_scale(-40.0), 15)
self.assertEquals(w.iec_scale(-30.0), 30)
self.assertEquals(w.iec_scale(-20.0), 50)
self.assertEquals(w.iec_scale(-10.0), 75)
self.assertEquals(w.iec_scale(0.0), 100)
def testGetSet(self):
w = FVUMeter()
w.set_property('peak', -50.0)
self.assertEquals(w.get_property('peak'), -50.0)
w.set_property('decay', -50.0)
self.assertEquals(w.get_property('decay'), -50.0)
w.set_property('orange-threshold', -50.0)
self.assertEquals(w.get_property('orange-threshold'), -50.0)
w.set_property('red-threshold', -50.0)
self.assertEquals(w.get_property('red-threshold'), -50.0)
@attr('slow')
def testWidget(self):
w = FVUMeter()
window = gtk.Window()
window.add(w)
window.show_all()
gobject.timeout_add(0 * INTERVAL, w.set_property, 'peak', -50.0)
gobject.timeout_add(1 * INTERVAL, w.set_property, 'peak', -5.0)
gobject.timeout_add(2 * INTERVAL, gtk.main_quit)
gtk.main()
# these four calls make sure the window doesn't hang around during
# other tests
window.hide()
gtk.main_iteration()
window.destroy()
gtk.main_iteration()
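The expectations in testScale above encode the piecewise IEC 60268-18 dB-to-percent meter scale. A hedged reference sketch (not the Flumotion implementation, just one function that reproduces the asserted values):
# Hypothetical reference sketch, not part of the original module.
def iec_scale_sketch(db):
    if db < -70.0:
        return 0.0
    elif db < -60.0:
        return (db + 70.0) * 0.25
    elif db < -50.0:
        return (db + 60.0) * 0.5 + 2.5
    elif db < -40.0:
        return (db + 50.0) * 0.75 + 7.5
    elif db < -30.0:
        return (db + 40.0) * 1.5 + 15.0
    elif db < -20.0:
        return (db + 30.0) * 2.0 + 30.0
    elif db < -10.0:
        return (db + 20.0) * 2.5 + 50.0
    return min((db + 10.0) * 2.5 + 75.0, 100.0)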
| ylatuya/Flumotion | flumotion/test/test_ui_fgtk.py | Python | gpl-2.0 | 2,724 |
import logging
import math
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Q
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import View
from experiments_manager.models import Experiment
from marketplace.models import (ExternalPackage, InternalPackage, Package,
PackageResource, PackageVersion)
from .forms import RegisterForm, WorkbenchUserForm
from .models import WorkbenchUser, get_workbench_user
logger = logging.getLogger(__name__)
@login_required
def index(request):
workbench_user = WorkbenchUser.objects.get(user=request.user)
experiments = Experiment.objects.filter(owner=workbench_user).order_by('-created')[:5]
packages = InternalPackage.objects.filter(owner=workbench_user).order_by('-created')[:5]
logger.info('%s accessed index', workbench_user)
recent_versions = list(PackageVersion.objects.all().order_by('-created')[:5])
recent_resources = list(PackageResource.objects.all().order_by('-created')[:5])
recent_internal = list(InternalPackage.objects.all().order_by('-created')[:5])
recent_external = list(ExternalPackage.objects.all().order_by('-created')[:5])
    recent_experiments = list(Experiment.objects.filter(public=True).order_by('-created')[:5])
total_list = recent_versions + recent_resources + recent_internal + recent_external + recent_experiments
total_list = reversed(sorted(total_list, key=lambda x: x.created))
return render(request, 'index.html', {'experiments': experiments,
'packages': packages,
'activities': total_list})
class DetailProfileView(View):
def get(self, request):
workbench_user = get_workbench_user(request.user)
return render(request, "user_manager/workbenchuser_detail.html", {'workbench_user': workbench_user})
class EditProfileView(View):
def get(self, request):
workbench_user = get_workbench_user(request.user)
form = WorkbenchUserForm(instance=workbench_user)
logger.info('%s edit get profile view', workbench_user)
return render(request, "user_manager/workbenchuser_edit.html", {'form': form})
def post(self, request):
workbench_user = get_workbench_user(request.user)
form = WorkbenchUserForm(request.POST, instance=workbench_user)
if form.is_valid():
current_password = form.cleaned_data['current_password']
user = workbench_user.user
if current_password:
if user.check_password(current_password) and change_password_of_user(workbench_user, form):
messages.add_message(request, messages.SUCCESS, 'Your password has been changed.')
else:
messages.add_message(request, messages.ERROR, 'Passwords did not match '
'or incorrect current password.')
return render(request, "user_manager/workbenchuser_edit.html", {'form': form})
form.save()
logger.info('%s edited profile successfully', workbench_user)
return redirect(to='/')
else:
return render(request, "user_manager/workbenchuser_edit.html", {'form': form})
def change_password_of_user(w_user, form):
new_password = form.cleaned_data['new_password']
new_password_again = form.cleaned_data['new_password_again']
if new_password == new_password_again:
user = w_user.user
user.set_password(new_password)
user.save()
return True
return False
class RegisterView(View):
def get(self, request):
form = RegisterForm()
return render(request, 'user_manager/register.html', {'form': form})
def post(self, request):
form = RegisterForm(self.request.POST)
if form.is_valid():
new_email = form.cleaned_data['email']
if not existing_user_check(new_email):
user = User.objects.create_user(form.cleaned_data['username'],
new_email,
form.cleaned_data['password'])
workbench_user = WorkbenchUser.objects.get(user=user)
workbench_user.netid = form.cleaned_data['netid']
workbench_user.save()
logger.info('new user created: %s', workbench_user)
return redirect(to='/')
else:
return render(request, 'user_manager/register.html', {'form': form})
else:
return render(request, 'user_manager/register.html', {'form': form})
def existing_user_check(email_address):
    return User.objects.filter(email=email_address).exists()
class WorkbenchUserDetailView(View):
def get(self, request, username):
workbench_user = get_object_or_404(WorkbenchUser, user__username=username)
recent_experiments = Experiment.objects.filter(owner=workbench_user, completed=True).order_by('-created')[:5]
recent_packages = Package.objects.filter(owner=workbench_user).order_by('-created')[:5]
return render(request, "user_manager/user_profile.html", {'w_user': workbench_user,
'experiments': recent_experiments,
'packages': recent_packages})
@login_required
def search(request):
if 'q' in request.GET:
q = request.GET.get('q')
page = request.GET.get('page')
page = int(page) if page is not None else 1
results, nr_of_pages = get_search_results(request.user, q, page)
return render(request, 'search.html', {'results': results, 'query': q, 'page': page,
'next_page': page + 1,
'previous_page': page - 1,
'nr_of_pages': nr_of_pages,
'nr_of_pages_range': range(1, nr_of_pages+1)})
return render(request, 'search.html', {})
def get_search_results(user, q, page_nr=1, page_size=25):
start_value = (page_nr - 1) * page_size
end_value = start_value + page_size
search_query_list = build_search_queries(q, user)
total_count = sum([x.count() for x in search_query_list])
nr_of_pages = int(math.ceil(total_count / page_size))
total_list = [list(x.order_by('-created')[start_value:end_value]) for x in search_query_list]
total_flat_list = [item for sublist in total_list for item in sublist]
total_flat_list = sorted(total_flat_list, key=lambda x: x.created)
return total_flat_list, nr_of_pages
def build_search_queries(q, user):
package_version_query = PackageVersion.objects.filter(version_nr__contains=q)
package_resource_query = PackageResource.objects.filter(title__contains=q)
internal_package_query = InternalPackage.objects.filter(name__contains=q)
external_package_query = ExternalPackage.objects.filter(name__contains=q)
users_query = WorkbenchUser.objects.filter(user__username=q)
experiment_query = Experiment.objects.filter(Q(owner__user=user, title__contains=q) |
Q(completed=True, title__contains=q))
return package_version_query, package_resource_query, internal_package_query, external_package_query, \
experiment_query, users_query
| MOOCworkbench/MOOCworkbench | user_manager/views.py | Python | mit | 7,680 |
# -*- coding: utf-8 -*-
# This Module is taken in part from the click project and expanded
# see https://github.com/pallets/click/blob/6cafd32/click/_winconsole.py
# Copyright © 2014 by the Pallets team.
# Some rights reserved.
# Redistribution and use in source and binary forms of the software as well as
# documentation, with or without modification, are permitted provided that the
# following conditions are met:
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE AND
# DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This module is based on the excellent work by Adam Bartoš who
# provided a lot of what went into the implementation here in
# the discussion to issue1602 in the Python bug tracker.
#
# There are some general differences in regards to how this works
# compared to the original patches as we do not need to patch
# the entire interpreter but just work in our little world of
# echo and prompt.
import ctypes
import io
import os
import sys
import time
import zlib
from ctypes import (
POINTER,
WINFUNCTYPE,
Structure,
byref,
c_char,
c_char_p,
c_int,
c_ssize_t,
c_ulong,
c_void_p,
create_unicode_buffer,
py_object,
windll,
)
from ctypes.wintypes import HANDLE, LPCWSTR, LPWSTR
from itertools import count
import msvcrt
from six import PY2, text_type
from .compat import IS_TYPE_CHECKING
from .misc import StreamWrapper, run, to_text
try:
from ctypes import pythonapi
PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
PyBuffer_Release = pythonapi.PyBuffer_Release
except ImportError:
pythonapi = None
if IS_TYPE_CHECKING:
from typing import Text
c_ssize_p = POINTER(c_ssize_t)
CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
("CommandLineToArgvW", windll.shell32)
)
kernel32 = windll.kernel32
GetLastError = kernel32.GetLastError
GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32))
GetConsoleCursorInfo = kernel32.GetConsoleCursorInfo
GetStdHandle = kernel32.GetStdHandle
LocalFree = WINFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)(("LocalFree", windll.kernel32))
ReadConsoleW = kernel32.ReadConsoleW
SetConsoleCursorInfo = kernel32.SetConsoleCursorInfo
WriteConsoleW = kernel32.WriteConsoleW
# XXX: Added for cursor hiding on windows
STDOUT_HANDLE_ID = ctypes.c_ulong(-11)
STDERR_HANDLE_ID = ctypes.c_ulong(-12)
STDIN_HANDLE = GetStdHandle(-10)
STDOUT_HANDLE = GetStdHandle(-11)
STDERR_HANDLE = GetStdHandle(-12)
STREAM_MAP = {0: STDIN_HANDLE, 1: STDOUT_HANDLE, 2: STDERR_HANDLE}
PyBUF_SIMPLE = 0
PyBUF_WRITABLE = 1
ERROR_SUCCESS = 0
ERROR_NOT_ENOUGH_MEMORY = 8
ERROR_OPERATION_ABORTED = 995
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
EOF = b"\x1a"
MAX_BYTES_WRITTEN = 32767
class Py_buffer(Structure):
_fields_ = [
("buf", c_void_p),
("obj", py_object),
("len", c_ssize_t),
("itemsize", c_ssize_t),
("readonly", c_int),
("ndim", c_int),
("format", c_char_p),
("shape", c_ssize_p),
("strides", c_ssize_p),
("suboffsets", c_ssize_p),
("internal", c_void_p),
]
if PY2:
_fields_.insert(-1, ("smalltable", c_ssize_t * 2))
# XXX: This was added for the use of cursors
class CONSOLE_CURSOR_INFO(Structure):
_fields_ = [("dwSize", ctypes.c_int), ("bVisible", ctypes.c_int)]
# On PyPy we cannot get buffers so our ability to operate here is
# severely limited.
if pythonapi is None:
get_buffer = None
else:
def get_buffer(obj, writable=False):
buf = Py_buffer()
flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
PyObject_GetBuffer(py_object(obj), byref(buf), flags)
try:
buffer_type = c_char * buf.len
return buffer_type.from_address(buf.buf)
finally:
PyBuffer_Release(byref(buf))
def get_long_path(short_path):
    # type: (Text) -> Text
BUFFER_SIZE = 500
buffer = create_unicode_buffer(BUFFER_SIZE)
get_long_path_name = windll.kernel32.GetLongPathNameW
get_long_path_name(to_text(short_path), buffer, BUFFER_SIZE)
return buffer.value
class _WindowsConsoleRawIOBase(io.RawIOBase):
def __init__(self, handle):
self.handle = handle
def isatty(self):
io.RawIOBase.isatty(self)
return True
class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
def readable(self):
return True
def readinto(self, b):
bytes_to_be_read = len(b)
if not bytes_to_be_read:
return 0
elif bytes_to_be_read % 2:
raise ValueError(
"cannot read odd number of bytes from " "UTF-16-LE encoded console"
)
buffer = get_buffer(b, writable=True)
code_units_to_be_read = bytes_to_be_read // 2
code_units_read = c_ulong()
rv = ReadConsoleW(
self.handle, buffer, code_units_to_be_read, byref(code_units_read), None
)
if GetLastError() == ERROR_OPERATION_ABORTED:
# wait for KeyboardInterrupt
time.sleep(0.1)
if not rv:
raise OSError("Windows error: %s" % GetLastError())
if buffer[0] == EOF:
return 0
return 2 * code_units_read.value
class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
def writable(self):
return True
@staticmethod
def _get_error_message(errno):
if errno == ERROR_SUCCESS:
return "ERROR_SUCCESS"
elif errno == ERROR_NOT_ENOUGH_MEMORY:
return "ERROR_NOT_ENOUGH_MEMORY"
return "Windows error %s" % errno
def write(self, b):
bytes_to_be_written = len(b)
buf = get_buffer(b)
code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2
code_units_written = c_ulong()
WriteConsoleW(
self.handle, buf, code_units_to_be_written, byref(code_units_written), None
)
bytes_written = 2 * code_units_written.value
if bytes_written == 0 and bytes_to_be_written > 0:
raise OSError(self._get_error_message(GetLastError()))
return bytes_written
class ConsoleStream(object):
def __init__(self, text_stream, byte_stream):
self._text_stream = text_stream
self.buffer = byte_stream
@property
def name(self):
return self.buffer.name
@property
def fileno(self):
return self.buffer.fileno
def write(self, x):
if isinstance(x, text_type):
return self._text_stream.write(x)
try:
self.flush()
except Exception:
pass
return self.buffer.write(x)
def writelines(self, lines):
for line in lines:
self.write(line)
def __getattr__(self, name):
try:
return getattr(self._text_stream, name)
except io.UnsupportedOperation:
return getattr(self.buffer, name)
def isatty(self):
return self.buffer.isatty()
def __repr__(self):
return "<ConsoleStream name=%r encoding=%r>" % (self.name, self.encoding)
class WindowsChunkedWriter(object):
"""
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()' which we wrap to write in
limited chunks due to a Windows limitation on binary console streams.
"""
def __init__(self, wrapped):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def write(self, text):
total_to_write = len(text)
written = 0
while written < total_to_write:
to_write = min(total_to_write - written, MAX_BYTES_WRITTEN)
self.__wrapped.write(text[written : written + to_write])
written += to_write
_wrapped_std_streams = set()
def _wrap_std_stream(name):
# Python 2 & Windows 7 and below
if PY2 and sys.getwindowsversion()[:2] <= (6, 1) and name not in _wrapped_std_streams:
setattr(sys, name, WindowsChunkedWriter(getattr(sys, name)))
_wrapped_std_streams.add(name)
def _get_text_stdin(buffer_stream):
text_stream = StreamWrapper(
io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
"utf-16-le",
"strict",
line_buffering=True,
)
return ConsoleStream(text_stream, buffer_stream)
def _get_text_stdout(buffer_stream):
text_stream = StreamWrapper(
io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)),
"utf-16-le",
"strict",
line_buffering=True,
)
return ConsoleStream(text_stream, buffer_stream)
def _get_text_stderr(buffer_stream):
text_stream = StreamWrapper(
io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)),
"utf-16-le",
"strict",
line_buffering=True,
)
return ConsoleStream(text_stream, buffer_stream)
if PY2:
def _hash_py_argv():
return zlib.crc32("\x00".join(sys.argv[1:]))
_initial_argv_hash = _hash_py_argv()
def _get_windows_argv():
argc = c_int(0)
argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
try:
argv = [argv_unicode[i] for i in range(0, argc.value)]
finally:
LocalFree(argv_unicode)
del argv_unicode
if not hasattr(sys, "frozen"):
argv = argv[1:]
while len(argv) > 0:
arg = argv[0]
if not arg.startswith("-") or arg == "-":
break
argv = argv[1:]
if arg.startswith(("-c", "-m")):
break
return argv[1:]
_stream_factories = {0: _get_text_stdin, 1: _get_text_stdout, 2: _get_text_stderr}
def _get_windows_console_stream(f, encoding, errors):
if (
get_buffer is not None
and encoding in ("utf-16-le", None)
and errors in ("strict", None)
and hasattr(f, "isatty")
and f.isatty()
):
if isinstance(f, ConsoleStream):
return f
func = _stream_factories.get(f.fileno())
if func is not None:
if not PY2:
f = getattr(f, "buffer", None)
if f is None:
return None
else:
# If we are on Python 2 we need to set the stream that we
                # deal with to binary mode as otherwise the exercise is a
# bit moot. The same problems apply as for
# get_binary_stdin and friends from _compat.
msvcrt.setmode(f.fileno(), os.O_BINARY)
return func(f)
def hide_cursor():
cursor_info = CONSOLE_CURSOR_INFO()
GetConsoleCursorInfo(STDOUT_HANDLE, ctypes.byref(cursor_info))
    cursor_info.bVisible = False  # the struct field is bVisible, per _fields_ above
SetConsoleCursorInfo(STDOUT_HANDLE, ctypes.byref(cursor_info))
def show_cursor():
cursor_info = CONSOLE_CURSOR_INFO()
GetConsoleCursorInfo(STDOUT_HANDLE, ctypes.byref(cursor_info))
    cursor_info.bVisible = True
SetConsoleCursorInfo(STDOUT_HANDLE, ctypes.byref(cursor_info))
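A minimal usage sketch for the cursor helpers above (hypothetical; the body is illustrative):
# Hypothetical usage sketch, not part of the original module.
hide_cursor()
try:
    pass  # e.g. render a spinner or progress output here
finally:
    show_cursor()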
def get_stream_handle(stream):
return STREAM_MAP.get(stream.fileno())
def _walk_for_powershell(directory):
for path, dirs, files in os.walk(directory):
powershell = next(
iter(fn for fn in files if fn.lower() == "powershell.exe"), None
)
if powershell is not None:
return os.path.join(directory, powershell)
for subdir in dirs:
powershell = _walk_for_powershell(os.path.join(directory, subdir))
if powershell:
return powershell
return None
def _get_powershell_path():
paths = [
os.path.expandvars(r"%windir%\{0}\WindowsPowerShell").format(subdir)
for subdir in ("SysWOW64", "system32")
]
powershell_path = next(iter(_walk_for_powershell(pth) for pth in paths), None)
if not powershell_path:
powershell_path, _ = run(
["where", "powershell"], block=True, nospin=True, return_object=False
)
if powershell_path:
return powershell_path.strip()
return None
def _get_sid_with_powershell():
powershell_path = _get_powershell_path()
if not powershell_path:
return None
args = [
powershell_path,
"-ExecutionPolicy",
"Bypass",
"-Command",
"Invoke-Expression '[System.Security.Principal.WindowsIdentity]::GetCurrent().user | Write-Host'",
]
sid, _ = run(args, nospin=True)
return sid.strip()
def _get_sid_from_registry():
try:
import winreg
except ImportError:
import _winreg as winreg
var_names = ("%USERPROFILE%", "%HOME%")
current_user_home = next(iter(os.path.expandvars(v) for v in var_names if v), None)
root, subkey = (
winreg.HKEY_LOCAL_MACHINE,
r"Software\Microsoft\Windows NT\CurrentVersion\ProfileList",
)
subkey_names = []
value = None
matching_key = None
try:
with winreg.OpenKeyEx(root, subkey, 0, winreg.KEY_READ) as key:
for i in count():
key_name = winreg.EnumKey(key, i)
subkey_names.append(key_name)
value = query_registry_value(
root, r"{0}\{1}".format(subkey, key_name), "ProfileImagePath"
)
if value and value.lower() == current_user_home.lower():
matching_key = key_name
break
except OSError:
pass
if matching_key is not None:
return matching_key
def get_value_from_tuple(value, value_type):
try:
import winreg
except ImportError:
import _winreg as winreg
if value_type in (winreg.REG_SZ, winreg.REG_EXPAND_SZ):
if "\0" in value:
return value[: value.index("\0")]
return value
return None
def query_registry_value(root, key_name, value):
try:
import winreg
except ImportError:
import _winreg as winreg
try:
with winreg.OpenKeyEx(root, key_name, 0, winreg.KEY_READ) as key:
return get_value_from_tuple(*winreg.QueryValueEx(key, value))
except OSError:
return None
def get_current_user():
fns = (_get_sid_from_registry, _get_sid_with_powershell)
for fn in fns:
result = fn()
if result:
return result
return None
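A minimal usage sketch for get_current_user above (the SID literal is illustrative):
# Hypothetical usage sketch, not part of the original module.
sid = get_current_user()  # e.g. 'S-1-5-21-...': registry first, PowerShell fallback, else None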
| kennethreitz/pipenv | pipenv/vendor/vistir/_winconsole.py | Python | mit | 15,863 |
# Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import netaddr
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.db import constants as db_const
from neutron_lib.plugins import directory
from oslo_config import cfg
from neutron.common import utils
from neutron.db import db_base_plugin_v2
from neutron.extensions import dns
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.plugins.ml2 import test_plugin
class DnsExtensionManager(object):
def get_resources(self):
return []
def get_actions(self):
return []
def get_request_extensions(self):
return []
def get_extended_resources(self, version):
return dns.Dns().get_extended_resources(version)
class DnsExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2):
"""Test plugin to mixin the DNS Integration extensions.
"""
supported_extension_aliases = ["dns-integration", "router"]
class DnsExtensionTestCase(test_plugin.Ml2PluginV2TestCase):
"""Test API extension dns attributes.
"""
_extension_drivers = ['dns']
def setUp(self):
cfg.CONF.set_override('extension_drivers',
self._extension_drivers,
group='ml2')
super(DnsExtensionTestCase, self).setUp()
def _create_network(self, fmt, name, admin_state_up,
arg_list=None, set_context=False, tenant_id=None,
**kwargs):
new_arg_list = ('dns_domain',)
if arg_list is not None:
new_arg_list = arg_list + new_arg_list
return super(DnsExtensionTestCase,
self)._create_network(fmt, name, admin_state_up,
arg_list=new_arg_list,
set_context=set_context,
tenant_id=tenant_id,
**kwargs)
def _create_port(self, fmt, net_id, expected_res_status=None,
arg_list=None, set_context=False, tenant_id=None,
**kwargs):
tenant_id = tenant_id or self._tenant_id
data = {'port': {'network_id': net_id,
'tenant_id': tenant_id}}
for arg in (('admin_state_up', 'device_id',
'mac_address', 'name', 'fixed_ips',
'tenant_id', 'device_owner', 'security_groups',
'dns_name') + (arg_list or ())):
# Arg must be present
if arg in kwargs:
data['port'][arg] = kwargs[arg]
# create a dhcp port device id if one hasn't been supplied
if ('device_owner' in kwargs and
kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and
'host' in kwargs and
'device_id' not in kwargs):
device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
data['port']['device_id'] = device_id
port_req = self.new_create_request('ports', data, fmt)
if set_context and tenant_id:
# create a specific auth context for this request
port_req.environ['neutron.context'] = context.Context(
'', tenant_id)
port_res = port_req.get_response(self.api)
if expected_res_status:
self.assertEqual(expected_res_status, port_res.status_int)
return port_res
def _test_list_resources(self, resource, items, neutron_context=None,
query_params=None):
res = self._list('%ss' % resource,
neutron_context=neutron_context,
query_params=query_params)
resource = resource.replace('-', '_')
self.assertItemsEqual([i['id'] for i in res['%ss' % resource]],
[i[resource]['id'] for i in items])
return res
def test_create_port_json(self):
keys = [('admin_state_up', True), ('status', self.port_create_status)]
with self.port(name='myname') as port:
for k, v in keys:
self.assertEqual(port['port'][k], v)
self.assertIn('mac_address', port['port'])
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
subnet_db = directory.get_plugin().get_subnet(
context.get_admin_context(), ips[0]['subnet_id'])
self.assertIn(netaddr.IPAddress(ips[0]['ip_address']),
netaddr.IPSet(netaddr.IPNetwork(subnet_db['cidr'])))
self.assertEqual('myname', port['port']['name'])
self._verify_dns_assigment(port['port'],
ips_list=[ips[0]['ip_address']])
def test_list_ports(self):
# for this test we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port() as v1, self.port() as v2, self.port() as v3:
ports = (v1, v2, v3)
res = self._test_list_resources('port', ports)
for port in res['ports']:
self._verify_dns_assigment(
port, ips_list=[port['fixed_ips'][0]['ip_address']])
def test_show_port(self):
with self.port() as port:
req = self.new_show_request('ports', port['port']['id'], self.fmt)
sport = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port']['id'], sport['port']['id'])
self._verify_dns_assigment(
sport['port'],
ips_list=[sport['port']['fixed_ips'][0]['ip_address']])
def test_update_port_non_default_dns_domain_with_dns_name(self):
with self.port() as port:
port_ip = port['port']['fixed_ips'][0]['ip_address']
cfg.CONF.set_override('dns_domain', 'example.com')
data = {'port': {'admin_state_up': False, 'dns_name': 'vm1'}}
req = self.new_update_request('ports', data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(data['port']['admin_state_up'],
res['port']['admin_state_up'])
self._verify_dns_assigment(res['port'],
ips_list=[port_ip],
dns_name='vm1')
def test_update_port_default_dns_domain_with_dns_name(self):
with self.port() as port:
port_ip = port['port']['fixed_ips'][0]['ip_address']
data = {'port': {'admin_state_up': False, 'dns_name': 'vm1'}}
req = self.new_update_request('ports', data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(data['port']['admin_state_up'],
res['port']['admin_state_up'])
self._verify_dns_assigment(res['port'],
ips_list=[port_ip])
def _verify_dns_assigment(self, port, ips_list=None, exp_ips_ipv4=0,
exp_ips_ipv6=0, ipv4_cidrs=None, ipv6_cidrs=None,
dns_name=''):
ips_list = ips_list or []
ipv4_cidrs = ipv4_cidrs or []
ipv6_cidrs = ipv6_cidrs or []
self.assertEqual(dns_name, port['dns_name'])
dns_assignment = port['dns_assignment']
if ips_list:
self.assertEqual(len(dns_assignment), len(ips_list))
ips_set = set(ips_list)
else:
self.assertEqual(len(dns_assignment), exp_ips_ipv4 + exp_ips_ipv6)
ipv4_count = 0
ipv6_count = 0
subnets_v4 = [netaddr.IPNetwork(cidr) for cidr in ipv4_cidrs]
subnets_v6 = [netaddr.IPNetwork(cidr) for cidr in ipv6_cidrs]
request_dns_name, request_fqdn = self._get_request_hostname_and_fqdn(
dns_name)
for assignment in dns_assignment:
if ips_list:
self.assertIn(assignment['ip_address'], ips_set)
ips_set.remove(assignment['ip_address'])
else:
ip = netaddr.IPAddress(assignment['ip_address'])
if ip.version == 4:
self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v4))
ipv4_count += 1
else:
self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v6))
ipv6_count += 1
hostname, fqdn = self._get_hostname_and_fqdn(request_dns_name,
request_fqdn,
assignment)
self.assertEqual(assignment['hostname'], hostname)
self.assertEqual(assignment['fqdn'], fqdn)
if ips_list:
self.assertFalse(ips_set)
else:
self.assertEqual(ipv4_count, exp_ips_ipv4)
self.assertEqual(ipv6_count, exp_ips_ipv6)
def _get_dns_domain(self):
if not cfg.CONF.dns_domain:
return ''
if cfg.CONF.dns_domain.endswith('.'):
return cfg.CONF.dns_domain
return '%s.' % cfg.CONF.dns_domain
def _get_request_hostname_and_fqdn(self, dns_name):
request_dns_name = ''
request_fqdn = ''
dns_domain = self._get_dns_domain()
if dns_name and dns_domain and dns_domain != 'openstacklocal.':
request_dns_name = dns_name
request_fqdn = request_dns_name
if not request_dns_name.endswith('.'):
request_fqdn = '%s.%s' % (dns_name, dns_domain)
return request_dns_name, request_fqdn
def _get_hostname_and_fqdn(self, request_dns_name, request_fqdn,
assignment):
dns_domain = self._get_dns_domain()
if request_dns_name:
hostname = request_dns_name
fqdn = request_fqdn
else:
hostname = 'host-%s' % assignment['ip_address'].replace(
'.', '-').replace(':', '-')
fqdn = hostname
if dns_domain:
fqdn = '%s.%s' % (hostname, dns_domain)
return hostname, fqdn
def _verify_ip_in_subnet(self, ip, subnets_list):
for subnet in subnets_list:
if ip in subnet:
return True
return False
def test_update_port_update_ip(self):
"""Test update of port IP.
Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10.
"""
with self.subnet() as subnet:
fixed_ip_data = [{'ip_address': '10.0.0.2'}]
with self.port(subnet=subnet, fixed_ips=fixed_ip_data) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
data = {'port': {'fixed_ips': [{'subnet_id':
subnet['subnet']['id'],
'ip_address': "10.0.0.10"}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(ips[0]['ip_address'], '10.0.0.10')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
self._verify_dns_assigment(res['port'], ips_list=['10.0.0.10'])
def test_update_port_update_ip_address_only(self):
with self.subnet() as subnet:
fixed_ip_data = [{'ip_address': '10.0.0.2'}]
with self.port(subnet=subnet, fixed_ips=fixed_ip_data) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
data = {'port': {'fixed_ips': [{'subnet_id':
subnet['subnet']['id'],
'ip_address': "10.0.0.10"},
{'ip_address': "10.0.0.2"}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(2, len(ips))
self.assertIn({'ip_address': '10.0.0.2',
'subnet_id': subnet['subnet']['id']}, ips)
self.assertIn({'ip_address': '10.0.0.10',
'subnet_id': subnet['subnet']['id']}, ips)
self._verify_dns_assigment(res['port'],
ips_list=['10.0.0.10',
'10.0.0.2'])
def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self):
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets()
self.assertEqual(201, res.status_code)
def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_no_period(
self):
cfg.CONF.set_override('dns_domain', 'example.com')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
dns_name='vm1')
self.assertEqual(201, res.status_code)
def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_period(
self):
cfg.CONF.set_override('dns_domain', 'example.com.')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
dns_name='vm1')
self.assertEqual(201, res.status_code)
def test_create_port_multiple_v4_v6_subnets_pqdn_and_no_dns_domain(
self):
cfg.CONF.set_override('dns_domain', '')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets()
self.assertEqual(201, res.status_code)
def test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_no_period(
self):
cfg.CONF.set_override('dns_domain', 'example.com')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
dns_name='vm1.example.com.')
self.assertEqual(201, res.status_code)
def test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_period(
self):
cfg.CONF.set_override('dns_domain', 'example.com.')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
dns_name='vm1.example.com.')
self.assertEqual(201, res.status_code)
def test_create_port_multiple_v4_v6_subnets_fqdn_default_domain_period(
self):
cfg.CONF.set_override('dns_domain', 'openstacklocal.')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets()
self.assertEqual(201, res.status_code)
def test_create_port_multiple_v4_v6_subnets_bad_fqdn_and_dns_domain(
self):
cfg.CONF.set_override('dns_domain', 'example.com')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
dns_name='vm1.bad-domain.com.')
self.assertEqual(400, res.status_code)
expected_error = ('The dns_name passed is a FQDN. Its higher level '
'labels must be equal to the dns_domain option in '
'neutron.conf')
self.assertIn(expected_error, res.text)
def test_create_port_multiple_v4_v6_subnets_bad_pqdn_and_dns_domain(
self):
cfg.CONF.set_override('dns_domain', 'example.com')
num_labels = int(
math.floor(db_const.FQDN_FIELD_SIZE / constants.DNS_LABEL_MAX_LEN))
filler_len = int(
math.floor(db_const.FQDN_FIELD_SIZE % constants.DNS_LABEL_MAX_LEN))
dns_name = (('a' * (constants.DNS_LABEL_MAX_LEN - 1) + '.') *
num_labels + 'a' * filler_len)
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
dns_name=dns_name)
self.assertEqual(400, res.status_code)
expected_error = ("When the two are concatenated to form a FQDN "
"(with a '.' at the end), the resulting length "
"exceeds the maximum size")
self.assertIn(expected_error, res.text)
def _test_create_port_with_multiple_ipv4_and_ipv6_subnets(self,
dns_name=''):
"""Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets."""
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
sub_dicts = [
{'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
'ip_version': 4, 'ra_addr_mode': None},
{'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24',
'ip_version': 4, 'ra_addr_mode': None},
{'gateway': 'fe80::1', 'cidr': 'fe80::/64',
'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
{'gateway': 'fe81::1', 'cidr': 'fe81::/64',
'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
{'gateway': 'fe82::1', 'cidr': 'fe82::/64',
'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL},
{'gateway': 'fe83::1', 'cidr': 'fe83::/64',
'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}]
subnets = {}
for sub_dict in sub_dicts:
subnet = self._make_subnet(
self.fmt, network,
gateway=sub_dict['gateway'],
cidr=sub_dict['cidr'],
ip_version=sub_dict['ip_version'],
ipv6_ra_mode=sub_dict['ra_addr_mode'],
ipv6_address_mode=sub_dict['ra_addr_mode'])
subnets[subnet['subnet']['id']] = sub_dict
res = self._create_port(self.fmt, net_id=network['network']['id'],
dns_name=dns_name)
if res.status_code != 201:
return res
port = self.deserialize(self.fmt, res)
# Since the create port request was made without a list of fixed IPs,
# the port should be associated with addresses for one of the
# IPv4 subnets, one of the DHCPv6 subnets, and both of the IPv6
# SLAAC subnets.
self.assertEqual(4, len(port['port']['fixed_ips']))
addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0,
constants.IPV6_SLAAC: 0}
for fixed_ip in port['port']['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
if subnet_id in subnets:
addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1
self.assertEqual(1, addr_mode_count[None])
self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL])
self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC])
self._verify_dns_assigment(port['port'], exp_ips_ipv4=1,
exp_ips_ipv6=3,
ipv4_cidrs=[sub_dicts[0]['cidr'],
sub_dicts[1]['cidr']],
ipv6_cidrs=[sub_dicts[2]['cidr'],
sub_dicts[3]['cidr'],
sub_dicts[4]['cidr'],
sub_dicts[5]['cidr']],
dns_name=dns_name)
return res
def test_api_extension_validation_with_bad_dns_names(self):
num_labels = int(
math.floor(db_const.FQDN_FIELD_SIZE / constants.DNS_LABEL_MAX_LEN))
filler_len = int(
math.floor(db_const.FQDN_FIELD_SIZE % constants.DNS_LABEL_MAX_LEN))
dns_names = [555, '\f\n\r', '.', '-vm01', '_vm01', 'vm01-',
'-vm01.test1', 'vm01.-test1', 'vm01._test1',
'vm01.test1-', 'vm01.te$t1', 'vm0#1.test1.',
'vm01.123.', '-' + 'a' * constants.DNS_LABEL_MAX_LEN,
'a' * (constants.DNS_LABEL_MAX_LEN + 1),
('a' * (constants.DNS_LABEL_MAX_LEN - 1) + '.') *
num_labels + 'a' * (filler_len + 1)]
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
'ip_version': 4, 'ra_addr_mode': None}
self._make_subnet(self.fmt, network, gateway=sub_dict['gateway'],
cidr=sub_dict['cidr'],
ip_version=sub_dict['ip_version'],
ipv6_ra_mode=sub_dict['ra_addr_mode'],
ipv6_address_mode=sub_dict['ra_addr_mode'])
for dns_name in dns_names:
res = self._create_port(self.fmt, net_id=network['network']['id'],
dns_name=dns_name)
self.assertEqual(400, res.status_code)
error_message = res.json['NeutronError']['message']
is_expected_message = (
'cannot be converted to lowercase string' in error_message or
'not a valid PQDN or FQDN. Reason:' in error_message or
'must be string type' in error_message)
self.assertTrue(is_expected_message)
def test_api_extension_validation_with_good_dns_names(self):
cfg.CONF.set_override('dns_domain', 'example.com')
higher_labels_len = len('example.com.')
num_labels = int(
math.floor((db_const.FQDN_FIELD_SIZE - higher_labels_len) /
constants.DNS_LABEL_MAX_LEN))
filler_len = int(
math.floor((db_const.FQDN_FIELD_SIZE - higher_labels_len) %
constants.DNS_LABEL_MAX_LEN))
dns_names = ['', 'www.1000.com', 'vM01', 'vm01.example.com.',
'8vm01', 'vm-01.example.com.', 'vm01.test',
'vm01.test.example.com.', 'vm01.test-100',
'vm01.test-100.example.com.',
'a' * constants.DNS_LABEL_MAX_LEN,
('a' * constants.DNS_LABEL_MAX_LEN) + '.example.com.',
('a' * (constants.DNS_LABEL_MAX_LEN - 1) + '.') *
num_labels + 'a' * (filler_len - 1)]
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
'ip_version': 4, 'ra_addr_mode': None}
self._make_subnet(self.fmt, network, gateway=sub_dict['gateway'],
cidr=sub_dict['cidr'],
ip_version=sub_dict['ip_version'],
ipv6_ra_mode=sub_dict['ra_addr_mode'],
ipv6_address_mode=sub_dict['ra_addr_mode'])
for dns_name in dns_names:
res = self._create_port(self.fmt, net_id=network['network']['id'],
dns_name=dns_name)
self.assertEqual(201, res.status_code)
class DnsExtensionTestNetworkDnsDomain(
test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def setUp(self):
plugin = ('neutron.tests.unit.extensions.test_dns.' +
'DnsExtensionTestPlugin')
ext_mgr = DnsExtensionManager()
super(DnsExtensionTestNetworkDnsDomain, self).setUp(
plugin=plugin, ext_mgr=ext_mgr)
def test_update_network_dns_domain(self):
with self.network() as network:
data = {'network': {'dns_domain': 'my-domain.org.'}}
req = self.new_update_request('networks',
data,
network['network']['id'])
res = req.get_response(self.api)
self.assertEqual(200, res.status_code)
self.assertNotIn('dns_domain',
self.deserialize(self.fmt, res)['network'])
| huntxu/neutron | neutron/tests/unit/extensions/test_dns.py | Python | apache-2.0 | 24,979 |
" fuel tank "
from gpkit import Model, Variable
class FuelTank(Model):
"""
    Returns the weight of the fuel tank. Assumes a cylindrical shape with
    some fineness ratio.
"""
def setup(self, Wfueltot):
W = Variable("W", "lbf", "fuel tank weight")
f = Variable("f", 0.03, "-", "fraction fuel tank weight to fuel weight")
mfac = Variable("m_{fac}", 1.1, "-", "fuel volume margin factor")
rhofuel = Variable("\\rho_{fuel}", 6.01, "lbf/gallon",
"density of 100LL")
Vol = Variable("\\mathcal{V}", "ft^3", "fuel tank volume")
constraints = [W >= f*Wfueltot,
Vol/mfac >= Wfueltot/rhofuel,
]
return constraints
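A minimal usage sketch for the model above (hypothetical value; in the library, Wfueltot would normally be supplied by an enclosing aircraft model):
# Hypothetical usage sketch, not part of the original module.
from gpkit import Variable
Wfueltot = Variable("W_{fuel-tot}", 100.0, "lbf", "total fuel weight")
tank = FuelTank(Wfueltot)  # instantiating a gpkit Model runs setup()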
| convexengineering/gplibrary | gpkitmodels/GP/aircraft/fuselage/fuel_tank.py | Python | mit | 747 |
from re import compile
from django.conf import settings
from django.http import HttpResponseRedirect
EXEMPT_URLS = [compile(settings.LOGIN_URL.lstrip('/'))]
if hasattr(settings, 'LOGIN_EXEMPT_URLS'):
EXEMPT_URLS += [compile(expr) for expr in settings.LOGIN_EXEMPT_URLS]
class LoginRequiredMiddleware(object):
def process_request(self, request):
        # print request.path  # leftover debug output, disabled
if request.path == settings.LOGIN_URL:
return None
if not request.user.is_authenticated():
path = request.path_info.lstrip('/')
if not any(m.match(path) for m in EXEMPT_URLS):
return HttpResponseRedirect(('%s?next=%s' % (
settings.LOGIN_URL, request.path)))
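# Hedged configuration sketch (added, not part of the original file):
# wiring this middleware into a Django settings module. LOGIN_EXEMPT_URLS
# entries are regex strings matched against the request path with its
# leading slash stripped; the dotted path below is illustrative.
# MIDDLEWARE_CLASSES += ('kpir.middleware.LoginRequiredMiddleware',)
# LOGIN_EXEMPT_URLS = (r'^accounts/', r'^static/')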
|
nborkowska/kpir
|
kpir/kpir/middleware.py
|
Python
|
gpl-3.0
| 755
|
# This script opens up all of the graphs from disk, and performs learning on
# them. This script is best run on the Rous cluster, where all of the graphs
# are located on disk (it occupies ~200GB of space, because pickling is
# not efficient).
from graphfp.layers import FingerprintLayer, LinearRegressionLayer,\
GraphConvLayer
from graphfp.utils import initialize_network, batch_sample, train_test_split
from graphfp.optimizers import adam
from graphfp.flatten import flatten
from time import time
from tqdm import tqdm
from autograd import grad
import json
import pickle as pkl
import os
import pandas as pd
import autograd.numpy as np
import sys
def read_data():
"""
Reads all of the protein graphs into memory from disk.
Also reads in the data table into memory.
"""
with open('../../data/batch_summary.json') as f:
model_data = json.load(f)
# Read in the quantitative data
protease_data = pd.read_csv(
'../../data/hiv_data/hiv-protease-data-expanded.csv',
index_col=0)
drug_data = protease_data.dropna(subset=['FPV'])[['FPV', 'seqid']]
drug_data['FPV'] = drug_data['FPV'].apply(np.log10)
proj_titles = {c['title']: c['code'] for c in model_data['projects']}
all_graphs = []
n_graphs = len(drug_data)
# n_graphs = 200
for i, (seqid, project) in tqdm(enumerate(proj_titles.items())):
if len(all_graphs) < n_graphs and\
seqid in drug_data['seqid'].values:
# print(seqid, project)
# We use the try/except pattern, just in case there's some problem
# with graph reading.
try:
with open('../../data/batch_models/{0}/model_01.pkl'.format(
project), 'rb') as f:
p = pkl.load(f)
p.graph['input_shape'] = p.nodes(data=True)[0][1][
'features'].shape
# print(p.graph['input_shape'])
p.graph['project'] = project
p.graph['seqid'] = seqid
all_graphs.append(p)
            except:
                print('did not add graph for {0}'.format(project))
return all_graphs, drug_data
def train_loss(wb_vect, unflattener, cv=False, batch=True, batch_size=10,
debug=False):
"""
Training loss is MSE.
We pass in a flattened parameter vector and its unflattener.
"""
wb_struct = unflattener(wb_vect)
    if not batch:
        batch_size = len(graphs)
if cv and not batch:
samp_graphs, samp_inputs = batch_sample(test_graphs, input_shape,
batch_size=len(test_graphs))
else:
samp_graphs, samp_inputs = batch_sample(graphs, input_shape,
batch_size)
preds = predict(wb_struct, samp_inputs, samp_graphs)
graph_ids = [g.graph['seqid'] for g in samp_graphs]
graph_scores = drug_data.set_index('seqid').ix[graph_ids]['FPV'].values.\
reshape(preds.shape)
# print(graph_scores)
assert preds.shape == graph_scores.shape
mse = np.mean(np.power(preds - graph_scores, 2))
if debug:
print(graph_ids)
print('Predictions:')
print(preds)
print('Mean: {0}'.format(np.mean(preds)))
print('')
print('Actual')
print(graph_scores)
print('Mean: {0}'.format(np.mean(graph_scores)))
print('')
print('Difference')
print(preds - graph_scores)
print('Mean Squared Error: {0}'.format(mse))
print('')
return mse
def predict(wb_struct, inputs, graphs):
"""
Makes predictions by running the forward pass over all of the layers.
Parameters:
===========
- wb_struct: a dictionary of weights and biases stored for each layer.
- inputs: the input data matrix. should be one row per graph.
- graphs: a list of all graphs.
"""
curr_inputs = inputs
for i, layer in enumerate(layers):
wb = wb_struct['layer{0}_{1}'.format(i, layer)]
curr_inputs = layer.forward_pass(wb, curr_inputs, graphs)
return curr_inputs
def callback(wb, i):
"""
Any function you want to run at each iteration of the optimization.
"""
wb_vect, wb_unflattener = flatten(wb)
print('Iteration: {0}'.format(i))
print('Training Loss: ')
# Record training set train_loss
tl = train_loss(wb_vect, wb_unflattener, batch=True, batch_size=10)
print(tl)
trainloss.append(tl)
# Record the preds vs. actual for the training set.
batch_size = 10 # len(graphs)
samp_graphs, samp_inputs = batch_sample(graphs, input_shape,
batch_size)
print('callback batch size: {0}'.format(len(samp_graphs)))
preds = predict(wb, samp_inputs, samp_graphs)
graph_ids = [g.graph['seqid'] for g in samp_graphs]
graph_scores = drug_data.set_index('seqid').ix[graph_ids]['FPV'].\
values.reshape(preds.shape)
preds_vs_actual[i] = dict()
preds_vs_actual[i]['preds'] = preds
preds_vs_actual[i]['actual'] = graph_scores
# Record cross-validation train_loss
cv_tl = train_loss(wb_vect, wb_unflattener, cv=True)
trainloss_cv.append(cv_tl)
# Record the preds vs. actual for the test set
batch_size = 10
samp_graphs, samp_inputs = batch_sample(test_graphs, input_shape,
batch_size)
preds_cv = predict(wb, samp_inputs, samp_graphs)
graph_ids = [g.graph['seqid'] for g in samp_graphs]
graph_scores_cv = drug_data.set_index('seqid').ix[graph_ids]['FPV'].\
values.reshape(preds_cv.shape)
preds_vs_actual_cv[i] = dict()
preds_vs_actual_cv[i]['preds'] = preds_cv
preds_vs_actual_cv[i]['actual'] = graph_scores_cv
print('cross-validated training loss: {0}'.format(cv_tl))
# Report on the expected time left.
time_elapsed = time() - start
print('Total time: {0} min {1} sec'.format(int(time_elapsed / 60),
time_elapsed % 60))
time_left = (num_iters - i + 1) * (time_elapsed / (i + 1))
print('Expected time left: {0} min {1} sec'.format(int(time_left / 60),
time_left % 60))
print('')
if __name__ == '__main__':
num_iters = int(sys.argv[1])
start = time()
# Read in all of the graphs
all_graphs, drug_data = read_data()
print('total num of graphs: {0}'.format(len(all_graphs)))
# Split the graphs into a training and testing set.
# Also reads in the data table into memory.
graphs, test_graphs = train_test_split(all_graphs, test_fraction=0.2)
# Define the gradient function
grad_tl = grad(train_loss)
# Specify neural network shape and meta-parameters
input_shape = graphs[0].graph['input_shape']
print('input shape: {0}'.format(input_shape))
layers = [GraphConvLayer((input_shape[1], input_shape[1])),
GraphConvLayer((input_shape[1], input_shape[1])),
FingerprintLayer(input_shape[1]),
LinearRegressionLayer((input_shape[1], 1)),
]
# Initialize neural network weights and baises, as well as an empty
# container for holding the training losses.
wb_all = initialize_network(input_shape, graphs, layers)
trainloss = list()
preds_vs_actual = dict()
trainloss_cv = list()
preds_vs_actual_cv = dict()
# Train the neural network on the data.
wb_vect, wb_unflattener = adam(grad_tl, wb_all, callback=callback,
num_iters=num_iters)
wb_all = wb_unflattener(wb_vect)
# Write training losses and weights/biases to disk.
if not os.path.exists('outputs'):
os.mkdir('outputs')
handle = 'all-graphs_{0}-iters'.format(num_iters)
with open('outputs/{0}_trainloss.pkl'.format(handle), 'wb') as f:
pkl.dump(trainloss, f)
with open('outputs/{0}_wbs.pkl'.format(handle), 'wb') as f:
pkl.dump(wb_all, f)
with open('outputs/{0}_predsactual.pkl'.format(handle), 'wb') as f:
pkl.dump(preds_vs_actual, f)
with open('outputs/{0}_trainloss_cv.pkl'.format(handle), 'wb') as f:
pkl.dump(trainloss_cv, f)
with open('outputs/{0}_predsactual_cv.pkl'.format(handle), 'wb') as f:
pkl.dump(preds_vs_actual_cv, f)
|
ericmjl/protein-convolutional-nets
|
experiments/subsample/train.py
|
Python
|
mit
| 8,395
|
from .authenticator import CarinaAuth
from .spawner import CarinaSpawner
|
betatim/carina-jupyterhub
|
carinahub/__init__.py
|
Python
|
bsd-3-clause
| 73
|
import unittest
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from google.appengine.datastore import datastore_stub_util
import gaeutils
class TestModel(ndb.Model):
"""A model class used for testing."""
number = ndb.IntegerProperty(default=42)
text = ndb.StringProperty()
class TestBase(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
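        # probability=1 makes the HRD datastore stub fully consistent,
        # so queries immediately see preceding writes in these tests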
self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
self.testbed.init_datastore_v3_stub(consistency_policy=self.policy)
self.testbed.init_memcache_stub()
self.testbed.init_app_identity_stub()
#self.testbed.init_taskqueue_stub(root_path=os.path.join('.'))
self.testbed.init_taskqueue_stub()
gaeutils.App.setup() # needed in test env
def tearDown(self):
self.testbed.deactivate()
class TestTest(TestBase):
def testInsertEntity(self):
"""
Test the test cases.
"""
TestModel().put()
self.assertEqual(1, len(TestModel.query().fetch(2)))
|
jaybaker/gaeutils
|
test/tests.py
|
Python
|
mit
| 1,127
|
# coding: utf-8
from __future__ import unicode_literals
from io import StringIO, BytesIO
from pathlib import Path
import pytest
from .util import load_test_model
from ..tokens import Doc
from ..strings import StringStore
from .. import util
# These languages are used for generic tokenizer tests – only add a language
# here if it's using spaCy's tokenizer (not a different library)
# TODO: re-implement generic tokenizer tests
_languages = ['bn', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', 'ga', 'he', 'hu', 'id',
              'it', 'nb', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'ar', 'ur', 'tt',
'xx']
_models = {'en': ['en_core_web_sm'],
'de': ['de_core_news_sm'],
'fr': ['fr_core_news_sm'],
'xx': ['xx_ent_web_sm'],
'en_core_web_md': ['en_core_web_md'],
'es_core_news_md': ['es_core_news_md']}
# only used for tests that require loading the models
# in all other cases, use specific instances
@pytest.fixture(params=_models['en'])
def EN(request):
return load_test_model(request.param)
@pytest.fixture(params=_models['de'])
def DE(request):
return load_test_model(request.param)
@pytest.fixture(params=_models['fr'])
def FR(request):
return load_test_model(request.param)
@pytest.fixture()
def RU(request):
pymorphy = pytest.importorskip('pymorphy2')
return util.get_lang_class('ru')()
@pytest.fixture()
def JA(request):
mecab = pytest.importorskip("MeCab")
return util.get_lang_class('ja')()
#@pytest.fixture(params=_languages)
#def tokenizer(request):
#lang = util.get_lang_class(request.param)
#return lang.Defaults.create_tokenizer()
@pytest.fixture
def tokenizer():
return util.get_lang_class('xx').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def en_tokenizer():
return util.get_lang_class('en').Defaults.create_tokenizer()
@pytest.fixture
def en_vocab():
return util.get_lang_class('en').Defaults.create_vocab()
@pytest.fixture
def en_parser(en_vocab):
nlp = util.get_lang_class('en')(en_vocab)
return nlp.create_pipe('parser')
@pytest.fixture(scope='session')
def es_tokenizer():
return util.get_lang_class('es').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def de_tokenizer():
return util.get_lang_class('de').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def hu_tokenizer():
return util.get_lang_class('hu').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def fi_tokenizer():
return util.get_lang_class('fi').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def ro_tokenizer():
return util.get_lang_class('ro').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def id_tokenizer():
return util.get_lang_class('id').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def sv_tokenizer():
return util.get_lang_class('sv').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def bn_tokenizer():
return util.get_lang_class('bn').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def ga_tokenizer():
return util.get_lang_class('ga').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def he_tokenizer():
return util.get_lang_class('he').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def nb_tokenizer():
return util.get_lang_class('nb').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def da_tokenizer():
return util.get_lang_class('da').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def ja_tokenizer():
mecab = pytest.importorskip("MeCab")
return util.get_lang_class('ja').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def th_tokenizer():
pythainlp = pytest.importorskip("pythainlp")
return util.get_lang_class('th').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def tr_tokenizer():
return util.get_lang_class('tr').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def tt_tokenizer():
return util.get_lang_class('tt').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def el_tokenizer():
return util.get_lang_class('el').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def ar_tokenizer():
return util.get_lang_class('ar').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def ur_tokenizer():
return util.get_lang_class('ur').Defaults.create_tokenizer()
@pytest.fixture(scope='session')
def ru_tokenizer():
pymorphy = pytest.importorskip('pymorphy2')
return util.get_lang_class('ru').Defaults.create_tokenizer()
@pytest.fixture
def stringstore():
return StringStore()
@pytest.fixture
def en_entityrecognizer():
return util.get_lang_class('en').Defaults.create_entity()
@pytest.fixture
def text_file():
return StringIO()
@pytest.fixture
def text_file_b():
return BytesIO()
def pytest_addoption(parser):
parser.addoption("--models", action="store_true",
help="include tests that require full models")
parser.addoption("--vectors", action="store_true",
help="include word vectors tests")
parser.addoption("--slow", action="store_true",
help="include slow tests")
for lang in _languages + ['all']:
parser.addoption("--%s" % lang, action="store_true", help="Use %s models" % lang)
for model in _models:
if model not in _languages:
parser.addoption("--%s" % model, action="store_true", help="Use %s model" % model)
def pytest_runtest_setup(item):
def getopt(opt):
# When using 'pytest --pyargs spacy' to test an installed copy of
# spacy, pytest skips running our pytest_addoption() hook. Later, when
# we call getoption(), pytest raises an error, because it doesn't
# recognize the option we're asking about. To avoid this, we need to
# pass a default value. We default to False, i.e., we act like all the
# options weren't given.
return item.config.getoption("--%s" % opt, False)
for opt in ['models', 'vectors', 'slow']:
if opt in item.keywords and not getopt(opt):
pytest.skip("need --%s option to run" % opt)
# Check if test is marked with models and has arguments set, i.e. specific
# language. If so, skip test if flag not set.
if item.get_marker('models'):
for arg in item.get_marker('models').args:
if not getopt(arg) and not getopt("all"):
pytest.skip("need --%s or --all option to run" % arg)
|
aikramer2/spaCy
|
spacy/tests/conftest.py
|
Python
|
mit
| 6,590
|
from website.settings import DOMAIN as OSF_DOMAIN
from website.project.model import User
from furl import furl
def serialize_comment(comment, full=False):
reports = serialize_reports(comment.reports)
author_abs_url = furl(OSF_DOMAIN)
author_abs_url.path.add(comment.user.url)
return {
'id': comment._id,
'author': User.load(comment.user._id),
'author_id': comment.user._id,
'author_path': author_abs_url.url,
'date_created': comment.date_created,
'date_modified': comment.date_modified,
'content': comment.content,
'has_children': bool(getattr(comment, 'commented', [])),
'modified': comment.modified,
'is_deleted': comment.is_deleted,
'spam_status': comment.spam_status,
'reports': reports,
'node': comment.node,
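        # note: reports[0] assumes at least one report exists; the admin
        # spam view presumably only serializes reported comments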
'category': reports[0]['category'],
}
def serialize_reports(reports):
return [
serialize_report(user, report)
for user, report in reports.iteritems()
]
def serialize_report(user, report):
return {
'reporter': User.load(user),
'category': report.get('category', None),
'reason': report.get('text', None),
}
|
billyhunt/osf.io
|
admin/spam/serializers.py
|
Python
|
apache-2.0
| 1,215
|
from sympy import Piecewise, lambdify, Equality, Unequality, Sum, Mod
from sympy.abc import x, i, j, a, b, c, d
from sympy.printing.lambdarepr import NumPyPrinter
import numpy as np
def test_numpy_piecewise_regression():
"""
NumPyPrinter needs to print Piecewise()'s choicelist as a list to avoid
breaking compatibility with numpy 1.8. This is not necessary in numpy 1.9+.
See gh-9747 and gh-9749 for details.
"""
p = Piecewise((1, x < 0), (0, True))
assert NumPyPrinter().doprint(p) == 'select([less(x, 0),True], [1,0], default=nan)'
def test_sum():
s = Sum(x ** i, (i, a, b))
f = lambdify((a, b, x), s, 'numpy')
a_, b_ = 0, 10
x_ = np.linspace(-1, +1, 10)
assert np.allclose(f(a_, b_, x_), sum(x_ ** i_ for i_ in range(a_, b_ + 1)))
s = Sum(i * x, (i, a, b))
f = lambdify((a, b, x), s, 'numpy')
a_, b_ = 0, 10
x_ = np.linspace(-1, +1, 10)
assert np.allclose(f(a_, b_, x_), sum(i_ * x_ for i_ in range(a_, b_ + 1)))
def test_multiple_sums():
s = Sum((x + j) * i, (i, a, b), (j, c, d))
f = lambdify((a, b, c, d, x), s, 'numpy')
a_, b_ = 0, 10
c_, d_ = 11, 21
x_ = np.linspace(-1, +1, 10)
assert np.allclose(f(a_, b_, c_, d_, x_),
sum((x_ + j_) * i_ for i_ in range(a_, b_ + 1) for j_ in range(c_, d_ + 1)))
def test_relational():
e = Equality(x, 1)
f = lambdify((x,), e)
x_ = np.array([0, 1, 2])
assert np.array_equal(f(x_), [False, True, False])
e = Unequality(x, 1)
f = lambdify((x,), e)
x_ = np.array([0, 1, 2])
assert np.array_equal(f(x_), [True, False, True])
e = (x < 1)
f = lambdify((x,), e)
x_ = np.array([0, 1, 2])
assert np.array_equal(f(x_), [True, False, False])
e = (x <= 1)
f = lambdify((x,), e)
x_ = np.array([0, 1, 2])
assert np.array_equal(f(x_), [True, True, False])
e = (x > 1)
f = lambdify((x,), e)
x_ = np.array([0, 1, 2])
assert np.array_equal(f(x_), [False, False, True])
e = (x >= 1)
f = lambdify((x,), e)
x_ = np.array([0, 1, 2])
assert np.array_equal(f(x_), [False, True, True])
def test_mod():
e = Mod(a, b)
f = lambdify((a, b), e)
a_ = np.array([0, 1, 2, 3])
b_ = 2
assert np.array_equal(f(a_, b_), [0, 1, 0, 1])
a_ = np.array([0, 1, 2, 3])
b_ = np.array([2, 2, 2, 2])
assert np.array_equal(f(a_, b_), [0, 1, 0, 1])
a_ = np.array([2, 3, 4, 5])
b_ = np.array([2, 3, 4, 5])
assert np.array_equal(f(a_, b_), [0, 0, 0, 0])
|
Shaswat27/sympy
|
sympy/printing/tests/test_numpy.py
|
Python
|
bsd-3-clause
| 2,530
|
import glob, sys, os.path
import MergeBowtie
import Mapping2
# this is needed to account for the fact that the maternal, paternal, and ref genomes all have different coordinates now
# that indels are handled
def makeMappers(maptmplt):
mappers={}
cs=['chr%s' % str(c) for c in range(1,23)] + ['chrX', 'chrY', 'chrM']
for c in cs:
f=maptmplt % c
if os.path.exists(f):
mappers[c] = Mapping2.Mapping(f)
return mappers
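# Note (added): maptmplt is a filename template with one '%s' slot per
# chromosome name, e.g. 'maps/%s.map' expands to 'maps/chr1.map'.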
if __name__=='__main__':
Fathers=sorted(glob.glob("*_AltRefFather.txt"))
Mothers=sorted(glob.glob("*_AltRefMother.txt"))
maptmplt=sys.argv[1]
outfile=sys.argv[2]
mappers=makeMappers(maptmplt)
log=open('Merge.log', 'w')
ofp=open(outfile, 'w')
for f, m in zip(Fathers, Mothers):
print >>log, 'Merging %s %s' % (f, m)
log.flush()
assert (f.replace('AltRefFather', 'AltRefMother') == m)
MergeBowtie.process(open(f), open(m), mappers, ofp, log)
|
gersteinlab/AlleleDB
|
alleledb_pipeline/MergeDriver.py
|
Python
|
cc0-1.0
| 980
|
from django.template.defaultfilters import slugify
from smartmin.models import SmartModel
from django.db import models
"""
Full description of the landmark type
"""
class LandmarkType(models.Model):
name = models.CharField(max_length=128, help_text="Name describing the landmark type e.g. Popular name")
slug = models.SlugField(unique=True)
# automatically generate the slug from the landmark type name
    def save(self, *args, **kwargs):
        self.slug = slugify(self.name)
        super(LandmarkType, self).save(*args, **kwargs)
def __unicode__(self):
return self.name
"""
Full description of the landmark
"""
class Landmark(SmartModel):
official_name = models.CharField(max_length=64, null=True, blank=True, help_text="Official name for the landmark e.g. CHUK")
unofficial_name = models.CharField(max_length=64, null=True, blank=True, help_text="Unofficial name for the landmark e.g. CHK")
landmark_type = models.ForeignKey(LandmarkType, help_text="The type of the landmark e.g. Hospital")
lat = models.DecimalField(max_digits=8, decimal_places=5, null=True, blank=True, help_text="The latitude of the landmark")
lng = models.DecimalField(max_digits=8, decimal_places=5, null=True, blank=True, help_text="The longitude of the landmark")
def __unicode__(self):
return self.official_name
|
nyaruka/motome
|
motome/landmarks/models.py
|
Python
|
bsd-3-clause
| 1,367
|
from geoalchemy2 import WKBElement, shape as ga_shape
from geomet import wkb
from shapely.geometry import LineString, MultiLineString, shape
from sqlalchemy.dialects import postgresql
import sqlalchemy as sa
from sqlalchemy.sql.expression import and_
from sqlalchemy.sql.functions import func
import re
def copy_attributes(obj_from, obj_to, attributes):
"""
Copies the given attributes from `obj_from` to `obj_to` (shallow copy).
"""
for attribute in attributes:
if hasattr(obj_from, attribute):
current_val = getattr(obj_to, attribute)
new_val = getattr(obj_from, attribute)
# To make the SQLAlchemy check if a document has changed work
# properly, we only copy an attribute if the value has changed.
# For geometries, we always copy the value.
if isinstance(current_val, WKBElement) or \
isinstance(new_val, WKBElement) or \
current_val != new_val:
setattr(obj_to, attribute, new_val)
class ArrayOfEnum(postgresql.ARRAY):
"""
SQLAlchemy type for an array of enums.
http://docs.sqlalchemy.org/en/latest/dialects/postgresql.html#postgresql-array-of-enum
"""
def bind_expression(self, bindvalue):
return sa.cast(bindvalue, self)
def result_processor(self, dialect, coltype):
super_rp = super(ArrayOfEnum, self).result_processor(
dialect, coltype)
def handle_raw_string(value):
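            # e.g. Postgres returns '{rock,ice}', which becomes
            # ['rock', 'ice']; an empty array comes back as '{}'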
if value == '{}':
return []
else:
inner = re.match(r"^{(.*)}$", value).group(1)
return inner.split(",")
def process(value):
if value is None:
return None
return super_rp(handle_raw_string(value))
return process
def extend_dict(d1, d2):
"""Update `d1` with the entries of `d2` and return `d1`.
"""
d1.update(d2)
return d1
def get_mid_point(wkb_track):
"""Get the point in the middle of a track. If the track is a
MultiLineString the point in the middle of the first line is taken.
"""
track = wkb_to_shape(wkb_track)
if isinstance(track, LineString):
mid_point = track.interpolate(0.5, True)
elif isinstance(track, MultiLineString) and track.geoms:
mid_point = track.geoms[0].interpolate(0.5, True)
else:
return None
return ga_shape.from_shape(mid_point, srid=3857)
def wkb_to_shape(wkb_element):
""" Create a 2D Shapely shape from a WKB value. 3D and 4D geometries
are turned into 2D geometries.
"""
assert(isinstance(wkb_element, WKBElement))
geometry = wkb.loads(bytes(wkb_element.data))
return shape(_force_2d(geometry))
def _force_2d(geojson_track):
if geojson_track['type'].lower() == 'point':
coords = geojson_track['coordinates']
geojson_track['coordinates'] = [coords[0], coords[1]]
elif geojson_track['type'].lower() == 'linestring':
geojson_track['coordinates'] = \
_force_2d_coords(geojson_track['coordinates'])
elif geojson_track['type'].lower() in ('multilinestring', 'polygon'):
geojson_track['coordinates'] = [
_force_2d_coords(coords)
for coords in geojson_track['coordinates']
]
elif geojson_track['type'].lower() == 'multipolygon':
geojson_track['coordinates'] = [
[_force_2d_coords(coords) for coords in polygon]
for polygon in geojson_track['coordinates']
]
else:
raise Exception('Unexpected geometry type')
return geojson_track
def _force_2d_coords(coords):
return [[coord[0], coord[1]] for coord in coords]
def windowed_query(q, column, windowsize):
""""Break a Query into windows on a given column.
Source: https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/WindowedRangeQuery # noqa
If the query does not use eager loading `yield_per` can be used instead for
native streaming.
"""
for whereclause in column_windows(
q.session,
column, windowsize):
for row in q.filter(whereclause).order_by(column):
yield row
def column_windows(session, column, windowsize):
"""Return a series of WHERE clauses against
a given column that break it into windows.
Result is an iterable of tuples, consisting of
((start, end), whereclause), where (start, end) are the ids.
Requires a database that supports window functions,
i.e. Postgresql, SQL Server, Oracle.
Enhance this yourself ! Add a "where" argument
so that windows of just a subset of rows can
be computed.
Source: https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/WindowedRangeQuery # noqa
"""
def int_for_range(start_id, end_id):
if end_id:
return and_(
column >= start_id,
column < end_id
)
else:
return column >= start_id
q = session.query(
column,
func.row_number().over(order_by=column).label('rownum')
). \
from_self(column)
if windowsize > 1:
q = q.filter(sa.text("rownum %% %d=1" % windowsize))
intervals = [id for id, in q]
while intervals:
start = intervals.pop(0)
if intervals:
end = intervals[0]
else:
end = None
yield int_for_range(start, end)
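# Hedged usage sketch (added, not part of the original file): streaming a
# large table in windows of 1000 rows. `session`, `Document` and
# `process` are assumed placeholders.
# q = session.query(Document)
# for doc in windowed_query(q, Document.document_id, 1000):
#     process(doc)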
|
c2corg/v6_api
|
c2corg_api/models/utils.py
|
Python
|
agpl-3.0
| 5,447
|
#!/usr/bin/env python
# coding:utf-8
from __future__ import unicode_literals
import json
import time
import api_funcs as af
def end(gen):
if gen['instance_id']:
        print('@@@ Note: if the instance is still starting, the delete command will not take effect @@@')
af.Instance.stop(gen['instance_id'])
# 1.1 等待实例停止
print('等待实例停止...(先等1s)...')
slept = 1
time.sleep(slept)
ins = af.Instance.status(gen['instance_id'])
while ins and ins['Status'] != 'Stopped':
            print('Still waiting [%ss elapsed]...' % slept)
time.sleep(10)
slept += 10
ins = af.Instance.status(gen['instance_id'])
else:
if ins:
                print('Instance already stopped, waiting 5s as a buffer...')
time.sleep(5)
else:
                print('This instance no longer exists.')
        print('Deleting instance, image and snapshot...')
af.Instance.delete(gen['instance_id'])
else:
pass
af.Image.delete(gen['image_id'])
af.Snapshot.delete(gen['snapshot_id'])
return True
if __name__ == '__main__':
with open('nodes.json', 'r+') as f:
nodes_str = f.read()
nodes = json.loads(nodes_str)
for i, node in enumerate(nodes):
end(node)
print('[%s]==>>>' % (i+1))
print(json.dumps(node, ensure_ascii=False, indent=2))
print('[%s]==<<<' % (i+1))
|
boisde/Greed_Island
|
aliyuncli_ecs/3_stop.py
|
Python
|
mit
| 1,446
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Resource'
db.create_table('inventory_resource', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.TextField')()),
('trainable', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('inventory', ['Resource'])
# Adding model 'Metadata'
db.create_table('inventory_metadata', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.TextField')()),
('type', self.gf('django.db.models.fields.IntegerField')()),
('value', self.gf('django.db.models.fields.TextField')()),
('resource', self.gf('django.db.models.fields.related.ForeignKey')(related_name='metadata', to=orm['inventory.Resource'])),
))
db.send_create_signal('inventory', ['Metadata'])
# Adding model 'TrainingLevel'
db.create_table('inventory_traininglevel', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('member', self.gf('django.db.models.fields.related.ForeignKey')(related_name='trainings', to=orm['membership.Member'])),
('resource', self.gf('django.db.models.fields.related.ForeignKey')(related_name='trainings', to=orm['inventory.Resource'])),
('rank', self.gf('django.db.models.fields.IntegerField')()),
('comments', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('inventory', ['TrainingLevel'])
def backwards(self, orm):
# Deleting model 'Resource'
db.delete_table('inventory_resource')
# Deleting model 'Metadata'
db.delete_table('inventory_metadata')
# Deleting model 'TrainingLevel'
db.delete_table('inventory_traininglevel')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'inventory.metadata': {
'Meta': {'object_name': 'Metadata'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['inventory.Resource']"}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'value': ('django.db.models.fields.TextField', [], {})
},
'inventory.resource': {
'Meta': {'object_name': 'Resource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'trainable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['membership.Member']", 'through': "orm['inventory.TrainingLevel']", 'symmetrical': 'False'})
},
'inventory.traininglevel': {
'Meta': {'object_name': 'TrainingLevel'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trainings'", 'to': "orm['membership.Member']"}),
'rank': ('django.db.models.fields.IntegerField', [], {}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trainings'", 'to': "orm['inventory.Resource']"})
},
'membership.field': {
'Meta': {'object_name': 'Field'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'membership.fieldvalue': {
'Meta': {'object_name': 'FieldValue'},
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['membership.Field']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'to': "orm['membership.Member']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'membership.member': {
'Meta': {'object_name': 'Member'},
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fields': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['membership.Field']", 'through': "orm['membership.FieldValue']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastSeen': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'profession': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tagline': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['inventory']
|
SYNHAK/spiff
|
spiff/inventory/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 9,068
|
"""
This effectively walks through module import statements recursively to find
all modules that a given one depends on.
It furthermore manages the packaging of newly found dependencies when requested
ISSUES: For speed, this does not use pathhooks unless imp.find_module fails.
Consequently, if modules can be found in two different sys.path entries,
the order
processed by this module may differ from the python import system
Entirely arbitrary pathhooks are not supported for now - only ZipImporter
(specifically importers with a archive attribute)
There are some hacks to deal with transmitting archives -- we coerce
archives to be stored under cloud.archive/ (see ARCHIVE_PATH below).
An eventual goal is to clean up the hackish pathhook support code
Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_.
All rights reserved.
email: contact@picloud.com
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
from __future__ import with_statement
import os
import sys
import modulefinder
import imp
import marshal
import dis
#from ..serialization import cloudpickle
import cloudpickle
import logging
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s,%(msecs)03d %(levelname)-8s \
%(module)-16s L:%(lineno)03d P:%(process)-4d \
T:%(thread)-4d %(message)s",
datefmt='%H:%M:%S')
cloudLog = logging.getLogger(__name__)
#from .. import cloudconfig as cc
#cloudLog = logging.getLogger("Cloud.Transport")
LOAD_CONST = chr(dis.opname.index('LOAD_CONST'))
IMPORT_NAME = chr(dis.opname.index('IMPORT_NAME'))
STORE_NAME = chr(dis.opname.index('STORE_NAME'))
STORE_GLOBAL = chr(dis.opname.index('STORE_GLOBAL'))
STORE_OPS = [STORE_NAME, STORE_GLOBAL]
HAVE_ARGUMENT = chr(dis.HAVE_ARGUMENT)
ZIP_IMPORT = -9 # custom imp-like type code
class DependencyManager(modulefinder.ModuleFinder):
"""
Based off of module finder.
Features:
-IgnoreList to ignore base python packages for performance purposes
-Timestamp tracking
-'Snapshots' to determine new modules
-Warnings on using custom C extensions
Note: This is not thread safe: The user of this is responsible for
locking it down
TODO: Be smart with using import hooks (get_code)
"""
@staticmethod
def formatIgnoreList(unformattedList):
"""Format the ignore list"""
builtins = sys.builtin_module_names
ignoreList = {}
for module in unformattedList:
modname = module.strip()
if modname[0] == '#' or modname[0] == ';':
continue
modname = modname.split('.')
if modname[-1] == '*':
ignoreList[tuple(modname[:-1])] = True
else:
                # exact module name (no trailing '.*')
ignoreList[tuple(modname)] = True
#add builtins:
for builtin in builtins:
ignoreList[(builtin, )] = True
return ignoreList
def __init__(self, path=sys.path, debug=0, excludes=[], replace_paths=[]):
modulefinder.ModuleFinder.__init__(self, path, debug, None,
replace_paths)
self.ignoreList = self.formatIgnoreList(excludes)
self.lastSnapshot = set() # tracking
self.transitError = set() # avoid excessive extension warnings
# analyze main which is not transmitted
m = sys.modules['__main__']
if hasattr(m, '__file__') and cloudpickle.useForcedImports:
self.inject_module(m)
# if main imports a.b we might not see that b has been loaded
# The below is a hack to detect this case:
checkModules = self.modules.keys() + self.get_ignored_modules()
for mod in checkModules:
self.msgout(2, "inspect", m)
if '.' in mod:
loadedmod = sys.modules.get(mod)
# if mod is not loaded, import is triggered in a function:
if loadedmod:
if not hasattr(m, '___pyc_forcedImports__'):
m.___pyc_forcedImports__ = set()
m.___pyc_forcedImports__.add(loadedmod)
def shouldIgnore(self, modname):
"""Check ignoreList to determine if this module should
not be processed"""
modname = tuple(modname.split('.'))
if modname in self.ignoreList:
return True
for i in range(1, len(modname)):
tst = modname[0:-i]
# print 'doing test', tst
val = self.ignoreList.get(tst)
if val: # Must be true
return True
return False
def load_package(self, fqname, pathname, archive_name=None):
"""Fix bug with not passing parent into find_module"""
self.msgin(2, "load_package", fqname, pathname)
newname = modulefinder.replacePackageMap.get(fqname)
if newname:
fqname = newname
if archive_name: # part of an archive
m = self.add_module(fqname,
filename=archive_name,
path=[pathname] + \
modulefinder.packagePathMap.get(fqname,
[]),
is_archive=True)
else:
# As per comment in modulefinder, simulate runtime __path__
# additions.
m = self.add_module(fqname,
filename=pathname + '/__init__.py',
path=[pathname] + \
modulefinder.packagePathMap.get(fqname,
[]))
# Bug fix. python2.6 modulefinder doesn't pass parent to find_module
fp, buf, stuff = self.find_module("__init__", m.__path__, parent=m)
self.load_module(fqname, fp, buf, stuff)
self.msgout(2, "load_package ->", m)
return m
def inject_module(self, mod):
"""High level module adding.
This adds an actual module from sys.modules into the finder
"""
mname = mod.__name__
if mname in self.modules:
return
if self.shouldIgnore(mname):
return
if mname == '__main__': # special case
searchnames = []
dirs, mod = os.path.split(mod.__file__)
searchname = mod.split('.', 1)[0] # extract fully qualified name
else:
searchnames = mname.rsplit('.', 1)
# must load parents first...
pkg = searchnames[0]
if len(searchnames) > 1 and pkg not in self.modules:
self.inject_module(sys.modules[pkg])
searchname = searchnames[-1]
if len(searchnames) > 1: # this module has a parent - resolve it
path = sys.modules[pkg].__path__
else:
path = None
try:
fp, pathname, stuff = self.find_module(searchname, path)
self.load_module(mname, fp, pathname, stuff)
except ImportError: # Ignore import errors
pass
def add_module(self, fqname, filename, path=None, is_archive=False):
"""Save timestamp here"""
if fqname in self.modules:
return self.modules[fqname]
# print 'pre-adding %s' % fqname
if not filename: # ignore any builtin or extension
return
if is_archive:
# module's filename is set to the actual archive
relfilename = os.path.split(filename)[1]
# cloudpickle needs to know about this to deserialize correctly:
else:
# get relative path:
numsplits = fqname.count('.') + 1
relfilename = ""
absfilename = filename
for i in xrange(numsplits):
absfilename, tmp = os.path.split(absfilename)
relfilename = tmp + '/' + relfilename
if '__init__' in tmp:
# additional split as this is a package and
# __init__ is not in fqname
absfilename, tmp = os.path.split(absfilename)
relfilename = tmp + '/' + relfilename
relfilename = relfilename[:-1] # remove terminating /
self.modules[fqname] = m = modulefinder.Module(fqname,
relfilename, path)
# picloud: Timestamp module for update checks
m.timestamp = long(os.path.getmtime(filename))
m.is_archive = is_archive
return m
"""Manually try to find name on sys.path_hooks
Some code taken from python3.1 implib"""
def _path_hooks(self, path):
"""Search path hooks for a finder for 'path'.
"""
hooks = sys.path_hooks
for hook in hooks:
try:
finder = hook(path)
sys.path_importer_cache[path] = finder
return finder
except ImportError:
continue
return None
def manual_find(self, name, path):
"""Load with pathhooks. Return none if fails to load or if default
importer must be used
Otherwise returns loader object, path_loader_handles"""
finder = None
for entry in path:
try:
finder = sys.path_importer_cache[entry]
except KeyError:
finder = self._path_hooks(entry)
if finder:
loader = finder.find_module(name)
if loader:
return loader, entry
return None, None # nothing found!
def find_module(self, name, path, parent=None):
"""find_module using ignoreList
TODO: Somehow use pathhooks here
"""
if parent is not None:
# assert path is not None
fullname = parent.__name__ + '.' + name
else:
fullname = name
# print 'test to ignore %s -- %s -- %s' % (fullname, parent, path)
if self.shouldIgnore(fullname):
self.msgout(3, "find_module -> Ignored", fullname)
# PEP8 CHANGE
raise ImportError(name)
if path is None:
if name in sys.builtin_module_names:
return (None, None, ("", "", imp.C_BUILTIN))
path = sys.path
# print 'imp is scanning for %s at %s' % (name, path)
try:
return imp.find_module(name, path)
except ImportError:
# try path hooks
loader, ldpath = self.manual_find(name, path)
if not loader:
raise
#We now have a PEP 302 loader object. Internally, we must format it
if not hasattr(loader, 'archive') or not hasattr(loader,
'get_code'):
if fullname not in self.transitError:
cloudLog.warn("Cloud cannot transmit python module '%s'.\
It needs to be imported by a %s path hook, but such a path\
hook \
does not provide both the \
'archive' and 'get_code' property.. Import errors may\
result;\
please see PiCloud documentation." % (fullname,
str(loader)))
self.transitError.add(fullname)
raise
return (None, ldpath + '/' + name, (loader, name, ZIP_IMPORT))
def get_ignored_modules(self):
"""Return list of modules that are used but were ignored"""
ignored = []
for name in self.badmodules:
if self.shouldIgnore(name):
ignored.append(name)
return ignored
def any_missing_maybe(self):
"""Return two lists, one with modules that are certainly missing
and one with modules that *may* be missing. The latter names could
either be submodules *or* just global names in the package.
The reason it can't always be determined is that it's impossible to
tell which names are imported when "from module import *" is done
with an extension module, short of actually importing it.
PiCloud: Use ignoreList
"""
missing = []
maybe = []
for name in self.badmodules:
if self.shouldIgnore(name):
continue
i = name.rfind(".")
if i < 0:
missing.append(name)
continue
subname = name[i + 1:]
pkgname = name[:i]
pkg = self.modules.get(pkgname)
if pkg is not None:
if pkgname in self.badmodules[name]:
# The package tried to import this module itself and
# failed. It's definitely missing.
missing.append(name)
elif subname in pkg.globalnames:
# It's a global in the package: definitely not missing.
pass
elif pkg.starimports:
# It could be missing, but the package did an "import *"
# from a non-Python module, so we simply can't be sure.
maybe.append(name)
else:
# It's not a global in the package, the package didn't
# do funny star imports, it's very likely to be missing.
# The symbol could be inserted into the package from the
# outside, but since that's not good style we simply list
# it missing.
missing.append(name)
else:
missing.append(name)
missing.sort()
maybe.sort()
return missing, maybe
def load_module(self, fqname, fp, pathname, file_info):
suffix, mode, type = file_info
#PiCloud: Warn on C extensions and __import_
self.msgin(2, "load_module", fqname, fp and "fp", pathname)
if type == ZIP_IMPORT:
#archive (as suffix) is an PEP 302 importer that implements
# archive and get_code
#pathname is used to access the file within the loader
archive = suffix
#mode is the actual name we want to read
name = mode
if archive.is_package(name): # use load_package with archive set
m = self.load_package(fqname, pathname,
archive_name=archive.archive)
return m
else:
try:
co = archive.get_code(name)
except ImportError:
cloudLog.warn("Cloud cannot read '%s' within '%s'. \
Import errors may result; \
please see PiCloud documentation." % (fqname,
archive.archive))
raise
m = self.add_module(fqname, archive.archive, is_archive=True)
else:
if type == imp.PKG_DIRECTORY:
m = self.load_package(fqname, pathname)
self.msgout(2, "load_module ->", m)
return m
elif type == imp.PY_SOURCE:
try:
co = compile(fp.read() + '\n', pathname, 'exec')
except SyntaxError: # compilation fail.
cloudLog.warn("Syntax error in %s. Import errors may\
occur in rare situations." % pathname)
raise ImportError("Syntax error in %s" % pathname)
elif type == imp.PY_COMPILED:
if fp.read(4) != imp.get_magic():
cloudLog.warn("Magic number on %s is invalid. Import\
errors may occur in rare situations." % pathname)
self.msgout(2, "raise ImportError: Bad magic number",
pathname)
raise ImportError("Bad magic number in %s" % pathname)
fp.read(4)
co = marshal.load(fp)
elif type == imp.C_EXTENSION:
if fqname not in self.transitError:
cloudLog.warn("Cloud cannot transmit python extension\
'%s' located at '%s'. Import errors may result;\
please see PiCloud documentation." % (fqname,
pathname))
self.transitError.add(fqname)
raise ImportError(fqname)
else:
co = None
m = self.add_module(fqname, filename=pathname)
if co:
if self.replace_paths:
co = self.replace_paths_in_code(co)
m.__code__ = co
names = co.co_names
if names and '__import__' in names:
#PiCloud: Warn on __import__
cloudLog.warn('__import__ found within %s. Cloud cannot\
follow these \
dependencies. You MAY see importerror cloud exceptions.\
For more information,\
consult the PiCloud manual'
% fqname)
self.scan_code(co, m)
self.msgout(2, "load_module ->", m)
return m
def getUpdatedSnapshot(self):
"""Return any new myMods values since this was last called"""
outList = []
for modname, modobj in self.modules.items():
if modname not in self.lastSnapshot:
if modobj.is_archive: # check if archive has already been sent
archive = modobj.__file__
if archive in self.lastSnapshot:
continue
else:
self.lastSnapshot.add(archive)
outList.append((modobj.__file__, modobj.timestamp,
modobj.is_archive))
self.lastSnapshot.add(modname)
return outList
class FilePackager(object):
"""This class is responsible for the packaging of files"""
"""This is not thread safe"""
fileCollection = None
ARCHIVE_PATH = 'cloud.archive/' # location where archives are extracted
def __init__(self, path_infos=None):
"""path_infos is a list of (paths relative to site-packages,
archive)"""
self.fileCollection = {}
if path_infos:
for relPath, archive in path_infos:
if archive:
self.addArchive(relPath)
else:
self.addRelativePath(relPath)
def addArchive(self, archive_name):
for site in sys.path:
if site.endswith(archive_name):
self.fileCollection[self.ARCHIVE_PATH + archive_name] = site
def addRelativePath(self, relPath):
"""Add a file by relative path to the File Transfer"""
for site in sys.path:
if site != '':
site += '/'
tst = os.path.join(site, relPath.encode())
if os.path.exists(tst):
self.fileCollection[relPath] = tst
return
from ..cloud import CloudException
raise CloudException('FilePackager: %s not found on sys.path' %
relPath)
def getTarball(self):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import tarfile
outfile = StringIO()
tfile = tarfile.open(name='transfer.tar', fileobj=outfile, mode='w')
tfile.dereference = True
for arcname, fname in self.fileCollection.items():
tfile.add(name=fname, arcname=arcname, recursive=False)
tfile.close()
return outfile.getvalue()
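# Hedged usage sketch (added, not part of the original file): walking the
# current program's imports and packaging newly seen files. The excludes
# entries are illustrative.
# dm = DependencyManager(excludes=['numpy.*', 'scipy.*'])
# new_mods = dm.getUpdatedSnapshot()  # [(relpath, timestamp, is_archive)]
# packager = FilePackager([(path, is_arc) for path, _, is_arc in new_mods])
# tarball_bytes = packager.getTarball()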
|
jpzk/evopy
|
evopy/external/playdoh/codehandler/codedependency.py
|
Python
|
gpl-3.0
| 20,711
|
# Copyright 2016:
# * Jim Unroe KC9HI, <rock.unroe@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import struct
import logging
import re
LOG = logging.getLogger(__name__)
from chirp import chirp_common, directory, memmap
from chirp import bitwise, errors, util
from chirp.settings import RadioSettingGroup, RadioSetting, \
RadioSettingValueBoolean, RadioSettingValueList, \
RadioSettingValueString, RadioSettingValueInteger, \
RadioSettingValueFloat, RadioSettings
from textwrap import dedent
MEM_FORMAT = """
#seekto 0x0200;
struct {
u8 unknown1;
u8 volume;
u8 unknown2[2];
u8 wtled;
u8 rxled;
u8 txled;
u8 ledsw;
u8 beep;
u8 ring;
u8 bcl;
u8 tot;
} settings;
struct vfo {
u8 unknown1[2];
u32 rxfreq;
u8 unknown2[8];
u8 power;
u8 unknown3[3];
u24 offset;
u32 step;
u8 sql;
};
#seekto 0x0300;
struct {
struct vfo vfoa;
} upper;
#seekto 0x0380;
struct {
struct vfo vfob;
} lower;
struct mem {
u32 rxfreq;
u16 is_rxdigtone:1,
rxdtcs_pol:1,
rxtone:14;
u8 recvmode;
u32 txfreq;
u16 is_txdigtone:1,
txdtcs_pol:1,
txtone:14;
u8 botsignal;
u8 eotsignal;
u8 power:1,
wide:1,
     compandor:1,
     scrambler:1,
     unknown:4;
u8 namelen;
u8 name[6];
u8 unused;
};
#seekto 0x0400;
struct mem upper_memory[128];
#seekto 0x1000;
struct mem lower_memory[128];
"""
MEM_SIZE = 0x1C00
BLOCK_SIZE = 0x40
STIMEOUT = 2
LIST_RECVMODE = ["", "QT/DQT", "QT/DQT + Signaling"]
LIST_SIGNAL = ["Off"] + ["DTMF%s" % x for x in range(1, 9)] + \
["DTMF%s + Identity" % x for x in range(1, 9)] + \
["Identity code"]
LIST_POWER = ["Low", "Mid", "High"]
LIST_COLOR = ["Off", "Orange", "Blue", "Purple"]
LIST_LEDSW = ["Auto", "On"]
LIST_RING = ["Off"] + ["%s seconds" % x for x in range(1, 10)]
LIST_TIMEOUT = ["Off"] + ["%s seconds" % x for x in range(30, 630, 30)]
def _clean_buffer(radio):
radio.pipe.timeout = 0.005
junk = radio.pipe.read(256)
radio.pipe.timeout = STIMEOUT
if junk:
        LOG.debug("Got %i bytes of junk before starting" % len(junk))
def _rawrecv(radio, amount):
"""Raw read from the radio device"""
data = ""
try:
data = radio.pipe.read(amount)
except:
_exit_program_mode(radio)
msg = "Generic error reading data from radio; check your cable."
raise errors.RadioError(msg)
if len(data) != amount:
_exit_program_mode(radio)
msg = "Error reading data from radio: not the amount of data we want."
raise errors.RadioError(msg)
return data
def _rawsend(radio, data):
"""Raw send to the radio device"""
try:
radio.pipe.write(data)
except:
raise errors.RadioError("Error sending data to radio")
def _make_frame(cmd, addr, length, data=""):
"""Pack the info in the headder format"""
frame = struct.pack(">4sHH", cmd, addr, length)
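    # e.g. _make_frame("READ", 0x0200, 0x40) == "READ\x02\x00\x00\x40"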
# add the data if set
if len(data) != 0:
frame += data
# return the data
return frame
def _recv(radio, addr, length):
"""Get data from the radio """
data = _rawrecv(radio, length)
# DEBUG
LOG.info("Response:")
LOG.debug(util.hexprint(data))
return data
def _do_ident(radio):
"""Put the radio in PROGRAM mode & identify it"""
# set the serial discipline
radio.pipe.baudrate = 19200
radio.pipe.parity = "N"
radio.pipe.timeout = STIMEOUT
# flush input buffer
_clean_buffer(radio)
magic = "PROM_LIN"
_rawsend(radio, magic)
ack = _rawrecv(radio, 1)
if ack != "\x06":
_exit_program_mode(radio)
if ack:
LOG.debug(repr(ack))
raise errors.RadioError("Radio did not respond")
return True
def _exit_program_mode(radio):
endframe = "EXIT"
_rawsend(radio, endframe)
def _download(radio):
"""Get the memory map"""
# put radio in program mode and identify it
_do_ident(radio)
# UI progress
status = chirp_common.Status()
status.cur = 0
status.max = MEM_SIZE / BLOCK_SIZE
status.msg = "Cloning from radio..."
radio.status_fn(status)
data = ""
for addr in range(0, MEM_SIZE, BLOCK_SIZE):
frame = _make_frame("READ", addr, BLOCK_SIZE)
# DEBUG
LOG.info("Request sent:")
LOG.debug(util.hexprint(frame))
# sending the read request
_rawsend(radio, frame)
# now we read
d = _recv(radio, addr, BLOCK_SIZE)
# aggregate the data
data += d
# UI Update
status.cur = addr / BLOCK_SIZE
status.msg = "Cloning from radio..."
radio.status_fn(status)
_exit_program_mode(radio)
data += "LT-725UV"
return data
def _upload(radio):
"""Upload procedure"""
# put radio in program mode and identify it
_do_ident(radio)
# UI progress
status = chirp_common.Status()
status.cur = 0
status.max = MEM_SIZE / BLOCK_SIZE
status.msg = "Cloning to radio..."
radio.status_fn(status)
# the fun starts here
for addr in range(0, MEM_SIZE, BLOCK_SIZE):
# sending the data
data = radio.get_mmap()[addr:addr + BLOCK_SIZE]
frame = _make_frame("WRIE", addr, BLOCK_SIZE, data)
_rawsend(radio, frame)
# receiving the response
ack = _rawrecv(radio, 1)
if ack != "\x06":
_exit_program_mode(radio)
msg = "Bad ack writing block 0x%04x" % addr
raise errors.RadioError(msg)
# UI Update
status.cur = addr / BLOCK_SIZE
status.msg = "Cloning to radio..."
radio.status_fn(status)
_exit_program_mode(radio)
def model_match(cls, data):
"""Match the opened/downloaded image to the correct version"""
rid = data[0x1C00:0x1C08]
if rid == cls.MODEL:
return True
return False
def _split(rf, f1, f2):
"""Returns False if the two freqs are in the same band (no split)
or True otherwise"""
# determine if the two freqs are in the same band
for low, high in rf.valid_bands:
if f1 >= low and f1 <= high and \
f2 >= low and f2 <= high:
# if the two freqs are on the same Band this is not a split
return False
    # if you get here, it is because the freq pair is split
return True
@directory.register
class LT725UV(chirp_common.CloneModeRadio,
chirp_common.ExperimentalRadio):
"""LUITON LT-725UV Radio"""
VENDOR = "LUITON"
MODEL = "LT-725UV"
MODES = ["NFM", "FM"]
TONES = chirp_common.TONES
DTCS_CODES = sorted(chirp_common.DTCS_CODES + [645])
NAME_LENGTH = 6
DTMF_CHARS = list("0123456789ABCD*#")
VALID_BANDS = [(136000000, 176000000),
(400000000, 480000000)]
# valid chars on the LCD
VALID_CHARS = chirp_common.CHARSET_ALPHANUMERIC + \
"`{|}!\"#$%&'()*+,-./:;<=>?@[]^_"
@classmethod
def get_prompts(cls):
rp = chirp_common.RadioPrompts()
rp.experimental = \
('The LT725UV driver is a beta version.\n'
'\n'
'Please save an unedited copy of your first successful\n'
'download to a CHIRP Radio Images(*.img) file.'
)
rp.pre_download = _(dedent("""\
            Follow these instructions to download your info:
1 - Turn off your radio
2 - Connect your interface cable
3 - Turn on your radio
4 - Do the download of your radio data
"""))
rp.pre_upload = _(dedent("""\
            Follow these instructions to upload your info:
1 - Turn off your radio
2 - Connect your interface cable
3 - Turn on your radio
4 - Do the upload of your radio data
"""))
return rp
def get_features(self):
rf = chirp_common.RadioFeatures()
rf.has_settings = True
rf.has_bank = False
rf.has_tuning_step = False
rf.can_odd_split = True
rf.has_name = True
rf.has_offset = True
rf.has_mode = True
rf.has_dtcs = True
rf.has_rx_dtcs = True
rf.has_dtcs_polarity = True
rf.has_ctone = True
rf.has_cross = True
rf.has_sub_devices = self.VARIANT == ""
rf.valid_modes = self.MODES
rf.valid_characters = self.VALID_CHARS
rf.valid_duplexes = ["", "-", "+", "split", "off"]
rf.valid_tmodes = ['', 'Tone', 'TSQL', 'DTCS', 'Cross']
rf.valid_cross_modes = [
"Tone->Tone",
"DTCS->",
"->DTCS",
"Tone->DTCS",
"DTCS->Tone",
"->Tone",
"DTCS->DTCS"]
rf.valid_skips = []
rf.valid_name_length = self.NAME_LENGTH
rf.valid_dtcs_codes = self.DTCS_CODES
rf.valid_bands = self.VALID_BANDS
rf.memory_bounds = (1, 128)
return rf
def get_sub_devices(self):
return [LT725UVUpper(self._mmap), LT725UVLower(self._mmap)]
def sync_in(self):
"""Download from radio"""
try:
data = _download(self)
except errors.RadioError:
# Pass through any real errors we raise
raise
except:
# If anything unexpected happens, make sure we raise
# a RadioError and log the problem
LOG.exception('Unexpected error during download')
raise errors.RadioError('Unexpected error communicating '
'with the radio')
self._mmap = memmap.MemoryMap(data)
self.process_mmap()
def sync_out(self):
"""Upload to radio"""
try:
_upload(self)
except:
# If anything unexpected happens, make sure we raise
# a RadioError and log the problem
LOG.exception('Unexpected error during upload')
raise errors.RadioError('Unexpected error communicating '
'with the radio')
def process_mmap(self):
"""Process the mem map into the mem object"""
self._memobj = bitwise.parse(MEM_FORMAT, self._mmap)
def get_raw_memory(self, number):
return repr(self._memobj.memory[number - 1])
def _memory_obj(self, suffix=""):
return getattr(self._memobj, "%s_memory%s" % (self._vfo, suffix))
def _get_dcs(self, val):
return int(str(val)[2:-18])
def _set_dcs(self, val):
return int(str(val), 16)
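    # Illustrative note (an assumption about the storage format): the decimal
    # DCS code appears to be stored BCD-style, with its digits reinterpreted
    # as hex nibbles, so code 023 round-trips as _set_dcs(23) == 0x23 and
    # _get_dcs() recovers it from the bitwise value's string representation.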
def get_memory(self, number):
_mem = self._memory_obj()[number - 1]
mem = chirp_common.Memory()
mem.number = number
if _mem.get_raw()[0] == "\xff":
mem.empty = True
return mem
mem.freq = int(_mem.rxfreq) * 10
if _mem.txfreq == 0xFFFFFFFF:
# TX freq not set
mem.duplex = "off"
mem.offset = 0
elif int(_mem.rxfreq) == int(_mem.txfreq):
mem.duplex = ""
mem.offset = 0
elif _split(self.get_features(), mem.freq, int(_mem.txfreq) * 10):
mem.duplex = "split"
mem.offset = int(_mem.txfreq) * 10
else:
            mem.duplex = "-" if int(_mem.rxfreq) > int(_mem.txfreq) else "+"
mem.offset = abs(int(_mem.rxfreq) - int(_mem.txfreq)) * 10
for char in _mem.name[:_mem.namelen]:
mem.name += chr(char)
dtcs_pol = ["N", "N"]
if _mem.rxtone == 0x3FFF:
rxmode = ""
elif _mem.is_rxdigtone == 0:
# ctcss
rxmode = "Tone"
mem.ctone = int(_mem.rxtone) / 10.0
else:
# digital
rxmode = "DTCS"
mem.rx_dtcs = self._get_dcs(_mem.rxtone)
if _mem.rxdtcs_pol == 1:
dtcs_pol[1] = "R"
if _mem.txtone == 0x3FFF:
txmode = ""
elif _mem.is_txdigtone == 0:
# ctcss
txmode = "Tone"
mem.rtone = int(_mem.txtone) / 10.0
else:
# digital
txmode = "DTCS"
mem.dtcs = self._get_dcs(_mem.txtone)
if _mem.txdtcs_pol == 1:
dtcs_pol[0] = "R"
if txmode == "Tone" and not rxmode:
mem.tmode = "Tone"
elif txmode == rxmode and txmode == "Tone" and mem.rtone == mem.ctone:
mem.tmode = "TSQL"
elif txmode == rxmode and txmode == "DTCS" and mem.dtcs == mem.rx_dtcs:
mem.tmode = "DTCS"
elif rxmode or txmode:
mem.tmode = "Cross"
mem.cross_mode = "%s->%s" % (txmode, rxmode)
mem.dtcs_polarity = "".join(dtcs_pol)
mem.mode = self.MODES[_mem.wide]
# Extra
mem.extra = RadioSettingGroup("extra", "Extra")
if _mem.recvmode == 0xFF:
val = 0x00
else:
val = _mem.recvmode
recvmode = RadioSetting("recvmode", "Receiving mode",
RadioSettingValueList(LIST_RECVMODE,
LIST_RECVMODE[val]))
mem.extra.append(recvmode)
if _mem.botsignal == 0xFF:
val = 0x00
else:
val = _mem.botsignal
botsignal = RadioSetting("botsignal", "Launch signaling",
RadioSettingValueList(LIST_SIGNAL,
LIST_SIGNAL[val]))
mem.extra.append(botsignal)
if _mem.eotsignal == 0xFF:
val = 0x00
else:
val = _mem.eotsignal
eotsignal = RadioSetting("eotsignal", "Transmit end signaling",
RadioSettingValueList(LIST_SIGNAL,
LIST_SIGNAL[val]))
mem.extra.append(eotsignal)
compandor = RadioSetting("compandor", "Compandor",
RadioSettingValueBoolean(bool(_mem.compandor)))
mem.extra.append(compandor)
scrambler = RadioSetting("scrambler", "Scrambler",
RadioSettingValueBoolean(bool(_mem.scrambler)))
mem.extra.append(scrambler)
return mem
def set_memory(self, mem):
_mem = self._memory_obj()[mem.number - 1]
if mem.empty:
_mem.set_raw("\xff" * 24)
_mem.namelen = 0
return
_mem.set_raw("\xFF" * 15 + "\x00\x00" + "\xFF" * 7)
_mem.rxfreq = mem.freq / 10
if mem.duplex == "off":
_mem.txfreq = 0xFFFFFFFF
elif mem.duplex == "split":
_mem.txfreq = mem.offset / 10
elif mem.duplex == "+":
_mem.txfreq = (mem.freq + mem.offset) / 10
elif mem.duplex == "-":
_mem.txfreq = (mem.freq - mem.offset) / 10
else:
_mem.txfreq = mem.freq / 10
_mem.namelen = len(mem.name)
_namelength = self.get_features().valid_name_length
for i in range(_namelength):
try:
_mem.name[i] = ord(mem.name[i])
except IndexError:
_mem.name[i] = 0xFF
rxmode = ""
txmode = ""
if mem.tmode == "Tone":
txmode = "Tone"
elif mem.tmode == "TSQL":
rxmode = "Tone"
txmode = "TSQL"
elif mem.tmode == "DTCS":
rxmode = "DTCSSQL"
txmode = "DTCS"
elif mem.tmode == "Cross":
txmode, rxmode = mem.cross_mode.split("->", 1)
if rxmode == "":
_mem.rxdtcs_pol = 1
_mem.is_rxdigtone = 1
_mem.rxtone = 0x3FFF
elif rxmode == "Tone":
_mem.rxdtcs_pol = 0
_mem.is_rxdigtone = 0
_mem.rxtone = int(mem.ctone * 10)
elif rxmode == "DTCSSQL":
_mem.rxdtcs_pol = 1 if mem.dtcs_polarity[1] == "R" else 0
_mem.is_rxdigtone = 1
_mem.rxtone = self._set_dcs(mem.dtcs)
elif rxmode == "DTCS":
_mem.rxdtcs_pol = 1 if mem.dtcs_polarity[1] == "R" else 0
_mem.is_rxdigtone = 1
_mem.rxtone = self._set_dcs(mem.rx_dtcs)
if txmode == "":
_mem.txdtcs_pol = 1
_mem.is_txdigtone = 1
_mem.txtone = 0x3FFF
elif txmode == "Tone":
_mem.txdtcs_pol = 0
_mem.is_txdigtone = 0
_mem.txtone = int(mem.rtone * 10)
elif txmode == "TSQL":
_mem.txdtcs_pol = 0
_mem.is_txdigtone = 0
_mem.txtone = int(mem.ctone * 10)
elif txmode == "DTCS":
_mem.txdtcs_pol = 1 if mem.dtcs_polarity[0] == "R" else 0
_mem.is_txdigtone = 1
_mem.txtone = self._set_dcs(mem.dtcs)
_mem.wide = self.MODES.index(mem.mode)
# extra settings
for setting in mem.extra:
setattr(_mem, setting.get_name(), setting.value)
def get_settings(self):
"""Translate the bit in the mem_struct into settings in the UI"""
_mem = self._memobj
basic = RadioSettingGroup("basic", "Basic Settings")
top = RadioSettings(basic)
# Basic
volume = RadioSetting("settings.volume", "Volume",
RadioSettingValueInteger(0, 20,
_mem.settings.volume))
basic.append(volume)
powera = RadioSetting("upper.vfoa.power", "Power (Upper)",
RadioSettingValueList(LIST_POWER, LIST_POWER[
_mem.upper.vfoa.power]))
basic.append(powera)
powerb = RadioSetting("lower.vfob.power", "Power (Lower)",
RadioSettingValueList(LIST_POWER, LIST_POWER[
_mem.lower.vfob.power]))
basic.append(powerb)
wtled = RadioSetting("settings.wtled", "Standby LED Color",
RadioSettingValueList(LIST_COLOR, LIST_COLOR[
_mem.settings.wtled]))
basic.append(wtled)
rxled = RadioSetting("settings.rxled", "RX LED Color",
RadioSettingValueList(LIST_COLOR, LIST_COLOR[
_mem.settings.rxled]))
basic.append(rxled)
txled = RadioSetting("settings.txled", "TX LED Color",
RadioSettingValueList(LIST_COLOR, LIST_COLOR[
_mem.settings.txled]))
basic.append(txled)
ledsw = RadioSetting("settings.ledsw", "Back light mode",
RadioSettingValueList(LIST_LEDSW, LIST_LEDSW[
_mem.settings.ledsw]))
basic.append(ledsw)
beep = RadioSetting("settings.beep", "Beep",
RadioSettingValueBoolean(bool(_mem.settings.beep)))
basic.append(beep)
ring = RadioSetting("settings.ring", "Ring",
RadioSettingValueList(LIST_RING, LIST_RING[
_mem.settings.ring]))
basic.append(ring)
bcl = RadioSetting("settings.bcl", "Busy channel lockout",
RadioSettingValueBoolean(bool(_mem.settings.bcl)))
basic.append(bcl)
tot = RadioSetting("settings.tot", "Timeout Timer",
RadioSettingValueList(LIST_TIMEOUT, LIST_TIMEOUT[
_mem.settings.tot]))
basic.append(tot)
if _mem.upper.vfoa.sql == 0xFF:
val = 0x04
else:
val = _mem.upper.vfoa.sql
sqla = RadioSetting("upper.vfoa.sql", "Squelch (Upper)",
RadioSettingValueInteger(0, 9, val))
basic.append(sqla)
if _mem.lower.vfob.sql == 0xFF:
val = 0x04
else:
val = _mem.lower.vfob.sql
sqlb = RadioSetting("lower.vfob.sql", "Squelch (Lower)",
RadioSettingValueInteger(0, 9, val))
basic.append(sqlb)
return top
def set_settings(self, settings):
_settings = self._memobj.settings
_mem = self._memobj
for element in settings:
if not isinstance(element, RadioSetting):
self.set_settings(element)
continue
else:
try:
name = element.get_name()
if "." in name:
bits = name.split(".")
obj = self._memobj
for bit in bits[:-1]:
if "/" in bit:
bit, index = bit.split("/", 1)
index = int(index)
obj = getattr(obj, bit)[index]
else:
obj = getattr(obj, bit)
setting = bits[-1]
else:
obj = _settings
setting = element.get_name()
if element.has_apply_callback():
LOG.debug("Using apply callback")
element.run_apply_callback()
elif element.value.get_mutable():
LOG.debug("Setting %s = %s" % (setting, element.value))
setattr(obj, setting, element.value)
except Exception, e:
LOG.debug(element.get_name())
raise
@classmethod
def match_model(cls, filedata, filename):
match_size = False
match_model = False
# testing the file data size
if len(filedata) == MEM_SIZE + 8:
match_size = True
# testing the firmware model fingerprint
match_model = model_match(cls, filedata)
        return match_size and match_model
class LT725UVUpper(LT725UV):
VARIANT = "Upper"
_vfo = "upper"
class LT725UVLower(LT725UV):
VARIANT = "Lower"
_vfo = "lower"
|
mach327/chirp_fork
|
chirp/drivers/lt725uv.py
|
Python
|
gpl-3.0
| 22,684
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BFRES Tool
# Version 5.1
# Copyright © 2017-2018 AboodXD
# This file is part of BFRES Tool.
# BFRES Tool is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# BFRES Tool is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import struct
class GX2Surface(struct.Struct):
def __init__(self):
super().__init__('>16I')
def data(self, data, pos):
(self.dim,
self.width,
self.height,
self.depth,
self.numMips,
self.format_,
self.aa,
self.use,
self.imageSize,
self.imagePtr,
self.mipSize,
self.mipPtr,
self.tileMode,
self.swizzle,
self.alignment,
self.pitch) = self.unpack_from(data, pos)
class empty():
pass
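# Usage sketch (raw_bytes and offset are hypothetical, not from the tool):
#   surf = GX2Surface()
#   surf.data(raw_bytes, offset)   # unpacks 16 big-endian uint32 fields
#   print(surf.width, surf.height, surf.format_)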
|
aboood40091/BFRES-Tool
|
structs.py
|
Python
|
gpl-3.0
| 1,321
|
def collatz_generator(n):
    """Yield the Collatz sequence starting at n, including n itself."""
    yield n
    while n > 1:
        if n % 2 == 0:
            n = n / 2
        else:
            n = 3 * n + 1
        yield n
# Project Euler 14: search starting numbers up to one million for the
# longest Collatz chain.
i = 1000000
max_chain = 0
max_num = 0
while i > 1:
n = len(list(collatz_generator(i)))
if n > max_chain:
max_chain = n
max_num = i
i -= 1
print max_num, max_chain
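# A minimal sketch (not part of the original solution): memoizing chain
# lengths avoids recomputing the shared tails of Collatz trajectories and
# speeds up the brute-force search above considerably.
def collatz_length(n, _cache={1: 1}):
    # _cache is deliberately a shared mutable default: a simple memo table
    if n not in _cache:
        nxt = n // 2 if n % 2 == 0 else 3 * n + 1
        _cache[n] = 1 + collatz_length(nxt)
    return _cache[n]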
|
cmaron/Project-Euler
|
14-longestCollatzSequence.py
|
Python
|
mit
| 291
|
# -*- coding: utf-8 -*-
import time
from PyQt5.QtCore import Qt, QPoint
from PyQt5.QtGui import QCursor
from PyQt5.QtTest import QTest
from PyQt5.QtWidgets import QApplication
from . import event_loop
from .gestures import Automaton, MIN_TIME_TO_AVOID_DOUBLE_CLICK
ONE_SECOND_IN_MILLIS = 1000
class KeyboardLayout:
MODIFIER_MAP = {
'ctrl': Qt.ControlModifier,
'command': Qt.ControlModifier
}
KEY_MAP = {
'backspace': Qt.Key_Backspace,
'escape': Qt.Key_Escape,
'return': Qt.Key_Return,
'f4': Qt.Key_F4,
}
@staticmethod
def modifier_code(key):
return KeyboardLayout.MODIFIER_MAP.get(key, key)
@staticmethod
def is_modifier(key):
return key in KeyboardLayout.MODIFIER_MAP
@staticmethod
def key_code(key):
return KeyboardLayout.KEY_MAP.get(key, key)
class MouseLayout:
BUTTON_MAP = {
'left': Qt.LeftButton,
'right': Qt.RightButton,
}
@staticmethod
def button_code(button):
return MouseLayout.BUTTON_MAP.get(button, button)
def current_time():
return time.perf_counter()
class Robot(Automaton):
"""
A robotic automaton that simulates human gestures. It is very fast, but has limitations.
The known limitations are:
- no mouse drag and drop
- no right clicks on Mac
"""
MOUSE_MOVE_DELAY = 10 # in ms
# For detecting double clicks
_last_button_clicked = -1
_last_click_time = 0
_last_click_position = (-1, -1)
def __init__(self):
self._modifiers = Qt.NoModifier
@property
def mouse_position(self):
current_position = QCursor.pos()
return current_position.x(), current_position.y()
def press_key(self, key):
if KeyboardLayout.is_modifier(key):
self._modifiers |= KeyboardLayout.modifier_code(key)
else:
QTest.keyPress(self._widget_under_cursor(), KeyboardLayout.key_code(key), self._modifiers)
def release_key(self, key):
if KeyboardLayout.is_modifier(key):
self._modifiers &= ~KeyboardLayout.modifier_code(key)
else:
QTest.keyRelease(self._widget_under_cursor(), KeyboardLayout.key_code(key), self._modifiers)
def type(self, key):
QTest.keyClick(self._widget_under_cursor(), KeyboardLayout.key_code(key), self._modifiers)
def move_mouse(self, x, y):
QCursor.setPos(x, y)
self.delay(self.MOUSE_MOVE_DELAY)
def press_mouse(self, button):
mouse_action = QTest.mouseDClick if self._double_click_detected(button) else QTest.mousePress
self._at_cursor_position(mouse_action, MouseLayout.button_code(button))
# for detecting double clicks
self._last_click_time = current_time()
self._last_button_clicked = button
self._last_click_position = self.mouse_position
def _double_click_detected(self, button_clicked):
current_position = self.mouse_position
elapsed_time_in_ms = (current_time() - self._last_click_time) * ONE_SECOND_IN_MILLIS
return (button_clicked == self._last_button_clicked) and \
(current_position == self._last_click_position) and \
(elapsed_time_in_ms <= MIN_TIME_TO_AVOID_DOUBLE_CLICK)
def release_mouse(self, button):
self._at_cursor_position(QTest.mouseRelease, MouseLayout.button_code(button))
def double_click_mouse(self, button):
self._at_cursor_position(QTest.mouseDClick, MouseLayout.button_code(button))
def delay(self, ms):
event_loop.process_events_for(ms)
def _at_cursor_position(self, mouse_action, button):
# By default QTest will operate mouse at the center of the widget,
# but we want the action to occur at the current cursor position
mouse_action(self._widget_under_cursor(), button, self._modifiers,
self._relative_position_to_widget_under_cursor())
def _widget_under_cursor(self):
return _widget_at(*self.mouse_position)
def _relative_position_to_widget_under_cursor(self):
return _compute_relative_position(self._widget_under_cursor(), *self.mouse_position)
def _widget_at(x, y):
widget = QApplication.widgetAt(x, y)
if not widget:
raise AssertionError('No widget at screen position (%d, %d)!'
' Have you moved the mouse while running the tests?' % (x, y))
return widget
def _compute_relative_position(widget, x, y):
return widget.mapFromGlobal(QPoint(x, y))
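# Illustrative usage (assumes the Automaton base class needs no extra setup):
#   robot = Robot()
#   robot.move_mouse(100, 200)
#   robot.press_mouse('left'); robot.release_mouse('left')
#   robot.press_mouse('left')   # same button, same spot, within the
#                               # double-click window -> sent as mouseDClick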
|
Iconoclasteinc/tgit
|
cute/robot.py
|
Python
|
gpl-3.0
| 4,559
|
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
import base64
import json
import os
import shutil
import re
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
import azurelinuxagent.common.utils.textutil as textutil
from azurelinuxagent.common.future import httpclient
from azurelinuxagent.common.protocol.restapi import *
from azurelinuxagent.common.utils.cryptutil import CryptUtil
METADATA_ENDPOINT = '169.254.169.254'
APIVERSION = '2015-05-01-preview'
BASE_URI = "http://{0}/Microsoft.Compute/{1}?api-version={2}"
TRANSPORT_PRV_FILE_NAME = "V2TransportPrivate.pem"
TRANSPORT_CERT_FILE_NAME = "V2TransportCert.pem"
P7M_FILE_NAME = "Certificates.p7m"
P7B_FILE_NAME = "Certificates.p7b"
PEM_FILE_NAME = "Certificates.pem"
KEY_AGENT_VERSION_URIS = "versionsManifestUris"
KEY_URI = "uri"
# TODO remote workaround for azure stack
MAX_PING = 30
RETRY_PING_INTERVAL = 10
def _add_content_type(headers):
if headers is None:
headers = {}
headers["content-type"] = "application/json"
return headers
class MetadataProtocol(Protocol):
def __init__(self, apiversion=APIVERSION, endpoint=METADATA_ENDPOINT):
self.apiversion = apiversion
self.endpoint = endpoint
self.identity_uri = BASE_URI.format(self.endpoint, "identity",
self.apiversion)
self.cert_uri = BASE_URI.format(self.endpoint, "certificates",
self.apiversion)
self.ext_uri = BASE_URI.format(self.endpoint, "extensionHandlers",
self.apiversion)
self.vmagent_uri = BASE_URI.format(self.endpoint, "vmAgentVersions",
self.apiversion)
self.provision_status_uri = BASE_URI.format(self.endpoint,
"provisioningStatus",
self.apiversion, "")
self.vm_status_uri = BASE_URI.format(self.endpoint, "status/vmagent",
self.apiversion, "")
self.ext_status_uri = BASE_URI.format(self.endpoint,
"status/extensions/{0}",
self.apiversion, "")
self.event_uri = BASE_URI.format(self.endpoint, "status/telemetry",
self.apiversion, "")
self.certs = None
self.agent_manifests = None
self.agent_etag = None
def _get_data(self, url, headers=None):
try:
resp = restutil.http_get(url, headers=headers)
except HttpError as e:
raise ProtocolError(ustr(e))
if restutil.request_failed(resp):
raise ProtocolError("{0} - GET: {1}".format(resp.status, url))
data = resp.read()
etag = resp.getheader('ETag')
if data is not None:
data = json.loads(ustr(data, encoding="utf-8"))
return data, etag
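    # Illustrative note: _get_data() returns (parsed_json, etag); callers such
    # as get_vmagent_manifests() compare the returned ETag with the last one
    # seen so an unchanged payload is not reprocessed.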
def _put_data(self, url, data, headers=None):
headers = _add_content_type(headers)
try:
resp = restutil.http_put(url, json.dumps(data), headers=headers)
except HttpError as e:
raise ProtocolError(ustr(e))
if restutil.request_failed(resp):
raise ProtocolError("{0} - PUT: {1}".format(resp.status, url))
def _post_data(self, url, data, headers=None):
headers = _add_content_type(headers)
try:
resp = restutil.http_post(url, json.dumps(data), headers=headers)
except HttpError as e:
raise ProtocolError(ustr(e))
if resp.status != httpclient.CREATED:
logger.warn("{0} for POST {1}".format(resp.status, url))
def _get_trans_cert(self):
trans_crt_file = os.path.join(conf.get_lib_dir(),
TRANSPORT_CERT_FILE_NAME)
if not os.path.isfile(trans_crt_file):
raise ProtocolError("{0} is missing.".format(trans_crt_file))
content = fileutil.read_file(trans_crt_file)
return textutil.get_bytes_from_pem(content)
def detect(self):
self.get_vminfo()
trans_prv_file = os.path.join(conf.get_lib_dir(),
TRANSPORT_PRV_FILE_NAME)
trans_cert_file = os.path.join(conf.get_lib_dir(),
TRANSPORT_CERT_FILE_NAME)
cryptutil = CryptUtil(conf.get_openssl_cmd())
cryptutil.gen_transport_cert(trans_prv_file, trans_cert_file)
# "Install" the cert and private key to /var/lib/waagent
thumbprint = cryptutil.get_thumbprint_from_crt(trans_cert_file)
prv_file = os.path.join(conf.get_lib_dir(),
"{0}.prv".format(thumbprint))
crt_file = os.path.join(conf.get_lib_dir(),
"{0}.crt".format(thumbprint))
shutil.copyfile(trans_prv_file, prv_file)
shutil.copyfile(trans_cert_file, crt_file)
self.update_goal_state(forced=True)
def get_vminfo(self):
vminfo = VMInfo()
data, etag = self._get_data(self.identity_uri)
set_properties("vminfo", vminfo, data)
return vminfo
def get_certs(self):
certlist = CertList()
certificatedata = CertificateData()
data, etag = self._get_data(self.cert_uri)
set_properties("certlist", certlist, data)
cert_list = get_properties(certlist)
headers = {
"x-ms-vmagent-public-x509-cert": self._get_trans_cert()
}
for cert_i in cert_list["certificates"]:
certificate_data_uri = cert_i['certificateDataUri']
data, etag = self._get_data(certificate_data_uri, headers=headers)
set_properties("certificatedata", certificatedata, data)
json_certificate_data = get_properties(certificatedata)
self.certs = Certificates(self, json_certificate_data)
        return self.certs
def get_incarnation(self):
# Always return 0 since Azure Stack does not maintain goal state
# incarnation identifiers
return 0
def get_vmagent_manifests(self):
self.update_goal_state()
data, etag = self._get_data(self.vmagent_uri)
if self.agent_etag is None or self.agent_etag < etag:
self.agent_etag = etag
# Create a list with a single manifest
# -- The protocol lacks "family," use the configured family
self.agent_manifests = VMAgentManifestList()
manifest = VMAgentManifest()
            manifest.family = conf.get_autoupdate_gafamily()
            if KEY_AGENT_VERSION_URIS not in data:
                raise ProtocolError(
                    "Agent versions missing '{0}': {1}".format(
                        KEY_AGENT_VERSION_URIS, data))
            for version in data[KEY_AGENT_VERSION_URIS]:
                if KEY_URI not in version:
                    raise ProtocolError(
                        "Agent versions missing '{0}': {1}".format(
                            KEY_URI, data))
manifest_uri = VMAgentManifestUri(uri=version[KEY_URI])
manifest.versionsManifestUris.append(manifest_uri)
self.agent_manifests.vmAgentManifests.append(manifest)
return self.agent_manifests, self.agent_etag
def get_vmagent_pkgs(self, vmagent_manifest):
data = None
etag = None
for manifest_uri in vmagent_manifest.versionsManifestUris:
try:
data, etag = self._get_data(manifest_uri.uri)
break
except ProtocolError as e:
logger.verbose(
"Error retrieving agent package from {0}: {1}".format(
manifest_uri, e))
if data is None:
raise ProtocolError(
"Failed retrieving agent package from all URIs")
vmagent_pkgs = ExtHandlerPackageList()
set_properties("vmAgentVersions", vmagent_pkgs, data)
return vmagent_pkgs
def get_ext_handlers(self, last_etag=None):
self.update_goal_state()
headers = {
"x-ms-vmagent-public-x509-cert": self._get_trans_cert()
}
ext_list = ExtHandlerList()
data, etag = self._get_data(self.ext_uri, headers=headers)
if last_etag is None or last_etag < etag:
set_properties("extensionHandlers", ext_list.extHandlers, data)
return ext_list, etag
def get_ext_handler_pkgs(self, ext_handler):
logger.verbose("Get extension handler packages")
pkg_list = ExtHandlerPackageList()
manifest = None
for version_uri in ext_handler.versionUris:
try:
manifest, etag = self._get_data(version_uri.uri)
logger.verbose("Successfully downloaded manifest")
break
except ProtocolError as e:
logger.warn("Failed to fetch manifest: {0}", e)
if manifest is None:
raise ValueError("Extension manifest is empty")
set_properties("extensionPackages", pkg_list, manifest)
return pkg_list
def report_provision_status(self, provision_status):
validate_param('provisionStatus', provision_status, ProvisionStatus)
data = get_properties(provision_status)
self._put_data(self.provision_status_uri, data)
def report_vm_status(self, vm_status):
validate_param('vmStatus', vm_status, VMStatus)
data = get_properties(vm_status)
# TODO code field is not implemented for metadata protocol yet.
# Remove it
handler_statuses = data['vmAgent']['extensionHandlers']
        for handler_status in handler_statuses:
            # pop() with a default never raises, so no try/except is needed
            handler_status.pop('code', None)
self._put_data(self.vm_status_uri, data)
def report_ext_status(self, ext_handler_name, ext_name, ext_status):
validate_param('extensionStatus', ext_status, ExtensionStatus)
data = get_properties(ext_status)
uri = self.ext_status_uri.format(ext_name)
self._put_data(uri, data)
def report_event(self, events):
validate_param('events', events, TelemetryEventList)
data = get_properties(events)
self._post_data(self.event_uri, data)
def update_certs(self):
certificates = self.get_certs()
return certificates.cert_list
def update_goal_state(self, forced=False, max_retry=3):
# Start updating goalstate, retry on 410
for retry in range(0, max_retry):
try:
self.update_certs()
return
except:
logger.verbose("Incarnation is out of date. Update goalstate.")
raise ProtocolError("Exceeded max retry updating goal state")
class Certificates(object):
"""
Object containing certificates of host and provisioned user.
"""
def __init__(self, client, json_text):
self.cert_list = CertList()
self.parse(json_text)
def parse(self, json_text):
"""
        Parse multiple certificates into separate files.
"""
data = json_text["certificateData"]
if data is None:
logger.verbose("No data in json_text received!")
return
cryptutil = CryptUtil(conf.get_openssl_cmd())
p7b_file = os.path.join(conf.get_lib_dir(), P7B_FILE_NAME)
# Wrapping the certificate lines.
# decode and save the result into p7b_file
fileutil.write_file(p7b_file, base64.b64decode(data), asbin=True)
ssl_cmd = "openssl pkcs7 -text -in {0} -inform der | grep -v '^-----' "
ret, data = shellutil.run_get_output(ssl_cmd.format(p7b_file))
p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME)
p7m = ("MIME-Version:1.0\n"
"Content-Disposition: attachment; filename=\"{0}\"\n"
"Content-Type: application/x-pkcs7-mime; name=\"{1}\"\n"
"Content-Transfer-Encoding: base64\n"
"\n"
"{2}").format(p7m_file, p7m_file, data)
self.save_cache(p7m_file, p7m)
trans_prv_file = os.path.join(conf.get_lib_dir(),
TRANSPORT_PRV_FILE_NAME)
trans_cert_file = os.path.join(conf.get_lib_dir(),
TRANSPORT_CERT_FILE_NAME)
pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME)
# decrypt certificates
cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file,
pem_file)
        # The parsing process uses the public key to match prv and crt.
buf = []
begin_crt = False
begin_prv = False
prvs = {}
thumbprints = {}
index = 0
v1_cert_list = []
with open(pem_file) as pem:
for line in pem.readlines():
buf.append(line)
if re.match(r'[-]+BEGIN.*KEY[-]+', line):
begin_prv = True
elif re.match(r'[-]+BEGIN.*CERTIFICATE[-]+', line):
begin_crt = True
elif re.match(r'[-]+END.*KEY[-]+', line):
tmp_file = self.write_to_tmp_file(index, 'prv', buf)
pub = cryptutil.get_pubkey_from_prv(tmp_file)
prvs[pub] = tmp_file
buf = []
index += 1
begin_prv = False
elif re.match(r'[-]+END.*CERTIFICATE[-]+', line):
tmp_file = self.write_to_tmp_file(index, 'crt', buf)
pub = cryptutil.get_pubkey_from_crt(tmp_file)
thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file)
thumbprints[pub] = thumbprint
# Rename crt with thumbprint as the file name
crt = "{0}.crt".format(thumbprint)
v1_cert_list.append({
"name": None,
"thumbprint": thumbprint
})
os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt))
buf = []
index += 1
begin_crt = False
# Rename prv key with thumbprint as the file name
for pubkey in prvs:
thumbprint = thumbprints[pubkey]
if thumbprint:
tmp_file = prvs[pubkey]
prv = "{0}.prv".format(thumbprint)
os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv))
for v1_cert in v1_cert_list:
cert = Cert()
set_properties("certs", cert, v1_cert)
self.cert_list.certificates.append(cert)
def save_cache(self, local_file, data):
try:
fileutil.write_file(local_file, data)
except IOError as e:
raise ProtocolError("Failed to write cache: {0}".format(e))
def write_to_tmp_file(self, index, suffix, buf):
file_name = os.path.join(conf.get_lib_dir(),
"{0}.{1}".format(index, suffix))
self.save_cache(file_name, "".join(buf))
return file_name
|
andyliuliming/WALinuxAgent
|
azurelinuxagent/common/protocol/metadata.py
|
Python
|
apache-2.0
| 16,146
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Model classes for Security Groups and Security Group Rules on instances.
"""
from trove.common import cfg
from trove.common import exception
from trove.db.models import DatabaseModelBase
from trove.common.models import NetworkRemoteModelBase
from trove.openstack.common import log as logging
from trove.openstack.common.gettextutils import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def persisted_models():
return {
'security_group': SecurityGroup,
'security_group_rule': SecurityGroupRule,
'security_group_instance_association':
SecurityGroupInstanceAssociation,
}
class SecurityGroup(DatabaseModelBase):
_data_fields = ['id', 'name', 'description', 'user', 'tenant_id',
'created', 'updated', 'deleted', 'deleted_at']
@property
def instance_id(self):
return SecurityGroupInstanceAssociation\
.get_instance_id_by_security_group_id(self.id)
@classmethod
def create_sec_group(cls, name, description, context):
try:
remote_sec_group = RemoteSecurityGroup.create(name,
description,
context)
if not remote_sec_group:
raise exception.SecurityGroupCreationError(
"Failed to create Security Group")
else:
return cls.create(
id=remote_sec_group.data()['id'],
name=name,
description=description,
user=context.user,
tenant_id=context.tenant)
except exception.SecurityGroupCreationError as e:
LOG.exception("Failed to create remote security group")
raise e
@classmethod
def create_for_instance(cls, instance_id, context):
# Create a new security group
name = "%s_%s" % (CONF.trove_security_group_name_prefix, instance_id)
description = _("Security Group for %s") % instance_id
sec_group = cls.create_sec_group(name, description, context)
        # Currently this is locked down by default, since we don't create any
# default security group rules for the security group.
# Create security group instance association
SecurityGroupInstanceAssociation.create(
security_group_id=sec_group["id"],
instance_id=instance_id)
return sec_group
@classmethod
    def get_security_group_by_id_or_instance_id(cls, id, tenant_id):
try:
return SecurityGroup.find_by(id=id,
tenant_id=tenant_id,
deleted=False)
except exception.ModelNotFoundError:
return SecurityGroupInstanceAssociation.\
get_security_group_by_instance_id(id)
def get_rules(self):
return SecurityGroupRule.find_all(group_id=self.id,
deleted=False)
def delete(self, context):
try:
sec_group_rules = self.get_rules()
if sec_group_rules:
for rule in sec_group_rules:
rule.delete(context)
RemoteSecurityGroup.delete(self.id, context)
super(SecurityGroup, self).delete()
except exception.TroveError:
LOG.exception('Failed to delete security group')
raise exception.TroveError("Failed to delete Security Group")
@classmethod
def delete_for_instance(cls, instance_id, context):
try:
association = SecurityGroupInstanceAssociation.find_by(
instance_id=instance_id,
deleted=False)
if association:
sec_group = association.get_security_group()
if sec_group:
sec_group.delete(context)
association.delete()
except (exception.ModelNotFoundError,
exception.TroveError):
            LOG.info(_('Security Group with id: %(id)s '
                       'has already been deleted')
                     % {'id': instance_id})
class SecurityGroupRule(DatabaseModelBase):
_data_fields = ['id', 'parent_group_id', 'protocol', 'from_port',
'to_port', 'cidr', 'group_id', 'created', 'updated',
'deleted', 'deleted_at']
@classmethod
def create_sec_group_rule(cls, sec_group, protocol, from_port,
to_port, cidr, context):
try:
remote_rule_id = RemoteSecurityGroup.add_rule(
sec_group_id=sec_group['id'],
protocol=protocol,
from_port=from_port,
to_port=to_port,
cidr=cidr,
context=context)
if not remote_rule_id:
raise exception.SecurityGroupRuleCreationError(
"Failed to create Security Group Rule")
else:
# Create db record
return cls.create(
id=remote_rule_id,
protocol=protocol,
from_port=from_port,
to_port=to_port,
cidr=cidr,
group_id=sec_group['id'])
except exception.SecurityGroupRuleCreationError as e:
LOG.exception("Failed to create remote security group")
raise e
def get_security_group(self, tenant_id):
return SecurityGroup.find_by(id=self.group_id,
tenant_id=tenant_id,
deleted=False)
def delete(self, context):
try:
# Delete Remote Security Group Rule
RemoteSecurityGroup.delete_rule(self.id, context)
super(SecurityGroupRule, self).delete()
except exception.TroveError:
LOG.exception('Failed to delete security group')
raise exception.SecurityGroupRuleDeletionError(
"Failed to delete Security Group")
class SecurityGroupInstanceAssociation(DatabaseModelBase):
_data_fields = ['id', 'security_group_id', 'instance_id',
'created', 'updated', 'deleted', 'deleted_at']
def get_security_group(self):
return SecurityGroup.find_by(id=self.security_group_id,
deleted=False)
@classmethod
def get_security_group_by_instance_id(cls, id):
association = SecurityGroupInstanceAssociation.find_by(
instance_id=id,
deleted=False)
return association.get_security_group()
@classmethod
def get_instance_id_by_security_group_id(cls, secgroup_id):
association = SecurityGroupInstanceAssociation.find_by(
security_group_id=secgroup_id,
deleted=False)
return association.instance_id
class RemoteSecurityGroup(NetworkRemoteModelBase):
_data_fields = ['id', 'name', 'description', 'rules']
def __init__(self, security_group=None, id=None, context=None):
if id is None and security_group is None:
msg = "Security Group does not have id defined!"
raise exception.InvalidModelError(msg)
elif security_group is None:
driver = self.get_driver(context)
self._data_object = driver.get_sec_group_by_id(group_id=id)
else:
self._data_object = security_group
@classmethod
def create(cls, name, description, context):
"""Creates a new Security Group."""
driver = cls.get_driver(context)
sec_group = driver.create_security_group(
name=name, description=description)
return RemoteSecurityGroup(security_group=sec_group)
@classmethod
def delete(cls, sec_group_id, context):
"""Deletes a Security Group."""
driver = cls.get_driver(context)
driver.delete_security_group(sec_group_id)
@classmethod
def add_rule(cls, sec_group_id, protocol, from_port,
to_port, cidr, context):
"""Adds a new rule to an existing security group."""
driver = cls.get_driver(context)
sec_group_rule = driver.add_security_group_rule(
sec_group_id, protocol, from_port, to_port, cidr)
return sec_group_rule.id
@classmethod
def delete_rule(cls, sec_group_rule_id, context):
"""Deletes a rule from an existing security group."""
driver = cls.get_driver(context)
driver.delete_security_group_rule(sec_group_rule_id)
|
changsimon/trove
|
trove/extensions/security_group/models.py
|
Python
|
apache-2.0
| 9,284
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""WSGI servers to power the LIT backend."""
import socket
import threading
from typing import Optional, Text, List
from wsgiref import validate
import wsgiref.simple_server
from absl import logging
import portpicker
from werkzeug import serving as werkzeug_serving
class BasicDevServer(object):
"""Basic development server; not recommended for deployment."""
def __init__(self, wsgi_app, port: int = 4321, host: Text = '127.0.0.1',
**unused_kw):
self._port = port
self._host = host
self._app = wsgi_app
self.can_act_as_model_server = True
def serve(self):
"""Start serving."""
logging.info(('\n\nStarting Server on port %d'
'\nYou can navigate to %s:%d\n\n'), self._port, self._host,
self._port)
werkzeug_serving.run_simple(
self._host,
self._port,
self._app,
use_debugger=False,
use_reloader=False)
class WsgiServerIpv6(wsgiref.simple_server.WSGIServer):
"""IPv6 based extension of the simple WSGIServer."""
address_family = socket.AF_INET6
class NotebookWsgiServer(object):
"""WSGI server for notebook environments."""
def __init__(self, wsgi_app, host: Text = 'localhost',
port: Optional[int] = None, **unused_kw):
"""Initialize the WSGI server.
Args:
wsgi_app: WSGI pep-333 application to run.
host: Host to run on, defaults to 'localhost'.
port: Port to run on. If not specified, then an unused one will be picked.
"""
self._app = wsgi_app
self._host = host
self._port = port
self._server_thread = None
self.can_act_as_model_server = False
@property
def port(self):
"""Returns the current port or error if the server is not started.
Raises:
RuntimeError: If server has not been started yet.
Returns:
The port being used by the server.
"""
if self._server_thread is None:
raise RuntimeError('Server not started.')
return self._port
def stop(self):
"""Stops the server thread."""
if self._server_thread is None:
return
self._stopping.set()
self._server_thread = None
self._stopped.wait()
def serve(self):
"""Starts a server in a thread using the WSGI application provided.
    Will wait until the thread has started. Calling with an already
    serving application will simply return.
"""
if self._server_thread is not None:
return
if self._port is None:
self._port = portpicker.pick_unused_port()
started = threading.Event()
self._stopped = threading.Event()
self._stopping = threading.Event()
def build_server(started, stopped, stopping):
"""Closure to build the server function to be passed to the thread.
Args:
started: Threading event to notify when started.
stopped: Threading event to notify when stopped.
stopping: Threading event to notify when stopping.
Returns:
        A function that takes a port and WSGI app and notifies
about its status via the threading events provided.
"""
def server(port, wsgi_app):
"""Serve a WSGI application until stopped.
Args:
port: Port number to serve on.
wsgi_app: WSGI application to serve.
"""
try:
httpd = wsgiref.simple_server.make_server(self._host, port, wsgi_app)
except socket.error:
# Try IPv6
httpd = wsgiref.simple_server.make_server(
self._host, port, wsgi_app, server_class=WsgiServerIpv6)
started.set()
httpd.timeout = 30
while not stopping.is_set():
httpd.handle_request()
stopped.set()
return server
server = build_server(started, self._stopped, self._stopping)
server_thread = threading.Thread(
target=server, args=(self._port, self._app))
self._server_thread = server_thread
server_thread.start()
started.wait()
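# Minimal usage sketch (hello_app is hypothetical, not part of LIT):
#   def hello_app(environ, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return [b'hello']
#   server = NotebookWsgiServer(hello_app)
#   server.serve()       # picks an unused port if none was given
#   print(server.port)   # valid once the server thread has started
#   server.stop()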
|
pair-code/lit
|
lit_nlp/lib/wsgi_serving.py
|
Python
|
apache-2.0
| 4,652
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import random  # used by UploadBatcher.SendBatch for boundary generation
import urllib
import urllib2
import sys
import time
import getpass
import appengine_rpc
LIST_DELIMITER = '\n'
TUPLE_DELIMITER = '|'
MAX_BATCH_SIZE = 1000000
MAX_BATCH_COUNT = 100
MAX_BATCH_FILE_SIZE = 200000
BATCH_OVERHEAD = 500
BASE_DIR = "fetchserver"
verbosity = 1
def GetUserCredentials():
"""Prompts the user for a username and password."""
email = None
if email is None:
email = raw_input('Google Account Email: ')
password_prompt = 'Password for %s: ' % email
password = getpass.getpass(password_prompt)
return (email, password)
class UploadBatcher(object):
"""Helper to batch file uploads."""
def __init__(self, what, app_id, version, server):
"""Constructor.
Args:
what: Either 'file' or 'blob' or 'errorblob' indicating what kind of
objects this batcher uploads. Used in messages and URLs.
app_id: The application ID.
version: The application version string.
server: The RPC server.
"""
assert what in ('file', 'blob', 'errorblob'), repr(what)
self.what = what
self.app_id = app_id
self.version = version
self.server = server
self.single_url = '/api/appversion/add' + what
self.batch_url = self.single_url + 's'
self.batching = True
self.batch = []
self.batch_size = 0
def SendBatch(self):
"""Send the current batch on its way.
If successful, resets self.batch and self.batch_size.
Raises:
HTTPError with code=404 if the server doesn't support batching.
"""
boundary = 'boundary'
parts = []
for path, payload, mime_type in self.batch:
while boundary in payload:
boundary += '%04x' % random.randint(0, 0xffff)
assert len(boundary) < 80, 'Unexpected error, please try again.'
            part = '\n'.join([
                '',
                'X-Appcfg-File: %s' % urllib.quote(path),
                'X-Appcfg-Hash: %s' % _Hash(payload),
                'Content-Type: %s' % mime_type,
                'Content-Length: %d' % len(payload),
                'Content-Transfer-Encoding: 8bit',
                '',
                payload,
            ])
            parts.append(part)
        parts.insert(0, 'MIME-Version: 1.0\n'
                        'Content-Type: multipart/mixed; boundary="%s"\n'
                        '\n'
                        'This is a message with multiple parts in MIME format.'
                        % boundary)
parts.append('--\n')
delimiter = '\n--%s' % boundary
payload = delimiter.join(parts)
self.server.Send(self.batch_url, payload=payload, content_type='message/rfc822', app_id=self.app_id, version=self.version)
self.batch = []
self.batch_size = 0
def SendSingleFile(self, path, payload, mime_type):
"""Send a single file on its way."""
self.server.Send(self.single_url, payload=payload, content_type=mime_type, path=path, app_id=self.app_id, version=self.version)
def Flush(self):
"""Flush the current batch.
This first attempts to send the batch as a single request; if that
fails because the server doesn't support batching, the files are
sent one by one, and self.batching is reset to False.
At the end, self.batch and self.batch_size are reset.
"""
if not self.batch:
return
try:
self.SendBatch()
except urllib2.HTTPError, err:
if err.code != 404:
raise
self.batching = False
for path, payload, mime_type in self.batch:
self.SendSingleFile(path, payload, mime_type)
self.batch = []
self.batch_size = 0
def AddToBatch(self, path, payload, mime_type):
"""Batch a file, possibly flushing first, or perhaps upload it directly.
Args:
path: The name of the file.
payload: The contents of the file.
mime_type: The MIME Content-type of the file, or None.
If mime_type is None, application/octet-stream is substituted.
"""
if not mime_type:
mime_type = 'application/octet-stream'
size = len(payload)
if size <= MAX_BATCH_FILE_SIZE:
if (len(self.batch) >= MAX_BATCH_COUNT or self.batch_size + size > MAX_BATCH_SIZE):
self.Flush()
if self.batching:
self.batch.append((path, payload, mime_type))
self.batch_size += size + BATCH_OVERHEAD
return
self.SendSingleFile(path, payload, mime_type)
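# Batching policy sketch (as implemented above): payloads up to
# MAX_BATCH_FILE_SIZE are queued; the queue is flushed before adding a file
# would push it past MAX_BATCH_COUNT entries or MAX_BATCH_SIZE bytes; larger
# payloads bypass batching entirely via SendSingleFile().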
def StatusUpdate(msg):
"""Print a status message to stderr.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print >>sys.stderr, msg
def _Hash(content):
"""Compute the hash of the content.
Args:
content: The data to hash as a string.
Returns:
The string representation of the hash.
"""
m = hashlib.sha1()
m.update(content)
h = m.hexdigest()
return '%s_%s_%s_%s_%s' % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])
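# Example: _Hash() regroups the SHA-1 hex digest into five 8-character,
# underscore-separated chunks, e.g.
#   _Hash('hello') == 'aaf4c61d_dcc5e8a2_dabede0f_3b482cd9_aea9434d'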
def BuildClonePostBody(file_tuples):
"""Build the post body for the /api/clone{files,blobs,errorblobs} urls.
Args:
file_tuples: A list of tuples. Each tuple should contain the entries
appropriate for the endpoint in question.
Returns:
A string containing the properly delimited tuples.
"""
file_list = []
for tup in file_tuples:
path = tup[0]
tup = tup[1:]
file_list.append(TUPLE_DELIMITER.join([path] + list(tup)))
return LIST_DELIMITER.join(file_list)
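# Example: tuples are joined with TUPLE_DELIMITER and files with
# LIST_DELIMITER, so
#   BuildClonePostBody([('a.py', 'h1'), ('b.py', 'h2')])
#   == 'a.py|h1\nb.py|h2'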
def RetryWithBackoff(initial_delay, backoff_factor, max_delay, max_tries, callable_func):
"""Calls a function multiple times, backing off more and more each time.
Args:
initial_delay: Initial delay after first try, in seconds.
backoff_factor: Delay will be multiplied by this factor after each try.
max_delay: Max delay factor.
max_tries: Maximum number of tries.
callable_func: The method to call, will pass no arguments.
Returns:
    True if the function succeeded in one of its tries.
Raises:
Whatever the function raises--an exception will immediately stop retries.
"""
delay = initial_delay
if callable_func():
return True
while max_tries > 1:
StatusUpdate('Will check again in %s seconds.' % delay)
time.sleep(delay)
delay *= backoff_factor
if max_delay and delay > max_delay:
delay = max_delay
max_tries -= 1
if callable_func():
return True
return False
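# Usage sketch (mirrors the call in AppVersionUpload.Commit below): wait 1s,
# then 2s, 4s, ... capped at 60s, for at most 20 tries:
#   RetryWithBackoff(1, 2, 60, 20, self.IsReady)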
class AppVersionUpload(object):
"""Provides facilities to upload a new appversion to the hosting service.
Attributes:
server: The AbstractRpcServer to use for the upload.
config: The AppInfoExternal object derived from the app.yaml file.
app_id: The application string from 'config'.
version: The version string from 'config'.
files: A dictionary of files to upload to the server, mapping path to
hash of the file contents.
in_transaction: True iff a transaction with the server has started.
An AppVersionUpload can do only one transaction at a time.
deployed: True iff the Deploy method has been called.
"""
def __init__(self, server, app_id):
"""Creates a new AppVersionUpload.
Args:
server: The RPC server to use. Should be an instance of HttpRpcServer or
TestRpcServer.
"""
self.server = server
self.app_id = app_id
self.version = 1
self.yaml = """application: %s
version: 1
runtime: python
api_version: 1
handlers:
- url: /portal
script: portal.py
secure: optional
- url: /clean
script: clean.py
login: admin
- url: /clean_guesscnt
script: clean_guesscnt.py
login: admin
- url: /rekey
script: rekey.py
secure: optional
""" % app_id
self.cron_yaml = """cron:
- description: clean expired states and nonces
url: /clean
schedule: every 15 minutes
- description: reset password guess counter
url: /clean_guesscnt
schedule: every 24 hours"""
self.index_yaml = """
indexes:
- kind: Nonce
properties:
- name: random
- name: timestamp
"""
self.files = {}
self.in_transaction = False
self.deployed = False
self.batching = True
self.file_batcher = UploadBatcher('file', self.app_id, self.version, self.server)
def AddFile(self, path, file_handle):
"""Adds the provided file to the list to be pushed to the server.
Args:
path: The path the file should be uploaded as.
file_handle: A stream containing data to upload.
"""
assert not self.in_transaction, 'Already in a transaction.'
assert file_handle is not None
pos = file_handle.tell()
content_hash = _Hash(file_handle.read())
file_handle.seek(pos, 0)
self.files[path] = content_hash
def Begin(self):
"""Begins the transaction, returning a list of files that need uploading.
All calls to AddFile must be made before calling Begin().
Returns:
A list of pathnames for files that should be uploaded using UploadFile()
before Commit() can be called.
"""
assert not self.in_transaction, 'Already in a transaction.'
StatusUpdate('Initiating update.')
self.server.Send('/api/appversion/create', app_id=self.app_id, version=self.version, payload=self.yaml)
self.in_transaction = True
files_to_clone = []
for path, content_hash in self.files.iteritems():
files_to_clone.append((path, content_hash))
files_to_upload = {}
def CloneFiles(url, files, file_type):
"""Sends files to the given url.
Args:
url: the server URL to use.
files: a list of files
file_type: the type of the files
"""
if not files:
return
result = self.server.Send(url, app_id=self.app_id, version=self.version, payload=BuildClonePostBody(files))
if result:
files_to_upload.update(dict((f, self.files[f]) for f in result.split(LIST_DELIMITER)))
CloneFiles('/api/appversion/clonefiles', files_to_clone, 'application')
self.files = files_to_upload
return sorted(files_to_upload.iterkeys())
def DoUploadCron(self):
"""Uploads the cron entries."""
StatusUpdate('Uploading cron entries.')
self.server.Send('/api/cron/update',
app_id=self.app_id,
version=self.version,
payload=self.cron_yaml)
def DoUploadIndex(self):
StatusUpdate('Uploading index definitions.')
self.server.Send('/api/datastore/index/add',
app_id=self.app_id,
version=self.version,
payload=self.index_yaml)
def UploadFile(self, path, file_handle):
"""Uploads a file to the hosting service.
Must only be called after Begin().
The path provided must be one of those that were returned by Begin().
Args:
path: The path the file is being uploaded as.
file_handle: A file-like object containing the data to upload.
Raises:
KeyError: The provided file is not amongst those to be uploaded.
"""
assert self.in_transaction, 'Begin() must be called before UploadFile().'
if path not in self.files:
raise KeyError('File \'%s\' is not in the list of files to be uploaded.' % path)
del self.files[path]
self.file_batcher.AddToBatch(path, file_handle.read(), None)
def Commit(self):
"""Commits the transaction, making the new app version available.
All the files returned by Begin() must have been uploaded with UploadFile()
before Commit() can be called.
This tries the new 'deploy' method; if that fails it uses the old 'commit'.
Raises:
Exception: Some required files were not uploaded.
"""
assert self.in_transaction, 'Begin() must be called before Commit().'
if self.files:
raise Exception('Not all required files have been uploaded.')
try:
self.Deploy()
if not RetryWithBackoff(1, 2, 60, 20, self.IsReady):
raise Exception('Version not ready.')
self.StartServing()
except urllib2.HTTPError, e:
if e.code != 404:
raise
StatusUpdate('Closing update.')
self.server.Send('/api/appversion/commit', app_id=self.app_id, version=self.version)
self.in_transaction = False
def Deploy(self):
"""Deploys the new app version but does not make it default.
All the files returned by Begin() must have been uploaded with UploadFile()
before Deploy() can be called.
Raises:
Exception: Some required files were not uploaded.
"""
assert self.in_transaction, 'Begin() must be called before Deploy().'
if self.files:
raise Exception('Not all required files have been uploaded.')
StatusUpdate('Deploying new version.')
self.server.Send('/api/appversion/deploy', app_id=self.app_id, version=self.version)
self.deployed = True
def IsReady(self):
"""Check if the new app version is ready to serve traffic.
Raises:
Exception: Deploy has not yet been called.
Returns:
True if the server returned the app is ready to serve.
"""
assert self.deployed, 'Deploy() must be called before IsReady().'
StatusUpdate('Checking if new version is ready to serve.')
result = self.server.Send('/api/appversion/isready', app_id=self.app_id, version=self.version)
return result == '1'
def StartServing(self):
"""Start serving with the newly created version.
Raises:
Exception: Deploy has not yet been called.
"""
        assert self.deployed, 'Deploy() must be called before StartServing().'
StatusUpdate('Closing update: new version is ready to start serving.')
self.server.Send('/api/appversion/startserving', app_id=self.app_id, version=self.version)
self.in_transaction = False
def Rollback(self):
"""Rolls back the transaction if one is in progress."""
if not self.in_transaction:
return
StatusUpdate('Rolling back the update.')
self.server.Send('/api/appversion/rollback', app_id=self.app_id, version=self.version)
self.in_transaction = False
self.files = {}
def DoUpload(self):
"""Uploads a new appversion with the given config and files to the server."""
files = ['bpkaspak/conversion.py',
'bpkaspak/ellipticcurve.py', 'bpkaspak/mymath.py',
'bpkaspak/publickey.py', 'bpkaspak/redp.py', 'bpkaspak/__init__.py',
'agreestate.py', 'clean.py', 'clean_guesscnt.py', 'enc.py', 'key.py',
'mailhide.py', 'nonce.py', 'pendingreq.py', 'portal.py', 'rekey.py',
'password.py']
for file in files:
self.AddFile(file, open("%s/%s" % (BASE_DIR, file), "r"))
try:
missing_files = self.Begin()
if missing_files:
StatusUpdate('Uploading %d files and blobs.' % len(missing_files))
num_files = 0
for missing_file in missing_files:
file_handle = open("%s/%s" % (BASE_DIR, missing_file), "r")
try:
self.UploadFile(missing_file, file_handle)
finally:
file_handle.close()
num_files += 1
self.file_batcher.Flush()
StatusUpdate('Uploaded %d files and blobs' % num_files)
self.Commit()
self.DoUploadCron()
self.DoUploadIndex()
except:
self.Rollback()
raise
def main():
print "==========Uploader for SecureGAppProxy=============="
if len(sys.argv) == 2 and sys.argv[1] != "update" and sys.argv[1] != "rollback":
print "Usage: %s [update|rollback]" % sys.argv[0]
return
    rpc_server = appengine_rpc.HttpRpcServer(
        "appengine.google.com", GetUserCredentials, "GAppProxy Uploader",
        "0.0.1", host_override=None, save_cookies=True, auth_tries=3,
        account_type='HOSTED_OR_GOOGLE', secure=True)
appspot_id = raw_input("Appspot ID: ")
appspot_domain = "%s.appspot.com" % appspot_id
appversion = AppVersionUpload(rpc_server, appspot_id)
if len(sys.argv) == 2 and sys.argv[1] == "rollback":
appversion.in_transaction = True
appversion.Rollback()
else: # update
import chpwd, os
h = chpwd.get_password_hash(appspot_domain)
pwdfile = open(os.path.join(BASE_DIR, 'password.py'),'w+')
pwdfile.write('password = "%s"' % h)
pwdfile.close()
appversion.DoUpload()
os.remove(os.path.join(BASE_DIR, 'password.py'))
raw_input('Press enter to continue...')
if __name__ == "__main__":
main()
|
tectronics/secure-gappproxy
|
uploader/uploader.py
|
Python
|
gpl-3.0
| 15,597
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-03-09 16:54
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parliament', '0002_person_clean_name'),
]
operations = [
migrations.AlterField(
model_name='parlsession',
name='total_seats',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='parlsession',
name='years',
field=django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), null=True, size=None),
),
]
|
mcallaghan/tmv
|
BasicBrowser/parliament/migrations/0003_auto_20180309_1654.py
|
Python
|
gpl-3.0
| 723
|
__author__ = 'jiataogu'
|
MingyuanXie/CopyNet
|
emolga/models/__init__.py
|
Python
|
mit
| 24
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import F, Q
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from yepes import fields
from yepes.apps import apps
from yepes.cache import LookupTable
from yepes.contrib.registry import registry
from yepes.loading import LazyModel
from yepes.model_mixins import (
Enableable,
Illustrated,
Logged,
MetaData,
Orderable,
Slugged,
)
from yepes.utils.emails import normalize_email, validate_email
from yepes.utils.html import extract_text
from yepes.utils.properties import described_property
from yepes.validators.email import DOMAIN_RE
NewsletterManager = apps.get_class('newsletters.managers', 'NewsletterManager')
Delivery = LazyModel('newsletters', 'Delivery')
Domain = LazyModel('newsletters', 'Domain')
class AbstractBounce(models.Model):
message = models.ForeignKey(
'Message',
on_delete=models.CASCADE,
related_name='bounces',
verbose_name=_('Message'))
newsletter = models.ForeignKey(
'Newsletter',
on_delete=models.CASCADE,
related_name='bounces',
verbose_name=_('Newsletter'))
subscriber = models.ForeignKey(
'Subscriber',
null=True,
on_delete=models.SET_NULL,
related_name='bounces',
verbose_name=_('Subscriber'))
domain = models.ForeignKey(
'Domain',
null=True,
on_delete=models.SET_NULL,
related_name='bounces',
verbose_name=_('E-mail Domain'))
date = models.DateTimeField(
default=timezone.now,
verbose_name=_('Bounce Date'))
header = fields.TextField(
blank=True,
verbose_name=_('Header'))
body = fields.TextField(
blank=True,
verbose_name=_('Body'))
class Meta:
abstract = True
ordering = ['-date']
verbose_name = _('Bounce')
verbose_name_plural = _('Bounces')
def save(self, **kwargs):
if self.pk is None:
if (self.domain_id is None
and self.subscriber_id is not None):
self.domain_id = self.subscriber.email_domain_id
if (self.newsletter_id is None
and self.message_id is not None):
self.newsletter_id = self.message.newsletter_id
super(AbstractBounce, self).save(**kwargs)
class AbstractClick(models.Model):
link = models.ForeignKey(
'MessageLink',
editable=False,
on_delete=models.CASCADE,
related_name='clicks',
verbose_name=_('Message Link'))
message = models.ForeignKey(
'Message',
editable=False,
on_delete=models.CASCADE,
related_name='clicks',
verbose_name=_('Message'))
newsletter = models.ForeignKey(
'Newsletter',
editable=False,
on_delete=models.CASCADE,
related_name='clicks',
verbose_name=_('Newsletter'))
subscriber = models.ForeignKey(
'Subscriber',
editable=False,
null=True,
on_delete=models.SET_NULL,
related_name='clicks',
verbose_name=_('Subscriber'))
domain = models.ForeignKey(
'Domain',
editable=False,
null=True,
on_delete=models.SET_NULL,
related_name='clicks',
verbose_name=_('E-mail Domain'))
date = models.DateTimeField(
auto_now_add=True,
editable=False,
verbose_name=_('Click Date'))
class Meta:
abstract = True
ordering = ['-date']
verbose_name = _('Click')
verbose_name_plural = _('Clicks')
def save(self, **kwargs):
if self.pk is None:
if (self.domain_id is None
and self.subscriber_id is not None):
self.domain_id = self.subscriber.email_domain_id
if (self.newsletter_id is None
and self.message_id is not None):
self.newsletter_id = self.message.newsletter_id
super(AbstractClick, self).save(**kwargs)
class AbstractDelivery(models.Model):
message = models.ForeignKey(
'Message',
editable=False,
on_delete=models.CASCADE,
related_name='deliveries',
verbose_name=_('Message'))
newsletter = models.ForeignKey(
'Newsletter',
editable=False,
on_delete=models.CASCADE,
related_name='deliveries',
verbose_name=_('Newsletter'))
subscriber = models.ForeignKey(
'Subscriber',
editable=False,
on_delete=models.CASCADE,
related_name='deliveries',
verbose_name=_('Subscriber'))
domain = models.ForeignKey(
'Domain',
editable=False,
on_delete=models.CASCADE,
related_name='deliveries',
verbose_name=_('E-mail Domain'))
date = models.DateTimeField(
db_index=True,
editable=False,
verbose_name=_('Estimated Date'))
is_processed = fields.BooleanField(
db_index=True,
default=False,
editable=False,
verbose_name=_('Is Processed?'))
process_date = models.DateTimeField(
blank=True,
editable=False,
null=True,
verbose_name=_('Effective Date'))
is_bounced = fields.BooleanField(
db_index=True,
default=False,
editable=False,
verbose_name=_('Is Bounced?'))
bounce_date = models.DateTimeField(
blank=True,
editable=False,
null=True,
verbose_name=_('Bounce Date'))
is_opened = fields.BooleanField(
db_index=True,
default=False,
editable=False,
verbose_name=_('Is Opened?'))
open_date = models.DateTimeField(
blank=True,
editable=False,
null=True,
verbose_name=_('Open Date'))
is_clicked = fields.BooleanField(
db_index=True,
default=False,
editable=False,
verbose_name=_('Is Clicked?'))
click_date = models.DateTimeField(
blank=True,
editable=False,
null=True,
verbose_name=_('Click Date'))
class Meta:
abstract = True
ordering = ['-date']
unique_together = [('message', 'subscriber')]
verbose_name = _('Delivery')
verbose_name_plural = _('Deliveries')
def save(self, **kwargs):
if self.pk is None:
if (self.domain_id is None
and self.subscriber_id is not None):
self.domain_id = self.subscriber.email_domain_id
if (self.newsletter_id is None
and self.message_id is not None):
self.newsletter_id = self.message.newsletter_id
super(AbstractDelivery, self).save(**kwargs)
# PROPERTIES
@described_property(_('Response Time'))
def response_time(self):
if self.process_date is None or self.open_date is None:
return None
else:
return self.open_date - self.process_date
@python_2_unicode_compatible
class AbstractDomain(models.Model):
name = fields.CharField(
editable=False,
max_length=63,
unique=True,
validators=[RegexValidator(DOMAIN_RE)],
verbose_name=_('Domain'))
is_trusted = fields.BooleanField(
default=False,
verbose_name=_('Is Trusted?'))
objects = models.Manager()
cache = LookupTable(['name'])
class Meta:
abstract = True
ordering = ['name']
verbose_name = _('E-mail Domain')
verbose_name_plural = _('Domains')
def __str__(self):
return self.name
@staticmethod
def autocomplete_search_fields():
return ('name__icontains', )
@python_2_unicode_compatible
class AbstractMessage(Logged, Slugged):
newsletter = models.ForeignKey(
'Newsletter',
on_delete=models.CASCADE,
related_name='messages',
verbose_name=_('Newsletter'))
guid = fields.GuidField(
max_length=15,
editable=False,
unique=True,
verbose_name=_('Global Unique Identifier'))
subject = fields.CharField(
max_length=255,
verbose_name=_('Subject'))
html = fields.TextField(
verbose_name=_('HTML Version'))
text = fields.TextField(
blank=True,
verbose_name=_('Plain Text Version'))
is_sent = fields.BooleanField(
default=False,
editable=False,
verbose_name=_('Is Sent?'))
class Meta:
abstract = True
verbose_name = _('Message')
verbose_name_plural = _('Messages')
def __str__(self):
return self.subject
@staticmethod
def autocomplete_search_fields():
return ('subject__icontains', )
def get_absolute_url(self):
kwargs = {
#'message_pk': self.pk,
#'message_slug': self.slug,
'message_guid': self.guid,
}
return reverse('message', kwargs=kwargs)
def save(self, **kwargs):
if not self.text:
self.text = extract_text(self.html)
super(AbstractMessage, self).save(**kwargs)
@python_2_unicode_compatible
class AbstractMessageImage(Illustrated, Logged):
guid = fields.GuidField(
max_length=7,
editable=False,
unique=True,
verbose_name=_('Global Unique Identifier'))
name = fields.IdentifierField(
unique=True,
max_length=63,
verbose_name=_('Name'))
class Meta:
abstract = True
folder_name = 'newsletters'
ordering = ['name']
verbose_name = _('Message Image')
verbose_name_plural = _('Message Images')
def __str__(self):
return self.name
def get_upload_path(self, filename):
return super(AbstractMessageImage, self).get_upload_path(self.name)
@python_2_unicode_compatible
class AbstractMessageLink(Logged):
guid = fields.GuidField(
max_length=15,
editable=False,
unique=True,
verbose_name=_('Global Unique Identifier'))
url = models.URLField(
editable=True,
unique=True,
max_length=255,
verbose_name=_('URL'))
class Meta:
abstract = True
ordering = ['url']
verbose_name = _('Message Link')
verbose_name_plural = _('Message Links')
def __str__(self):
return self.url
@python_2_unicode_compatible
class AbstractNewsletter(Orderable, Logged, Slugged, MetaData):
"""
A regularly distributed publication to which subscribers can subscribe.
"""
connection = fields.CachedForeignKey(
'emails.Connection',
on_delete=models.CASCADE,
related_name='newsletters',
verbose_name=_('E-mail Connection'))
guid = fields.GuidField(
max_length=7,
editable=False,
unique=True,
verbose_name=_('Global Unique Identifier'))
name = fields.CharField(
unique=True,
max_length=63,
verbose_name=_('Name'))
description = fields.RichTextField(
blank=True,
verbose_name=_('Description'))
is_published = fields.BooleanField(
default=True,
verbose_name=_('Is Published?'))
sender_name = fields.CharField(
max_length=127,
verbose_name=_("Sender's Name"))
sender_address = fields.CharField(
max_length=127,
verbose_name=_("Sender's Address"))
reply_to_name = fields.CharField(
blank=True,
max_length=127,
verbose_name=_("Reply To Name"))
reply_to_address = fields.CharField(
blank=True,
max_length=127,
verbose_name=_("Reply To Address"))
return_path_name = fields.CharField(
blank=True,
max_length=127,
verbose_name=_("Return To Name"))
return_path_address = fields.CharField(
blank=True,
max_length=127,
verbose_name=_("Return To Address"))
objects = NewsletterManager()
cache = LookupTable(['guid', 'name'])
class Meta:
abstract = True
verbose_name = _('Newsletter')
verbose_name_plural = _('Newsletters')
def __str__(self):
return self.name
@staticmethod
def autocomplete_search_fields():
return ('name__icontains', )
# CUSTOM METHODS
def get_default_meta_index(self):
if self.is_published:
return super(AbstractNewsletter, self).get_default_meta_index()
else:
return False
# PROPERTIES
@described_property(_('Reply To'))
def reply_to(self):
if self.reply_to_name:
return '"{0}" <{1}>'.format(self.reply_to_name, self.reply_to_address)
elif self.reply_to_address:
return self.reply_to_address
else:
return None
@described_property(_('Return Path'))
def return_path(self):
if self.return_path_name:
return '"{0}" <{1}>'.format(self.return_path_name, self.return_path_address)
elif self.return_path_address:
return self.return_path_address
else:
return None
@described_property(_('Sender'))
def sender(self):
if self.sender_name:
return '"{0}" <{1}>'.format(self.sender_name, self.sender_address)
elif self.sender_address:
return self.sender_address
else:
return None
class AbstractOpen(models.Model):
message = models.ForeignKey(
'Message',
editable=False,
on_delete=models.CASCADE,
related_name='opens',
verbose_name=_('Message'))
newsletter = models.ForeignKey(
'Newsletter',
editable=False,
on_delete=models.CASCADE,
related_name='opens',
verbose_name=_('Newsletter'))
subscriber = models.ForeignKey(
'Subscriber',
editable=False,
null=True,
on_delete=models.SET_NULL,
related_name='opens',
verbose_name=_('Subscriber'))
domain = models.ForeignKey(
'Domain',
editable=False,
null=True,
on_delete=models.SET_NULL,
related_name='opens',
verbose_name=_('E-mail Domain'))
date = models.DateTimeField(
auto_now_add=True,
editable=False,
verbose_name=_('Open Date'))
class Meta:
abstract = True
ordering = ['-date']
verbose_name = _('Open')
verbose_name_plural = _('Opens')
def save(self, **kwargs):
if self.pk is None:
if (self.domain_id is None
and self.subscriber_id is not None):
self.domain_id = self.subscriber.email_domain_id
if (self.newsletter_id is None
and self.message_id is not None):
self.newsletter_id = self.message.newsletter_id
super(AbstractOpen, self).save(**kwargs)
@python_2_unicode_compatible
class AbstractSubscriber(Enableable, Logged):
guid = fields.GuidField(
max_length=31,
editable=False,
unique=True,
verbose_name=_('Global Unique Identifier'))
email_address = fields.EmailField(
max_length=127,
unique=True,
verbose_name=_('E-mail Address'))
email_domain = models.ForeignKey(
'Domain',
editable=False,
on_delete=models.CASCADE,
related_name='subscribers',
verbose_name=_('E-mail Domain'))
first_name = fields.CharField(
blank=True,
max_length=63,
verbose_name=_('First Name'))
last_name = fields.CharField(
blank=True,
max_length=63,
verbose_name=_('Last Name'))
newsletters = models.ManyToManyField(
'Newsletter',
through='Subscription',
related_name='subscribers',
verbose_name=_('Newsletters'))
tags = models.ManyToManyField(
'SubscriberTag',
blank=True,
related_name='subscribers',
verbose_name=_('Tags'))
score = fields.FloatField(
blank=True,
db_index=True,
default=2.0,
editable=False,
verbose_name=_('Score'))
class Meta:
abstract = True
ordering = ['email_address']
verbose_name = _('Subscriber')
verbose_name_plural = _('Subscribers')
def __str__(self):
return self.email_address
@staticmethod
def autocomplete_search_fields():
return ('email_address__icontains',
'first_name__icontains',
'last_name__icontains')
# CUSTOM METHODS
def is_subscribed_to(self, newsletter):
if not self._get_pk_val():
return False
else:
return self.subscriptions.filter(newsletter=newsletter).exists()
def set_email(self, address):
address = normalize_email(address)
if not validate_email(address):
msg = "'{0}' is not a valid email address."
raise ValueError(msg.format(address))
_, domain_name = address.rsplit('@', 1)
domain, _ = Domain.objects.get_or_create(name=domain_name)
self.email_address = address
self.email_domain = domain
def resubscribe_to(self, newsletter):
if not self.is_subscribed_to(newsletter):
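            # Remove the most recent unsubscription record before creating
            # the new subscription, so the history reflects the resubscribe.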
qs = self.unsubscriptions.filter(newsletter=newsletter)
unsubscription = qs.order_by('date').last()
if unsubscription is not None:
unsubscription.delete()
return self.subscriptions.create(newsletter=newsletter)
def subscribe_to(self, newsletter):
if self.is_subscribed_to(newsletter):
return None
else:
return self.subscriptions.create(newsletter=newsletter)
def unsubscribe_from(self, newsletter, reason=None, last_message=None):
if self.is_subscribed_to(newsletter):
self.subscriptions.filter(newsletter=newsletter).delete()
kwargs = {
'newsletter': newsletter,
'reason': reason,
'last_message': last_message,
}
return self.unsubscriptions.create(**kwargs)
# PROPERTIES
@described_property(_('Name'))
def full_name(self):
return ' '.join((
self.first_name,
self.last_name,
)).strip()
@python_2_unicode_compatible
class AbstractSubscriberTag(Logged):
name = fields.CharField(
unique=True,
max_length=63,
verbose_name=_('Name'))
description = fields.TextField(
blank=True,
verbose_name=_('Description'))
objects = models.Manager()
cache = LookupTable(['name'])
class Meta:
abstract = True
ordering = ['name']
verbose_name = _('Subscriber Tag')
verbose_name_plural = _('Subscriber Tags')
def __str__(self):
return self.name
@staticmethod
def autocomplete_search_fields():
return ('name__icontains', )
class AbstractSubscription(models.Model):
newsletter = models.ForeignKey(
'Newsletter',
editable=False,
on_delete=models.CASCADE,
related_name='subscriptions',
verbose_name=_('Newsletter'))
subscriber = models.ForeignKey(
'Subscriber',
editable=False,
on_delete=models.CASCADE,
related_name='subscriptions',
verbose_name=_('Subscriber'))
domain = models.ForeignKey(
'Domain',
editable=False,
null=True,
on_delete=models.SET_NULL,
related_name='subscriptions',
verbose_name=_('E-mail Domain'))
date = models.DateTimeField(
auto_now_add=True,
editable=False,
verbose_name=_('Subscription Date'))
class Meta:
abstract = True
ordering = ['-date']
unique_together = [('newsletter', 'subscriber')]
verbose_name = _('Subscription')
verbose_name_plural = _('Subscriptions')
def save(self, **kwargs):
if (self.pk is None
and self.domain_id is None
and self.subscriber_id is not None):
self.domain_id = self.subscriber.email_domain_id
super(AbstractSubscription, self).save(**kwargs)
class AbstractUnsubscription(models.Model):
newsletter = models.ForeignKey(
'Newsletter',
editable=False,
on_delete=models.CASCADE,
related_name='unsubscriptions',
verbose_name=_('Newsletter'))
subscriber = models.ForeignKey(
'Subscriber',
editable=False,
on_delete=models.CASCADE,
related_name='unsubscriptions',
verbose_name=_('Subscriber'))
domain = models.ForeignKey(
'Domain',
editable=False,
null=True,
on_delete=models.SET_NULL,
related_name='unsubscriptions',
verbose_name=_('E-mail Domain'))
date = models.DateTimeField(
auto_now_add=True,
editable=False,
verbose_name=_('Unsubscribe Date'))
reason = models.ForeignKey(
'UnsubscriptionReason',
editable=False,
null=True,
on_delete=models.CASCADE,
related_name='unsubscriptions',
verbose_name=_('Unsubscription Reason'))
last_message = models.ForeignKey(
'Message',
editable=False,
null=True,
on_delete=models.CASCADE,
related_name='unsubscriptions',
verbose_name=_('Last Message'))
class Meta:
abstract = True
ordering = ['-date']
verbose_name = _('Unsubscription')
verbose_name_plural = _('Unsubscriptions')
def save(self, **kwargs):
if self.pk is None:
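            # Backfill last_message with the most recently delivered message
            # for this newsletter/subscriber pair, and the domain from the
            # subscriber, as in the other models above.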
if (self.last_message_id is None
and self.newsletter_id is not None
and self.subscriber_id is not None):
delivery = Delivery.objects.filter(
newsletter=self.newsletter_id,
subscriber=self.subscriber_id,
).order_by(
'date'
).last()
if delivery is not None:
self.last_message_id = delivery.message_id
if (self.domain_id is None
and self.subscriber_id is not None):
self.domain_id = self.subscriber.email_domain_id
super(AbstractUnsubscription, self).save(**kwargs)
@python_2_unicode_compatible
class AbstractUnsubscriptionReason(Orderable):
description = fields.CharField(
max_length=255,
verbose_name=_('Description'))
class Meta:
abstract = True
verbose_name = _('Unsubscription Reason')
verbose_name_plural = _('Unsubscription Reasons')
def __str__(self):
return self.description
@staticmethod
def autocomplete_search_fields():
return ('description__icontains', )
|
samuelmaudo/yepes
|
yepes/contrib/newsletters/abstract_models.py
|
Python
|
bsd-3-clause
| 24,266
|
"""
MultiVolumeVisualization
:Authors:
Berend Klein Haneveld
"""
from PySide.QtCore import QObject
from PySide.QtCore import Signal
from vtk import vtkColorTransferFunction
from vtk import vtkPiecewiseFunction
# Define render types for multi render
MultiVisualizationTypeMix = "Default mix"
MultiVisualizationTypeMIP = "Combined MIP"
MultiVisualizationTypeMIDA = "Single MIDA"
# Volume Properties
class MultiVolumeVisualization(QObject):
"""
MultiVolumeVisualization is the superclass for all multi
volume visualizations.
"""
updatedTransferFunction = Signal()
def __init__(self):
super(MultiVolumeVisualization, self).__init__()
self.fixedVolProp = None
self.movingVolProp = None
def getParameterWidget(self):
raise NotImplementedError()
def setImageData(self, fixedImageData, movingImageData):
pass
def updateTransferFunctions(self):
raise NotImplementedError()
def valueChanged(self, value):
raise NotImplementedError()
def setMapper(self, mapper):
raise NotImplementedError()
def setFixedVisualization(self, visualization):
"""
:type visualization: VolumeVisualization
"""
pass
def setMovingVisualization(self, visualization):
"""
:type visualization: VolumeVisualization
"""
pass
# Convenience functions
def CreateFunctionFromProperties(opacity, volProp):
"""
:type opacityFunction: vtkVolumeProperty
"""
opacityFunction = volProp.GetScalarOpacity()
for index in range(opacityFunction.GetSize()):
val = [0 for x in range(4)]
opacityFunction.GetNodeValue(index, val)
val[1] = val[1] * float(opacity)
opacityFunction.SetNodeValue(index, val)
return opacityFunction
def CreateEmptyFunctions():
"""
:rtype: vtkColorTransferFunction, vtkPiecewiseFunction
"""
# Transfer functions and properties
colorFunction = vtkColorTransferFunction()
colorFunction.AddRGBPoint(0, 0, 0, 0, 0.0, 0.0)
colorFunction.AddRGBPoint(1000, 0, 0, 0, 0.0, 0.0)
opacityFunction = vtkPiecewiseFunction()
opacityFunction.AddPoint(0, 0, 0.0, 0.0)
opacityFunction.AddPoint(1000, 0, 0.0, 0.0)
return colorFunction, opacityFunction
def CreateRangeFunctions(imageData, color=None):
"""
    :type imageData: vtkImageData
:type color: array of length 3 (r, g, b)
:rtype: vtkColorTransferFunction, vtkPiecewiseFunction
"""
col = [1, 1, 1]
if color is not None:
col = color
minimum, maximum = imageData.GetScalarRange()
colorFunction = vtkColorTransferFunction()
colorFunction.AddRGBSegment(minimum, 0, 0, 0, maximum, col[0], col[1], col[2])
opacityFunction = vtkPiecewiseFunction()
opacityFunction.AddSegment(minimum, 0.0, maximum, 1.0)
return colorFunction, opacityFunction
|
berendkleinhaneveld/Registrationshop
|
ui/visualizations/MultiVolumeVisualization.py
|
Python
|
mit
| 2,648
|
import unittest
from dgvm.data_structures import Heap
class HeapTests(unittest.TestCase):
def test_history(self):
t = Heap(128)
t.checkpoint()
t[0] = 1
assert t.get(0) == 1
t.revert()
assert t.get(0) is None
t[0] = 'abcde'
assert t[0] == 'abcde'
t.checkpoint()
t[0] = 'xyz'
assert t[0] == 'xyz'
t.revert()
assert t[0] == 'abcde'
if __name__ == '__main__':
unittest.main()
|
matheus2740/alpha_empire
|
dgvm/tests/heap_tests.py
|
Python
|
gpl-3.0
| 518
|
#!/usr/bin/env python
# cardinal_pythonlib/sizeformatter.py
"""
===============================================================================
Original code copyright (C) 2009-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
"""
from typing import Union
def sizeof_fmt(num: float, suffix: str = 'B') -> str:
"""
    Formats a number of bytes in a human-readable binary format (e.g. ``2048``
    becomes ``'2.0KiB'``); from https://stackoverflow.com/questions/1094841.
"""
for unit in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
# see: https://en.wikipedia.org/wiki/Binary_prefix
SYMBOLS = {
'customary': ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                      'zetta', 'yotta'),
'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
'zebi', 'yobi'),
}
def bytes2human(n: Union[int, float],
format: str = '%(value).1f %(symbol)s',
symbols: str = 'customary') -> str:
"""
Converts a number of bytes into a human-readable format.
From https://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/.
Args:
n: number of bytes
format: a format specification string
symbols: can be one of ``"customary"``, ``"customary_ext"``, ``"iec"``
or ``"iec_ext"``; see https://goo.gl/kTQMs
Returns:
the formatted number
Examples:
>>> bytes2human(0)
'0.0 B'
>>> bytes2human(0.9)
'0.0 B'
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1.9)
'1.0 B'
>>> bytes2human(1024)
'1.0 K'
>>> bytes2human(1048576)
'1.0 M'
>>> bytes2human(1099511627776127398123789121)
'909.5 Y'
>>> bytes2human(9856, symbols="customary")
'9.6 K'
>>> bytes2human(9856, symbols="customary_ext")
'9.6 kilo'
>>> bytes2human(9856, symbols="iec")
'9.6 Ki'
>>> bytes2human(9856, symbols="iec_ext")
'9.6 kibi'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
""" # noqa
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = SYMBOLS[symbols]
prefix = {}
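    # Build the prefix multipliers via bit shifts: K = 2**10, M = 2**20, etc.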
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def human2bytes(s: str) -> int:
"""
Modified from
https://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/.
Attempts to guess the string format based on default symbols
set and return the corresponding bytes as an integer.
When unable to recognize the format, :exc:`ValueError` is raised.
>>> human2bytes('0 B')
0
>>> human2bytes('1 K')
1024
>>> human2bytes('1 M')
1048576
>>> human2bytes('1 Gi')
1073741824
>>> human2bytes('1 tera')
1099511627776
>>> human2bytes('0.5kilo')
512
>>> human2bytes('0.1 byte')
0
>>> human2bytes('1 k') # k is an alias for K
1024
>>> human2bytes('12 foo')
Traceback (most recent call last):
...
ValueError: can't interpret '12 foo'
""" # noqa
if not s:
raise ValueError(f"Can't interpret {s!r} as integer")
try:
return int(s)
except ValueError:
pass
init = s
num = ""
    while s and (s[0:1].isdigit() or s[0:1] == '.'):
num += s[0]
s = s[1:]
num = float(num)
letter = s.strip()
for name, sset in SYMBOLS.items():
if letter in sset:
break
else:
if letter == 'k':
# treat 'k' as an alias for 'K' as per https://en.wikipedia.org/wiki/Binary_prefix # noqa
sset = SYMBOLS['customary']
letter = letter.upper()
else:
raise ValueError("can't interpret %r" % init)
prefix = {sset[0]: 1}
for i, s in enumerate(sset[1:]):
prefix[s] = 1 << (i + 1) * 10
return int(num * prefix[letter])
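# A minimal usage sketch (not part of the original module): for exact
# power-of-two values, bytes2human and human2bytes are mutual inverses.
if __name__ == '__main__':
    assert human2bytes(bytes2human(1048576)) == 1048576  # '1.0 M' -> 1048576
    print(sizeof_fmt(123456789))  # -> '117.7MiB'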
|
RudolfCardinal/pythonlib
|
cardinal_pythonlib/sizeformatter.py
|
Python
|
apache-2.0
| 5,274
|
#
# This file is part of Scalable COncurrent Operations in Python (SCOOP).
#
# SCOOP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# SCOOP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SCOOP. If not, see <http://www.gnu.org/licenses/>.
#
"""
This example shows a way to parallelize binary tree traversal.
"""
import random
import sys
from itertools import cycle
from scoop import futures, shared
def maxTreeDepthDivide(rootValue, currentDepth=0, parallelLevel=2):
"""Finds a tree node that represents rootValue and computes the max depth
of this tree branch.
This function will emit new futures until currentDepth=parallelLevel"""
thisRoot = shared.getConst('myTree').search(rootValue)
if currentDepth >= parallelLevel:
return thisRoot.maxDepth(currentDepth)
else:
# Base case
if not any([thisRoot.left, thisRoot.right]):
return currentDepth
if not all([thisRoot.left, thisRoot.right]):
return thisRoot.maxDepth(currentDepth)
# Parallel recursion
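        # cycle() repeats the scalar arguments so futures.map can pair each
        # child payload with the same depth and parallel level.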
return max(
futures.map(
maxTreeDepthDivide,
[
thisRoot.left.payload,
thisRoot.right.payload,
],
cycle([currentDepth + 1]),
cycle([parallelLevel]),
)
)
class BinaryTreeNode(object):
"""A simple binary tree."""
def __init__(self, payload=None, left=None, right=None):
self.payload = payload
self.left = left
self.right = right
def insert(self, value):
"""Insert a value in the tree"""
if not self.payload or value == self.payload:
self.payload = value
else:
if value <= self.payload:
if self.left:
self.left.insert(value)
else:
self.left = BinaryTreeNode(value)
else:
if self.right:
self.right.insert(value)
else:
self.right = BinaryTreeNode(value)
def maxDepth(self, currentDepth=0):
"""Compute the depth of the longest branch of the tree"""
if not any((self.left, self.right)):
return currentDepth
result = 0
for child in (self.left, self.right):
if child:
result = max(result, child.maxDepth(currentDepth + 1))
return result
def search(self, value):
"""Find an element in the tree"""
if self.payload == value:
return self
else:
if value <= self.payload:
if self.left:
return self.left.search(value)
else:
if self.right:
return self.right.search(value)
return None
if __name__ == '__main__':
import time
print("Beginning Tree generation.")
    # Generate the same tree on every worker.
random.seed(314159265)
exampleTree = BinaryTreeNode(0)
for _ in range(128000):
exampleTree.insert(random.randint(-sys.maxsize - 1, sys.maxsize))
shared.setConst(myTree=exampleTree)
print("Tree generation done.")
    # Split the tree in two and process the left and right branches in parallel
ts = time.time()
presult = max(
futures.map(
maxTreeDepthDivide,
[exampleTree.payload],
parallelLevel=2,
)
)
pts = time.time() - ts
# Serial computation of tree depth
ts = time.time()
sresult = exampleTree.maxDepth()
sts = time.time() - ts
print("Parallel result: {0}".format(presult))
print("Serial result: {0}".format(sresult))
print("Parallel time: {0:.5f}s".format(pts))
print("Serial time: {0:.5f}s".format(sts))
assert presult == sresult
|
IGITUGraz/scoop
|
examples/tree_traversal.py
|
Python
|
lgpl-3.0
| 4,364
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import mock
from airflow.providers.google.cloud.operators.dataflow import (
CheckJobRunning, DataflowCreateJavaJobOperator, DataflowCreatePythonJobOperator,
DataflowTemplatedJobStartOperator,
)
from airflow.version import version
TASK_ID = 'test-dataflow-operator'
JOB_NAME = 'test-dataflow-pipeline'
TEMPLATE = 'gs://dataflow-templates/wordcount/template_file'
PARAMETERS = {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': 'gs://test/output/my_output'
}
PY_FILE = 'gs://my-bucket/my-object.py'
PY_INTERPRETER = 'python3'
JAR_FILE = 'gs://my-bucket/example/test.jar'
JOB_CLASS = 'com.test.NotMain'
PY_OPTIONS = ['-m']
DEFAULT_OPTIONS_PYTHON = DEFAULT_OPTIONS_JAVA = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
}
DEFAULT_OPTIONS_TEMPLATE = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f'
}
ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar'}
}
TEST_VERSION = 'v{}'.format(version.replace('.', '-').replace('+', '-'))
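# Dataflow label values may not contain '.' or '+', hence the substitution.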
EXPECTED_ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
}
POLL_SLEEP = 30
GCS_HOOK_STRING = 'airflow.providers.google.cloud.operators.dataflow.{}'
TEST_LOCATION = "custom-location"
class TestDataflowPythonOperator(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowCreatePythonJobOperator(
task_id=TASK_ID,
py_file=PY_FILE,
job_name=JOB_NAME,
py_options=PY_OPTIONS,
dataflow_default_options=DEFAULT_OPTIONS_PYTHON,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP,
location=TEST_LOCATION
)
def test_init(self):
"""Test DataFlowPythonOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.job_name, JOB_NAME)
self.assertEqual(self.dataflow.py_file, PY_FILE)
self.assertEqual(self.dataflow.py_options, PY_OPTIONS)
self.assertEqual(self.dataflow.py_interpreter, PY_INTERPRETER)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_PYTHON)
self.assertEqual(self.dataflow.options,
EXPECTED_ADDITIONAL_OPTIONS)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
        start_python_dataflow.
"""
start_python_hook = dataflow_mock.return_value.start_python_dataflow
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
expected_options = {
'project': 'test',
'staging_location': 'gs://test/staging',
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
}
gcs_provide_file.assert_called_once_with(object_url=PY_FILE)
start_python_hook.assert_called_once_with(
job_name=JOB_NAME,
variables=expected_options,
dataflow=mock.ANY,
py_options=PY_OPTIONS,
py_interpreter=PY_INTERPRETER,
py_requirements=[],
py_system_site_packages=False,
on_new_job_id_callback=mock.ANY,
project_id=None,
location=TEST_LOCATION
)
self.assertTrue(self.dataflow.py_file.startswith('/tmp/dataflow'))
class TestDataflowJavaOperator(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowCreateJavaJobOperator(
task_id=TASK_ID,
jar=JAR_FILE,
job_name=JOB_NAME,
job_class=JOB_CLASS,
dataflow_default_options=DEFAULT_OPTIONS_JAVA,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP,
location=TEST_LOCATION
)
def test_init(self):
"""Test DataflowTemplateOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.job_name, JOB_NAME)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_JAVA)
self.assertEqual(self.dataflow.job_class, JOB_CLASS)
self.assertEqual(self.dataflow.jar, JAR_FILE)
self.assertEqual(self.dataflow.options,
EXPECTED_ADDITIONAL_OPTIONS)
self.assertEqual(self.dataflow.check_if_running, CheckJobRunning.WaitForRun)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
        start_java_dataflow.
"""
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.check_if_running = CheckJobRunning.IgnoreJob
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_provide_file.assert_called_once_with(object_url=JAR_FILE)
start_java_hook.assert_called_once_with(
job_name=JOB_NAME,
variables=mock.ANY,
jar=mock.ANY,
job_class=JOB_CLASS,
append_job_name=True,
multiple_jobs=None,
on_new_job_id_callback=mock.ANY,
project_id=None,
location=TEST_LOCATION
)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_check_job_running_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
start_java_workflow.
"""
dataflow_running = dataflow_mock.return_value.is_job_dataflow_running
dataflow_running.return_value = True
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.check_if_running = True
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_provide_file.assert_not_called()
start_java_hook.assert_not_called()
dataflow_running.assert_called_once_with(
name=JOB_NAME, variables=mock.ANY, project_id=None, location=TEST_LOCATION)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_check_job_not_running_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
        start_java_dataflow with the option to check if the job is running.
"""
dataflow_running = dataflow_mock.return_value.is_job_dataflow_running
dataflow_running.return_value = False
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.check_if_running = True
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_provide_file.assert_called_once_with(object_url=JAR_FILE)
start_java_hook.assert_called_once_with(
job_name=JOB_NAME,
variables=mock.ANY,
jar=mock.ANY,
job_class=JOB_CLASS,
append_job_name=True,
multiple_jobs=None,
on_new_job_id_callback=mock.ANY,
project_id=None,
location=TEST_LOCATION
)
dataflow_running.assert_called_once_with(
name=JOB_NAME, variables=mock.ANY, project_id=None, location=TEST_LOCATION)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_check_multiple_job_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
        start_java_dataflow with the option to check multiple jobs.
"""
dataflow_running = dataflow_mock.return_value.is_job_dataflow_running
dataflow_running.return_value = False
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.multiple_jobs = True
self.dataflow.check_if_running = True
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_provide_file.assert_called_once_with(object_url=JAR_FILE)
start_java_hook.assert_called_once_with(
job_name=JOB_NAME,
variables=mock.ANY,
jar=mock.ANY,
job_class=JOB_CLASS,
append_job_name=True,
multiple_jobs=True,
on_new_job_id_callback=mock.ANY,
project_id=None,
location=TEST_LOCATION
)
dataflow_running.assert_called_once_with(
name=JOB_NAME, variables=mock.ANY, project_id=None, location=TEST_LOCATION
)
class TestDataflowTemplateOperator(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowTemplatedJobStartOperator(
task_id=TASK_ID,
template=TEMPLATE,
job_name=JOB_NAME,
parameters=PARAMETERS,
options=DEFAULT_OPTIONS_TEMPLATE,
dataflow_default_options={"EXTRA_OPTION": "TEST_A"},
poll_sleep=POLL_SLEEP,
location=TEST_LOCATION
)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
def test_exec(self, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
        start_template_dataflow.
"""
start_template_hook = dataflow_mock.return_value.start_template_dataflow
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
expected_options = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f',
'EXTRA_OPTION': "TEST_A"
}
start_template_hook.assert_called_once_with(
job_name=JOB_NAME,
variables=expected_options,
parameters=PARAMETERS,
dataflow_template=TEMPLATE,
on_new_job_id_callback=mock.ANY,
project_id=None,
location=TEST_LOCATION
)
|
wooga/airflow
|
tests/providers/google/cloud/operators/test_dataflow.py
|
Python
|
apache-2.0
| 11,898
|
""" base on http://www.ibm.com/developerworks/cn/opensource/os-cn-pythonwith/index.html
contextlib:closing demo
"""
class closing(object):
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
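# Note: this hand-rolled context manager behaves like contextlib.closing;
# the standard-library version could be dropped in unchanged.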
class ClosingDemo(object):
""" class supporting closing
Question: NO contextlib neither contextmanager? """
def __init__(self):
self.acquire()
def acquire(self):
print('Acquire resources.')
def free(self):
print('Clean up any resources acquired.')
def close(self):
self.free()
if __name__ == '__main__':
with closing(ClosingDemo()):
print('Using resources')
|
nwinds/py-with-study
|
contextlib_closing_demo.py
|
Python
|
mit
| 744
|
import tensorflow as tf
def python_run():
"""
Pythonic way of getting the ``new width`` and ``new height``
"""
input_shape = [32, 32, 3] # HxWxD
filter_shape = [20, 8, 8, 3] # number_of_filtersxHxWxD
stride = 2 # S
valid_padding = 1 # P
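    # Output size: new = (input - filter + 2 * padding) // stride + 1.
    # Caveat: TensorFlow's 'VALID' padding in tensor_run below means P=0,
    # so the conv op actually yields 13x13x20, not the 14x14x20 computed here.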
    new_height = (input_shape[0] - filter_shape[1] + 2 * valid_padding) // stride + 1
    new_width = (input_shape[1] - filter_shape[2] + 2 * valid_padding) // stride + 1
new_depth = filter_shape[0] # number of filters is the depth
print("{}x{}x{}".format(new_height, new_width, new_depth))
def tensor_run():
"""
Running convolution in 2D with TensorFlow.
"""
inputs = tf.placeholder(tf.float32, (None, 32, 32, 3))
filter_weights = tf.Variable(tf.truncated_normal((8, 8, 3, 20))) # (height, width, input_depth, output_depth)
filter_bias = tf.Variable(tf.zeros(20))
strides = [1, 2, 2, 1] # (batch, height, width, depth)
padding = 'VALID'
conv = tf.nn.conv2d(inputs, filter_weights, strides, padding) + filter_bias
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
        output = sess.run(conv, feed_dict={})  # Raises: the placeholder is never fed; illustrative only.
if __name__ == '__main__':
python_run()
tensor_run()
|
akshaybabloo/Car-ND
|
Term_1/CNN_5/output_shape_1.py
|
Python
|
mit
| 1,273
|
#TODO: Make work with new post_types
import datetime
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext as _
from django.utils.html import escape
from askbot import const
from django.core.urlresolvers import reverse
class VoteManager(models.Manager):
def get_up_vote_count_from_user(self, user):
if user is not None:
return self.filter(user=user, vote=1).count()
else:
return 0
def get_down_vote_count_from_user(self, user):
if user is not None:
return self.filter(user=user, vote=-1).count()
else:
return 0
def get_votes_count_today_from_user(self, user):
if user is not None:
today = datetime.date.today()
return self.filter(user=user, voted_at__range=(today, today + datetime.timedelta(1))).count()
else:
return 0
class Vote(models.Model):
VOTE_UP = +1
VOTE_DOWN = -1
VOTE_CHOICES = (
(VOTE_UP, u'Up'),
(VOTE_DOWN, u'Down'),
)
user = models.ForeignKey('auth.User', related_name='votes')
voted_post = models.ForeignKey('Post', related_name='votes')
vote = models.SmallIntegerField(choices=VOTE_CHOICES)
voted_at = models.DateTimeField(default=datetime.datetime.now)
objects = VoteManager()
class Meta:
unique_together = ('user', 'voted_post')
app_label = 'askbot'
db_table = u'vote'
def __unicode__(self):
return '[%s] voted at %s: %s' %(self.user, self.voted_at, self.vote)
def __int__(self):
"""1 if upvote -1 if downvote"""
return self.vote
def is_upvote(self):
return self.vote == self.VOTE_UP
def is_downvote(self):
return self.vote == self.VOTE_DOWN
def is_opposite(self, vote_type):
assert(vote_type in (self.VOTE_UP, self.VOTE_DOWN))
return self.vote != vote_type
def cancel(self):
"""cancel the vote
while taking into account whether vote was up
or down
return change in score on the post
"""
#importing locally because of circular dependency
from askbot import auth
score_before = self.voted_post.points
if self.vote > 0:
# cancel upvote
auth.onUpVotedCanceled(self, self.voted_post, self.user)
else:
# cancel downvote
auth.onDownVotedCanceled(self, self.voted_post, self.user)
score_after = self.voted_post.points
return score_after - score_before
class BadgeData(models.Model):
"""Awarded for notable actions performed on the site by Users."""
slug = models.SlugField(max_length=50, unique=True)
awarded_count = models.PositiveIntegerField(default=0)
awarded_to = models.ManyToManyField(User, through='Award', related_name='badges')
def _get_meta_data(self):
"""retrieves badge metadata stored
in a file"""
from askbot.models import badges
return badges.get_badge(self.slug)
@property
def name(self):
return self._get_meta_data().name
@property
def description(self):
return self._get_meta_data().description
@property
def css_class(self):
return self._get_meta_data().css_class
def get_type_display(self):
#todo - rename "type" -> "level" in this model
return self._get_meta_data().get_level_display()
class Meta:
app_label = 'askbot'
ordering = ('slug',)
def __unicode__(self):
return u'%s: %s' % (self.get_type_display(), self.slug)
def get_absolute_url(self):
return '%s%s/' % (reverse('badge', args=[self.id]), self.slug)
class AwardManager(models.Manager):
def get_recent_awards(self):
awards = super(AwardManager, self).extra(
select={'badge_id': 'badge.id', 'badge_name':'badge.name',
'badge_description': 'badge.description', 'badge_type': 'badge.type',
'user_id': 'auth_user.id', 'user_name': 'auth_user.username'
},
tables=['award', 'badge', 'auth_user'],
order_by=['-awarded_at'],
where=['auth_user.id=award.user_id AND badge_id=badge.id'],
).values('badge_id', 'badge_name', 'badge_description', 'badge_type', 'user_id', 'user_name')
return awards
class Award(models.Model):
"""The awarding of a Badge to a User."""
user = models.ForeignKey(User, related_name='award_user')
badge = models.ForeignKey(BadgeData, related_name='award_badge')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
awarded_at = models.DateTimeField(default=datetime.datetime.now)
notified = models.BooleanField(default=False)
objects = AwardManager()
def __unicode__(self):
return u'[%s] is awarded a badge [%s] at %s' % (self.user.username, self.badge.name, self.awarded_at)
class Meta:
app_label = 'askbot'
db_table = u'award'
class ReputeManager(models.Manager):
def get_reputation_by_upvoted_today(self, user):
"""
        In one day, a user can only earn reputation from upvotes up to a
        certain cap (e.g. +200); canceled upvotes are subtracted back. This
        prevents gaming the system by repeatedly upvoting and canceling.
"""
if user is None:
return 0
else:
today = datetime.date.today()
tomorrow = today + datetime.timedelta(1)
rep_types = (1,-8)
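            # Presumably 1 = reputation gained from an upvote and -8 = loss
            # from an upvote being canceled; see askbot.const.TYPE_REPUTATION.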
sums = self.filter(models.Q(reputation_type__in=rep_types),
user=user,
reputed_at__range=(today, tomorrow),
).aggregate(models.Sum('positive'), models.Sum('negative'))
if sums:
pos = sums['positive__sum']
neg = sums['negative__sum']
if pos is None:
pos = 0
if neg is None:
neg = 0
return pos + neg
else:
return 0
class Repute(models.Model):
"""The reputation histories for user"""
user = models.ForeignKey(User)
#todo: combine positive and negative to one value
positive = models.SmallIntegerField(default=0)
negative = models.SmallIntegerField(default=0)
exercise = models.ForeignKey('Post', null=True, blank=True)
reputed_at = models.DateTimeField(default=datetime.datetime.now)
reputation_type = models.SmallIntegerField(choices=const.TYPE_REPUTATION)
reputation = models.IntegerField(default=1)
#comment that must be used if reputation_type == 10
#assigned_by_moderator - so that reason can be displayed
#in that case Exercise field will be blank
comment = models.CharField(max_length=128, null=True)
objects = ReputeManager()
def __unicode__(self):
return u'[%s]\' reputation changed at %s' % (self.user.username, self.reputed_at)
class Meta:
app_label = 'askbot'
db_table = u'repute'
def get_explanation_snippet(self):
"""returns HTML snippet with a link to related exercise
or a text description for a the reason of the reputation change
in the implementation description is returned only
for Repute.reputation_type == 10 - "assigned by the moderator"
part of the purpose of this method is to hide this idiosyncracy
"""
if self.reputation_type == 10:#todo: hide magic number
return _('<em>Changed by moderator. Reason:</em> %(reason)s') \
% {'reason':self.comment}
else:
delta = self.positive - self.negative
link_title_data = {
'points': abs(delta),
'username': self.user.username,
'exercise_title': self.exercise.thread.title
}
if delta > 0:
link_title = _(
'%(points)s points were added for %(username)s\'s '
'contribution to exercise %(exercise_title)s'
) % link_title_data
else:
link_title = _(
'%(points)s points were subtracted for %(username)s\'s '
'contribution to exercise %(exercise_title)s'
) % link_title_data
return '<a href="%(url)s" title="%(link_title)s">%(exercise_title)s</a>' \
% {
'url': self.exercise.get_absolute_url(),
'exercise_title': escape(self.exercise.thread.title),
'link_title': escape(link_title)
}
|
maxwward/SCOPEBak
|
askbot/models/repute.py
|
Python
|
gpl-3.0
| 9,241
|
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=["camera_node"],
package_dir={'': 'include'},
)
setup(**setup_args)
|
Spookz0r/Temporal_Insanity
|
Gabriel/Code/ros_workspace/src/camera_node/setup.py
|
Python
|
mit
| 306
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sun Apr 21 02:39:05 2013
# by: The Resource Compiler for PyQt (Qt v4.8.3)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x01\x57\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\
\x25\x00\x00\x80\x83\x00\x00\xf9\xff\x00\x00\x80\xe9\x00\x00\x75\
\x30\x00\x00\xea\x60\x00\x00\x3a\x98\x00\x00\x17\x6f\x92\x5f\xc5\
\x46\x00\x00\x00\xdd\x49\x44\x41\x54\x78\xda\x5c\x8e\xb1\x4e\x84\
\x40\x18\x84\x67\xef\x4c\x2c\xc8\xd9\x2c\x0d\x58\x50\x1b\x0b\xc3\
\xfa\x24\x77\xbd\x0d\x85\x4f\x40\x0b\xbb\xcb\x3b\xd0\x68\x41\x72\
\xc5\xd2\x28\x4f\x02\xcf\xb1\x97\x40\x61\xd4\xc2\xc4\x62\x2c\xbc\
\x4d\xd0\x49\xfe\xbf\xf8\x32\xff\x3f\x23\x48\xc2\x5a\x3b\x00\x80\
\xd6\xfa\x80\xb3\xac\xb5\x03\x49\x18\x63\x0e\x5b\x21\xc4\x90\xe7\
\xf9\x3e\x49\x92\x9b\xbe\xef\xef\xca\xb2\x7c\xf5\xde\xbf\x04\xe6\
\x9c\xbb\xbd\x20\xf9\x19\xae\x95\x52\xfb\x2c\xcb\xbe\xa5\x94\x01\
\x81\xe4\x9b\x38\xbf\x3c\x2a\xa5\x1e\xf0\x4f\xe3\x38\x3e\x37\x4d\
\xf3\x28\x48\x02\x00\xba\xae\x7b\x97\x52\xee\x82\x61\x59\x96\x8f\
\xa2\x28\xae\x00\x60\x03\x00\xc6\x98\xe3\xda\x00\x00\x71\x1c\xef\
\xb4\xd6\x4f\x00\xb0\x05\xf0\x27\x6a\x9e\x67\x44\x51\x04\x00\x48\
\xd3\xf4\xde\x39\x77\xbd\x21\xf9\xb5\xea\x70\x6a\xdb\xf6\x72\x9a\
\xa6\xd3\xaa\xf8\xef\xaa\xeb\xda\x57\x55\xe5\x49\x22\xcc\x9a\xfd\
\x0c\x00\x24\xab\x6e\xfa\x96\x21\xfc\xb8\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\xf0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x07\x00\x00\x00\x05\x08\x04\x00\x00\x00\x23\x93\x3e\x53\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x03\x18\x69\x43\x43\x50\x50\x68\x6f\
\x74\x6f\x73\x68\x6f\x70\x20\x49\x43\x43\x20\x70\x72\x6f\x66\x69\
\x6c\x65\x00\x00\x78\xda\x63\x60\x60\x9e\xe0\xe8\xe2\xe4\xca\x24\
\xc0\xc0\x50\x50\x54\x52\xe4\x1e\xe4\x18\x19\x11\x19\xa5\xc0\x7e\
\x9e\x81\x8d\x81\x99\x81\x81\x81\x81\x81\x21\x31\xb9\xb8\xc0\x31\
\x20\xc0\x87\x81\x81\x81\x21\x2f\x3f\x2f\x95\x01\x15\x30\x32\x30\
\x7c\xbb\xc6\xc0\xc8\xc0\xc0\xc0\x70\x59\xd7\xd1\xc5\xc9\x95\x81\
\x34\xc0\x9a\x5c\x50\x54\xc2\xc0\xc0\x70\x80\x81\x81\xc1\x28\x25\
\xb5\x38\x99\x81\x81\xe1\x0b\x03\x03\x43\x7a\x79\x49\x41\x09\x03\
\x03\x63\x0c\x03\x03\x83\x48\x52\x76\x41\x09\x03\x03\x63\x01\x03\
\x03\x83\x48\x76\x48\x90\x33\x03\x03\x63\x0b\x03\x03\x13\x4f\x49\
\x6a\x45\x09\x03\x03\x03\x83\x73\x7e\x41\x65\x51\x66\x7a\x46\x89\
\x82\xa1\xa5\xa5\xa5\x82\x63\x4a\x7e\x52\xaa\x42\x70\x65\x71\x49\
\x6a\x6e\xb1\x82\x67\x5e\x72\x7e\x51\x41\x7e\x51\x62\x49\x6a\x0a\
\x03\x03\x03\xd4\x0e\x06\x06\x06\x06\x5e\x97\xfc\x12\x05\xf7\xc4\
\xcc\x3c\x05\x23\x03\x55\x06\x2a\x83\x88\xc8\x28\x05\x08\x0b\x11\
\x3e\x08\x31\x04\x48\x2e\x2d\x2a\x83\x07\x25\x03\x83\x00\x83\x02\
\x83\x01\x83\x03\x43\x00\x43\x22\x43\x3d\xc3\x02\x86\xa3\x0c\x6f\
\x18\xc5\x19\x5d\x18\x4b\x19\x57\x30\xde\x63\x12\x63\x0a\x62\x9a\
\xc0\x74\x81\x59\x98\x39\x92\x79\x21\xf3\x1b\x16\x4b\x96\x0e\x96\
\x5b\xac\x7a\xac\xad\xac\xf7\xd8\x2c\xd9\xa6\xb1\x7d\x63\x0f\x67\
\xdf\xcd\xa1\xc4\xd1\xc5\xf1\x85\x33\x91\xf3\x02\x97\x23\xd7\x16\
\x6e\x4d\xee\x05\x3c\x52\x3c\x53\x79\x85\x78\x27\xf1\x09\xf3\x4d\
\xe3\x97\xe1\x5f\x2c\xa0\x23\xb0\x43\xd0\x55\xf0\x8a\x50\xaa\xd0\
\x0f\xe1\x5e\x11\x15\x91\xbd\xa2\xe1\xa2\x5f\xc4\x26\x89\x1b\x89\
\x5f\x91\xa8\x90\x94\x93\x3c\x26\x95\x2f\x2d\x2d\x7d\x42\xa6\x4c\
\x56\x5d\xf6\x96\x5c\x9f\xbc\x8b\xfc\x1f\x85\xad\x8a\x85\x4a\x7a\
\x4a\x6f\x95\xd7\xaa\x14\xa8\x9a\xa8\xfe\x54\x3b\xa8\xde\xa5\x11\
\xaa\xa9\xa4\xf9\x41\xeb\x80\xf6\x24\x9d\x54\x5d\x2b\x3d\x41\xbd\
\x57\xfa\x47\x0c\x16\x18\xd6\x1a\xc5\x18\xdb\x9a\xc8\x9b\x32\x9b\
\xbe\x34\xbb\x60\xbe\xd3\x62\x89\xe5\x04\xab\x3a\xeb\x5c\x9b\x38\
\xdb\x40\x3b\x57\x7b\x6b\x07\x63\x47\x1d\x27\x35\x67\x25\x17\x05\
\x57\x79\x37\x05\x77\x65\x0f\x75\x4f\x5d\x2f\x13\x6f\x1b\x1f\x77\
\xdf\x60\xbf\x04\xff\xfc\x80\xfa\xc0\x89\x41\x4b\x83\x77\x85\x5c\
\x0c\x7d\x19\xce\x14\x21\x17\x69\x15\x15\x11\x5d\x11\x33\x33\x76\
\x4f\xdc\x83\x04\xb6\x44\xdd\xa4\xb0\xe4\x86\x94\x35\xa9\x37\xd3\
\x39\x32\x2c\x32\x33\xb3\xe6\x66\x5f\xcc\x65\xcf\xb3\xcf\xaf\x28\
\xd8\x54\xf8\xae\x58\xbb\x24\xab\x74\x55\xd9\x9b\x0a\xfd\xca\x92\
\xaa\x5d\x35\x8c\xb5\x5e\x75\x53\xeb\x1f\x36\xea\x35\xd5\x34\x9f\
\x6d\x95\x6b\x2b\x6c\x3f\xda\x29\xdd\x55\xd4\x7d\xba\x57\xb5\xaf\
\xb1\xff\xee\x44\x9b\x49\xb3\x27\xff\x9d\x1a\x3f\xed\xf0\x0c\x8d\
\x99\xfd\xb3\xbe\xcf\x49\x98\x7b\x7a\xbe\xf9\x82\xa5\x8b\x44\x16\
\xb7\x2e\xf9\xb6\x2c\x73\xf9\xbd\x95\x21\xab\x4e\xaf\x71\x59\xbb\
\x6f\xbd\xe5\x86\x6d\x9b\x4c\x36\x6f\xd9\x6a\xb2\x6d\xfb\x0e\xab\
\x9d\xfb\x77\xbb\xee\x39\xbb\x2f\x6c\xff\x83\x83\x39\x87\x7e\x1e\
\x69\x3f\x26\x7e\x7c\xc5\x49\xeb\x53\xe7\xce\x24\x9f\xfd\x75\x7e\
\xd2\x45\xed\x4b\x47\xaf\x24\x5e\xfd\x77\x7d\xce\x4d\x9b\x5b\x77\
\xef\xd4\xdf\x53\xbe\x7f\xe2\x61\xde\x63\xb1\x27\xfb\x9f\x65\xbe\
\x10\x79\x79\xf0\x75\xfe\x5b\xf9\x77\x17\x3e\x34\x7d\x32\xfd\xfc\
\xea\xeb\x82\xef\xe1\x3f\x05\x7e\x9d\xfa\xd3\xfa\xcf\xf1\xff\x7f\
\x00\x0d\x00\x0f\x34\xfa\x96\xf1\x5d\x00\x00\x00\x20\x63\x48\x52\
\x4d\x00\x00\x7a\x25\x00\x00\x80\x83\x00\x00\xf9\xff\x00\x00\x80\
\xe9\x00\x00\x75\x30\x00\x00\xea\x60\x00\x00\x3a\x98\x00\x00\x17\
\x6f\x92\x5f\xc5\x46\x00\x00\x00\x52\x49\x44\x41\x54\x78\xda\x62\
\x58\xf5\xe9\xca\x3f\x18\x5c\xfe\x9e\x21\xd3\xff\xc4\x8f\xab\xbf\
\xaf\xfe\xbe\xfa\xfb\xd0\x97\x68\x63\x86\xff\x0c\x85\x6b\xf7\x7e\
\xdc\xfb\x71\xf3\x87\xcc\xbc\xff\x0c\x0c\xff\x19\x18\x98\x73\xce\
\xce\xbd\x1f\x39\xff\x3f\xc3\x7f\x06\x86\xff\x0c\xff\x19\x14\xdd\
\x2c\xb6\xfe\x67\xf8\xcf\xf0\x9f\x01\x30\x00\x6a\x5f\x2c\x67\x74\
\xda\xec\xfb\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0b\x15\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x06\x00\x00\x00\x06\x08\x06\x00\x00\x00\xe0\xcc\xef\x48\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x0a\x4f\x69\x43\x43\x50\x50\x68\x6f\
\x74\x6f\x73\x68\x6f\x70\x20\x49\x43\x43\x20\x70\x72\x6f\x66\x69\
\x6c\x65\x00\x00\x78\xda\x9d\x53\x67\x54\x53\xe9\x16\x3d\xf7\xde\
\xf4\x42\x4b\x88\x80\x94\x4b\x6f\x52\x15\x08\x20\x52\x42\x8b\x80\
\x14\x91\x26\x2a\x21\x09\x10\x4a\x88\x21\xa1\xd9\x15\x51\xc1\x11\
\x45\x45\x04\x1b\xc8\xa0\x88\x03\x8e\x8e\x80\x8c\x15\x51\x2c\x0c\
\x8a\x0a\xd8\x07\xe4\x21\xa2\x8e\x83\xa3\x88\x8a\xca\xfb\xe1\x7b\
\xa3\x6b\xd6\xbc\xf7\xe6\xcd\xfe\xb5\xd7\x3e\xe7\xac\xf3\x9d\xb3\
\xcf\x07\xc0\x08\x0c\x96\x48\x33\x51\x35\x80\x0c\xa9\x42\x1e\x11\
\xe0\x83\xc7\xc4\xc6\xe1\xe4\x2e\x40\x81\x0a\x24\x70\x00\x10\x08\
\xb3\x64\x21\x73\xfd\x23\x01\x00\xf8\x7e\x3c\x3c\x2b\x22\xc0\x07\
\xbe\x00\x01\x78\xd3\x0b\x08\x00\xc0\x4d\x9b\xc0\x30\x1c\x87\xff\
\x0f\xea\x42\x99\x5c\x01\x80\x84\x01\xc0\x74\x91\x38\x4b\x08\x80\
\x14\x00\x40\x7a\x8e\x42\xa6\x00\x40\x46\x01\x80\x9d\x98\x26\x53\
\x00\xa0\x04\x00\x60\xcb\x63\x62\xe3\x00\x50\x2d\x00\x60\x27\x7f\
\xe6\xd3\x00\x80\x9d\xf8\x99\x7b\x01\x00\x5b\x94\x21\x15\x01\xa0\
\x91\x00\x20\x13\x65\x88\x44\x00\x68\x3b\x00\xac\xcf\x56\x8a\x45\
\x00\x58\x30\x00\x14\x66\x4b\xc4\x39\x00\xd8\x2d\x00\x30\x49\x57\
\x66\x48\x00\xb0\xb7\x00\xc0\xce\x10\x0b\xb2\x00\x08\x0c\x00\x30\
\x51\x88\x85\x29\x00\x04\x7b\x00\x60\xc8\x23\x23\x78\x00\x84\x99\
\x00\x14\x46\xf2\x57\x3c\xf1\x2b\xae\x10\xe7\x2a\x00\x00\x78\x99\
\xb2\x3c\xb9\x24\x39\x45\x81\x5b\x08\x2d\x71\x07\x57\x57\x2e\x1e\
\x28\xce\x49\x17\x2b\x14\x36\x61\x02\x61\x9a\x40\x2e\xc2\x79\x99\
\x19\x32\x81\x34\x0f\xe0\xf3\xcc\x00\x00\xa0\x91\x15\x11\xe0\x83\
\xf3\xfd\x78\xce\x0e\xae\xce\xce\x36\x8e\xb6\x0e\x5f\x2d\xea\xbf\
\x06\xff\x22\x62\x62\xe3\xfe\xe5\xcf\xab\x70\x40\x00\x00\xe1\x74\
\x7e\xd1\xfe\x2c\x2f\xb3\x1a\x80\x3b\x06\x80\x6d\xfe\xa2\x25\xee\
\x04\x68\x5e\x0b\xa0\x75\xf7\x8b\x66\xb2\x0f\x40\xb5\x00\xa0\xe9\
\xda\x57\xf3\x70\xf8\x7e\x3c\x3c\x45\xa1\x90\xb9\xd9\xd9\xe5\xe4\
\xe4\xd8\x4a\xc4\x42\x5b\x61\xca\x57\x7d\xfe\x67\xc2\x5f\xc0\x57\
\xfd\x6c\xf9\x7e\x3c\xfc\xf7\xf5\xe0\xbe\xe2\x24\x81\x32\x5d\x81\
\x47\x04\xf8\xe0\xc2\xcc\xf4\x4c\xa5\x1c\xcf\x92\x09\x84\x62\xdc\
\xe6\x8f\x47\xfc\xb7\x0b\xff\xfc\x1d\xd3\x22\xc4\x49\x62\xb9\x58\
\x2a\x14\xe3\x51\x12\x71\x8e\x44\x9a\x8c\xf3\x32\xa5\x22\x89\x42\
\x92\x29\xc5\x25\xd2\xff\x64\xe2\xdf\x2c\xfb\x03\x3e\xdf\x35\x00\
\xb0\x6a\x3e\x01\x7b\x91\x2d\xa8\x5d\x63\x03\xf6\x4b\x27\x10\x58\
\x74\xc0\xe2\xf7\x00\x00\xf2\xbb\x6f\xc1\xd4\x28\x08\x03\x80\x68\
\x83\xe1\xcf\x77\xff\xef\x3f\xfd\x47\xa0\x25\x00\x80\x66\x49\x92\
\x71\x00\x00\x5e\x44\x24\x2e\x54\xca\xb3\x3f\xc7\x08\x00\x00\x44\
\xa0\x81\x2a\xb0\x41\x1b\xf4\xc1\x18\x2c\xc0\x06\x1c\xc1\x05\xdc\
\xc1\x0b\xfc\x60\x36\x84\x42\x24\xc4\xc2\x42\x10\x42\x0a\x64\x80\
\x1c\x72\x60\x29\xac\x82\x42\x28\x86\xcd\xb0\x1d\x2a\x60\x2f\xd4\
\x40\x1d\x34\xc0\x51\x68\x86\x93\x70\x0e\x2e\xc2\x55\xb8\x0e\x3d\
\x70\x0f\xfa\x61\x08\x9e\xc1\x28\xbc\x81\x09\x04\x41\xc8\x08\x13\
\x61\x21\xda\x88\x01\x62\x8a\x58\x23\x8e\x08\x17\x99\x85\xf8\x21\
\xc1\x48\x04\x12\x8b\x24\x20\xc9\x88\x14\x51\x22\x4b\x91\x35\x48\
\x31\x52\x8a\x54\x20\x55\x48\x1d\xf2\x3d\x72\x02\x39\x87\x5c\x46\
\xba\x91\x3b\xc8\x00\x32\x82\xfc\x86\xbc\x47\x31\x94\x81\xb2\x51\
\x3d\xd4\x0c\xb5\x43\xb9\xa8\x37\x1a\x84\x46\xa2\x0b\xd0\x64\x74\
\x31\x9a\x8f\x16\xa0\x9b\xd0\x72\xb4\x1a\x3d\x8c\x36\xa1\xe7\xd0\
\xab\x68\x0f\xda\x8f\x3e\x43\xc7\x30\xc0\xe8\x18\x07\x33\xc4\x6c\
\x30\x2e\xc6\xc3\x42\xb1\x38\x2c\x09\x93\x63\xcb\xb1\x22\xac\x0c\
\xab\xc6\x1a\xb0\x56\xac\x03\xbb\x89\xf5\x63\xcf\xb1\x77\x04\x12\
\x81\x45\xc0\x09\x36\x04\x77\x42\x20\x61\x1e\x41\x48\x58\x4c\x58\
\x4e\xd8\x48\xa8\x20\x1c\x24\x34\x11\xda\x09\x37\x09\x03\x84\x51\
\xc2\x27\x22\x93\xa8\x4b\xb4\x26\xba\x11\xf9\xc4\x18\x62\x32\x31\
\x87\x58\x48\x2c\x23\xd6\x12\x8f\x13\x2f\x10\x7b\x88\x43\xc4\x37\
\x24\x12\x89\x43\x32\x27\xb9\x90\x02\x49\xb1\xa4\x54\xd2\x12\xd2\
\x46\xd2\x6e\x52\x23\xe9\x2c\xa9\x9b\x34\x48\x1a\x23\x93\xc9\xda\
\x64\x6b\xb2\x07\x39\x94\x2c\x20\x2b\xc8\x85\xe4\x9d\xe4\xc3\xe4\
\x33\xe4\x1b\xe4\x21\xf2\x5b\x0a\x9d\x62\x40\x71\xa4\xf8\x53\xe2\
\x28\x52\xca\x6a\x4a\x19\xe5\x10\xe5\x34\xe5\x06\x65\x98\x32\x41\
\x55\xa3\x9a\x52\xdd\xa8\xa1\x54\x11\x35\x8f\x5a\x42\xad\xa1\xb6\
\x52\xaf\x51\x87\xa8\x13\x34\x75\x9a\x39\xcd\x83\x16\x49\x4b\xa5\
\xad\xa2\x95\xd3\x1a\x68\x17\x68\xf7\x69\xaf\xe8\x74\xba\x11\xdd\
\x95\x1e\x4e\x97\xd0\x57\xd2\xcb\xe9\x47\xe8\x97\xe8\x03\xf4\x77\
\x0c\x0d\x86\x15\x83\xc7\x88\x67\x28\x19\x9b\x18\x07\x18\x67\x19\
\x77\x18\xaf\x98\x4c\xa6\x19\xd3\x8b\x19\xc7\x54\x30\x37\x31\xeb\
\x98\xe7\x99\x0f\x99\x6f\x55\x58\x2a\xb6\x2a\x7c\x15\x91\xca\x0a\
\x95\x4a\x95\x26\x95\x1b\x2a\x2f\x54\xa9\xaa\xa6\xaa\xde\xaa\x0b\
\x55\xf3\x55\xcb\x54\x8f\xa9\x5e\x53\x7d\xae\x46\x55\x33\x53\xe3\
\xa9\x09\xd4\x96\xab\x55\xaa\x9d\x50\xeb\x53\x1b\x53\x67\xa9\x3b\
\xa8\x87\xaa\x67\xa8\x6f\x54\x3f\xa4\x7e\x59\xfd\x89\x06\x59\xc3\
\x4c\xc3\x4f\x43\xa4\x51\xa0\xb1\x5f\xe3\xbc\xc6\x20\x0b\x63\x19\
\xb3\x78\x2c\x21\x6b\x0d\xab\x86\x75\x81\x35\xc4\x26\xb1\xcd\xd9\
\x7c\x76\x2a\xbb\x98\xfd\x1d\xbb\x8b\x3d\xaa\xa9\xa1\x39\x43\x33\
\x4a\x33\x57\xb3\x52\xf3\x94\x66\x3f\x07\xe3\x98\x71\xf8\x9c\x74\
\x4e\x09\xe7\x28\xa7\x97\xf3\x7e\x8a\xde\x14\xef\x29\xe2\x29\x1b\
\xa6\x34\x4c\xb9\x31\x65\x5c\x6b\xaa\x96\x97\x96\x58\xab\x48\xab\
\x51\xab\x47\xeb\xbd\x36\xae\xed\xa7\x9d\xa6\xbd\x45\xbb\x59\xfb\
\x81\x0e\x41\xc7\x4a\x27\x5c\x27\x47\x67\x8f\xce\x05\x9d\xe7\x53\
\xd9\x53\xdd\xa7\x0a\xa7\x16\x4d\x3d\x3a\xf5\xae\x2e\xaa\x6b\xa5\
\x1b\xa1\xbb\x44\x77\xbf\x6e\xa7\xee\x98\x9e\xbe\x5e\x80\x9e\x4c\
\x6f\xa7\xde\x79\xbd\xe7\xfa\x1c\x7d\x2f\xfd\x54\xfd\x6d\xfa\xa7\
\xf5\x47\x0c\x58\x06\xb3\x0c\x24\x06\xdb\x0c\xce\x18\x3c\xc5\x35\
\x71\x6f\x3c\x1d\x2f\xc7\xdb\xf1\x51\x43\x5d\xc3\x40\x43\xa5\x61\
\x95\x61\x97\xe1\x84\x91\xb9\xd1\x3c\xa3\xd5\x46\x8d\x46\x0f\x8c\
\x69\xc6\x5c\xe3\x24\xe3\x6d\xc6\x6d\xc6\xa3\x26\x06\x26\x21\x26\
\x4b\x4d\xea\x4d\xee\x9a\x52\x4d\xb9\xa6\x29\xa6\x3b\x4c\x3b\x4c\
\xc7\xcd\xcc\xcd\xa2\xcd\xd6\x99\x35\x9b\x3d\x31\xd7\x32\xe7\x9b\
\xe7\x9b\xd7\x9b\xdf\xb7\x60\x5a\x78\x5a\x2c\xb6\xa8\xb6\xb8\x65\
\x49\xb2\xe4\x5a\xa6\x59\xee\xb6\xbc\x6e\x85\x5a\x39\x59\xa5\x58\
\x55\x5a\x5d\xb3\x46\xad\x9d\xad\x25\xd6\xbb\xad\xbb\xa7\x11\xa7\
\xb9\x4e\x93\x4e\xab\x9e\xd6\x67\xc3\xb0\xf1\xb6\xc9\xb6\xa9\xb7\
\x19\xb0\xe5\xd8\x06\xdb\xae\xb6\x6d\xb6\x7d\x61\x67\x62\x17\x67\
\xb7\xc5\xae\xc3\xee\x93\xbd\x93\x7d\xba\x7d\x8d\xfd\x3d\x07\x0d\
\x87\xd9\x0e\xab\x1d\x5a\x1d\x7e\x73\xb4\x72\x14\x3a\x56\x3a\xde\
\x9a\xce\x9c\xee\x3f\x7d\xc5\xf4\x96\xe9\x2f\x67\x58\xcf\x10\xcf\
\xd8\x33\xe3\xb6\x13\xcb\x29\xc4\x69\x9d\x53\x9b\xd3\x47\x67\x17\
\x67\xb9\x73\x83\xf3\x88\x8b\x89\x4b\x82\xcb\x2e\x97\x3e\x2e\x9b\
\x1b\xc6\xdd\xc8\xbd\xe4\x4a\x74\xf5\x71\x5d\xe1\x7a\xd2\xf5\x9d\
\x9b\xb3\x9b\xc2\xed\xa8\xdb\xaf\xee\x36\xee\x69\xee\x87\xdc\x9f\
\xcc\x34\x9f\x29\x9e\x59\x33\x73\xd0\xc3\xc8\x43\xe0\x51\xe5\xd1\
\x3f\x0b\x9f\x95\x30\x6b\xdf\xac\x7e\x4f\x43\x4f\x81\x67\xb5\xe7\
\x23\x2f\x63\x2f\x91\x57\xad\xd7\xb0\xb7\xa5\x77\xaa\xf7\x61\xef\
\x17\x3e\xf6\x3e\x72\x9f\xe3\x3e\xe3\x3c\x37\xde\x32\xde\x59\x5f\
\xcc\x37\xc0\xb7\xc8\xb7\xcb\x4f\xc3\x6f\x9e\x5f\x85\xdf\x43\x7f\
\x23\xff\x64\xff\x7a\xff\xd1\x00\xa7\x80\x25\x01\x67\x03\x89\x81\
\x41\x81\x5b\x02\xfb\xf8\x7a\x7c\x21\xbf\x8e\x3f\x3a\xdb\x65\xf6\
\xb2\xd9\xed\x41\x8c\xa0\xb9\x41\x15\x41\x8f\x82\xad\x82\xe5\xc1\
\xad\x21\x68\xc8\xec\x90\xad\x21\xf7\xe7\x98\xce\x91\xce\x69\x0e\
\x85\x50\x7e\xe8\xd6\xd0\x07\x61\xe6\x61\x8b\xc3\x7e\x0c\x27\x85\
\x87\x85\x57\x86\x3f\x8e\x70\x88\x58\x1a\xd1\x31\x97\x35\x77\xd1\
\xdc\x43\x73\xdf\x44\xfa\x44\x96\x44\xde\x9b\x67\x31\x4f\x39\xaf\
\x2d\x4a\x35\x2a\x3e\xaa\x2e\x6a\x3c\xda\x37\xba\x34\xba\x3f\xc6\
\x2e\x66\x59\xcc\xd5\x58\x9d\x58\x49\x6c\x4b\x1c\x39\x2e\x2a\xae\
\x36\x6e\x6c\xbe\xdf\xfc\xed\xf3\x87\xe2\x9d\xe2\x0b\xe3\x7b\x17\
\x98\x2f\xc8\x5d\x70\x79\xa1\xce\xc2\xf4\x85\xa7\x16\xa9\x2e\x12\
\x2c\x3a\x96\x40\x4c\x88\x4e\x38\x94\xf0\x41\x10\x2a\xa8\x16\x8c\
\x25\xf2\x13\x77\x25\x8e\x0a\x79\xc2\x1d\xc2\x67\x22\x2f\xd1\x36\
\xd1\x88\xd8\x43\x5c\x2a\x1e\x4e\xf2\x48\x2a\x4d\x7a\x92\xec\x91\
\xbc\x35\x79\x24\xc5\x33\xa5\x2c\xe5\xb9\x84\x27\xa9\x90\xbc\x4c\
\x0d\x4c\xdd\x9b\x3a\x9e\x16\x9a\x76\x20\x6d\x32\x3d\x3a\xbd\x31\
\x83\x92\x91\x90\x71\x42\xaa\x21\x4d\x93\xb6\x67\xea\x67\xe6\x66\
\x76\xcb\xac\x65\x85\xb2\xfe\xc5\x6e\x8b\xb7\x2f\x1e\x95\x07\xc9\
\x6b\xb3\x90\xac\x05\x59\x2d\x0a\xb6\x42\xa6\xe8\x54\x5a\x28\xd7\
\x2a\x07\xb2\x67\x65\x57\x66\xbf\xcd\x89\xca\x39\x96\xab\x9e\x2b\
\xcd\xed\xcc\xb3\xca\xdb\x90\x37\x9c\xef\x9f\xff\xed\x12\xc2\x12\
\xe1\x92\xb6\xa5\x86\x4b\x57\x2d\x1d\x58\xe6\xbd\xac\x6a\x39\xb2\
\x3c\x71\x79\xdb\x0a\xe3\x15\x05\x2b\x86\x56\x06\xac\x3c\xb8\x8a\
\xb6\x2a\x6d\xd5\x4f\xab\xed\x57\x97\xae\x7e\xbd\x26\x7a\x4d\x6b\
\x81\x5e\xc1\xca\x82\xc1\xb5\x01\x6b\xeb\x0b\x55\x0a\xe5\x85\x7d\
\xeb\xdc\xd7\xed\x5d\x4f\x58\x2f\x59\xdf\xb5\x61\xfa\x86\x9d\x1b\
\x3e\x15\x89\x8a\xae\x14\xdb\x17\x97\x15\x7f\xd8\x28\xdc\x78\xe5\
\x1b\x87\x6f\xca\xbf\x99\xdc\x94\xb4\xa9\xab\xc4\xb9\x64\xcf\x66\
\xd2\x66\xe9\xe6\xde\x2d\x9e\x5b\x0e\x96\xaa\x97\xe6\x97\x0e\x6e\
\x0d\xd9\xda\xb4\x0d\xdf\x56\xb4\xed\xf5\xf6\x45\xdb\x2f\x97\xcd\
\x28\xdb\xbb\x83\xb6\x43\xb9\xa3\xbf\x3c\xb8\xbc\x65\xa7\xc9\xce\
\xcd\x3b\x3f\x54\xa4\x54\xf4\x54\xfa\x54\x36\xee\xd2\xdd\xb5\x61\
\xd7\xf8\x6e\xd1\xee\x1b\x7b\xbc\xf6\x34\xec\xd5\xdb\x5b\xbc\xf7\
\xfd\x3e\xc9\xbe\xdb\x55\x01\x55\x4d\xd5\x66\xd5\x65\xfb\x49\xfb\
\xb3\xf7\x3f\xae\x89\xaa\xe9\xf8\x96\xfb\x6d\x5d\xad\x4e\x6d\x71\
\xed\xc7\x03\xd2\x03\xfd\x07\x23\x0e\xb6\xd7\xb9\xd4\xd5\x1d\xd2\
\x3d\x54\x52\x8f\xd6\x2b\xeb\x47\x0e\xc7\x1f\xbe\xfe\x9d\xef\x77\
\x2d\x0d\x36\x0d\x55\x8d\x9c\xc6\xe2\x23\x70\x44\x79\xe4\xe9\xf7\
\x09\xdf\xf7\x1e\x0d\x3a\xda\x76\x8c\x7b\xac\xe1\x07\xd3\x1f\x76\
\x1d\x67\x1d\x2f\x6a\x42\x9a\xf2\x9a\x46\x9b\x53\x9a\xfb\x5b\x62\
\x5b\xba\x4f\xcc\x3e\xd1\xd6\xea\xde\x7a\xfc\x47\xdb\x1f\x0f\x9c\
\x34\x3c\x59\x79\x4a\xf3\x54\xc9\x69\xda\xe9\x82\xd3\x93\x67\xf2\
\xcf\x8c\x9d\x95\x9d\x7d\x7e\x2e\xf9\xdc\x60\xdb\xa2\xb6\x7b\xe7\
\x63\xce\xdf\x6a\x0f\x6f\xef\xba\x10\x74\xe1\xd2\x45\xff\x8b\xe7\
\x3b\xbc\x3b\xce\x5c\xf2\xb8\x74\xf2\xb2\xdb\xe5\x13\x57\xb8\x57\
\x9a\xaf\x3a\x5f\x6d\xea\x74\xea\x3c\xfe\x93\xd3\x4f\xc7\xbb\x9c\
\xbb\x9a\xae\xb9\x5c\x6b\xb9\xee\x7a\xbd\xb5\x7b\x66\xf7\xe9\x1b\
\x9e\x37\xce\xdd\xf4\xbd\x79\xf1\x16\xff\xd6\xd5\x9e\x39\x3d\xdd\
\xbd\xf3\x7a\x6f\xf7\xc5\xf7\xf5\xdf\x16\xdd\x7e\x72\x27\xfd\xce\
\xcb\xbb\xd9\x77\x27\xee\xad\xbc\x4f\xbc\x5f\xf4\x40\xed\x41\xd9\
\x43\xdd\x87\xd5\x3f\x5b\xfe\xdc\xd8\xef\xdc\x7f\x6a\xc0\x77\xa0\
\xf3\xd1\xdc\x47\xf7\x06\x85\x83\xcf\xfe\x91\xf5\x8f\x0f\x43\x05\
\x8f\x99\x8f\xcb\x86\x0d\x86\xeb\x9e\x38\x3e\x39\x39\xe2\x3f\x72\
\xfd\xe9\xfc\xa7\x43\xcf\x64\xcf\x26\x9e\x17\xfe\xa2\xfe\xcb\xae\
\x17\x16\x2f\x7e\xf8\xd5\xeb\xd7\xce\xd1\x98\xd1\xa1\x97\xf2\x97\
\x93\xbf\x6d\x7c\xa5\xfd\xea\xc0\xeb\x19\xaf\xdb\xc6\xc2\xc6\x1e\
\xbe\xc9\x78\x33\x31\x5e\xf4\x56\xfb\xed\xc1\x77\xdc\x77\x1d\xef\
\xa3\xdf\x0f\x4f\xe4\x7c\x20\x7f\x28\xff\x68\xf9\xb1\xf5\x53\xd0\
\xa7\xfb\x93\x19\x93\x93\xff\x04\x03\x98\xf3\xfc\x63\x33\x2d\xdb\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x25\x00\x00\x80\x83\
\x00\x00\xf9\xff\x00\x00\x80\xe9\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x6f\x92\x5f\xc5\x46\x00\x00\x00\x40\
\x49\x44\x41\x54\x78\xda\x5c\x8c\x31\x11\x00\x30\x08\xc4\x42\x2d\
\x20\x03\xfc\x2b\x61\x45\x02\x1a\xe8\x54\xae\x6d\xc6\xcf\x7d\xc4\
\xcc\x1a\x20\x22\x84\x8b\x05\x90\x99\xa8\x6a\xdf\x42\xba\x7b\xc6\
\xaa\x92\x47\x1c\xdc\x7d\xb2\x8b\x8f\x93\x7d\x1e\xc0\x64\xf7\x00\
\xf5\x9f\x1d\xd3\x02\x88\xef\xaf\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x0c\
\x04\x56\x23\x67\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x04\xa2\xfc\xa7\
\x00\x64\
\x00\x6f\x00\x77\x00\x6e\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0a\
\x0b\x2d\x87\xc7\
\x00\x68\
\x00\x61\x00\x6e\x00\x64\x00\x6c\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x30\x00\x00\x00\x00\x00\x01\x00\x00\x01\x5b\
\x00\x00\x00\x52\x00\x00\x00\x00\x00\x01\x00\x00\x05\x4f\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
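# A minimal usage sketch, assuming a Qt binding (e.g. PySide) is available;
# shown as comments since this module only registers data with Qt's resource
# system. The resource names follow from qt_resource_name above.
#
#   import resource  # qInitResources() runs at import time
#   # e.g. QtGui.QPixmap(':/images/checkbox.png'); also registered:
#   # ':/images/down_arrow.png' and ':/images/handle.png'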
|
vivalivezhang/projectvfx
|
lib/darkorangeStylesheet/resource.py
|
Python
|
gpl-2.0
| 18,974
|
################################################################################
#
# File: cryostatclass.py
# Author: Anna Zovaro
# Email: anna.zovaro@anu.edu.au
#
# Description:
# A class for a cryostat.
#
# Copyright (C) 2016 Anna Zovaro
#
################################################################################
#
# This file is part of linguinesim.
#
# linguinesim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# linguinesim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with linguinesim. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
from __future__ import division, print_function
################################################################################
class Cryostat(object):
def __init__(self,
T,
Tr_win,
Omega,
eps_wall
):
self.T = T
self.Tr_win = Tr_win
self.Omega = Omega
self.eps_wall = eps_wall
self.eps_win = 1 - self.Tr_win
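# A minimal usage sketch; the values below are hypothetical and serve only to
# illustrate that the window emissivity is derived as eps_win = 1 - Tr_win.
if __name__ == '__main__':
    cryo = Cryostat(
        T=77.0,        # cryostat temperature (hypothetical, in K)
        Tr_win=0.95,   # window transmission
        Omega=3.14,    # solid angle subtended by the walls (hypothetical)
        eps_wall=1.0   # wall emissivity
    )
    print(cryo.eps_win)  # 1 - 0.95, i.e. ~0.05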
|
azov3902/linguinesim
|
cryostatclass.py
|
Python
|
gpl-3.0
| 1,415
|
#coding: utf-8
#This file is part of Ficlatté.
#Copyright © 2015-2017 Paul Robertson, Jim Stitzel and Shu Sam Chen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of version 3 of the GNU Affero General Public
# License as published by the Free Software Foundation
#
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from string import Template
from django.core.mail import send_mail
from django.urls import reverse
from django.conf import settings
from .models import *
#-----------------------------------------------------------------------------
def send_conf_email(profile, token):
url = getattr(settings, 'SITE_URL', 'http://www.example.com/')
mail_template = Template("""Hi.
This is the Ficlatte server. You, or someone claiming
to be you, registered at https://ficlatte.com
If this really is you, please click on the following link:
$url/confirmation/yes/$profile_id/$token
If this is not you, click on the link below and we'll never
send you e-mail ever again:
$url/confirmation/no/$profile_id/$token
Best regards,
The Ficlatte team""")
mail_message = mail_template.substitute(url=url, profile_id=profile.id, token=token)
send_mail('Ficlatte e-mail confirmation',
mail_message,
'Ficlatte Team <noreply@ficlatte.com>',
[profile.email_addr],
fail_silently = False)
#-----------------------------------------------------------------------------
def send_notification_email(profile, subject, message):
send_mail(subject,
message,
'Ficlatte Team <noreply@ficlatte.com>',
[profile.email_addr],
fail_silently = False)
#-----------------------------------------------------------------------------
def send_notification_email_comment(com):
url = getattr(settings, 'SITE_URL', 'http://www.example.com/')
# Is the comment on a story, prompt, challenge, or blog?
if (com.story):
parent = com.story
parent_type = u'story'
subs = Subscription.objects.filter(story=parent)
parent_url = u'{}{}'.format(url, reverse('story', args=[parent.id]))
unsub_url = u'{}{}'.format(url, reverse('story-unsub', args=[parent.id]))
elif (com.prompt):
parent = com.prompt
parent_type = u'prompt'
subs = Subscription.objects.filter(prompt=parent)
parent_url = u'{}{}'.format(url, reverse('prompt', args=[parent.id]))
unsub_url = u'{}{}'.format(url, reverse('prompt-unsub', args=[parent.id]))
elif (com.challenge):
parent = com.challenge
parent_type = u'challenge'
subs = Subscription.objects.filter(challenge=parent)
parent_url = u'{}{}'.format(url, reverse('challenge', args=[parent.id]))
unsub_url = u'{}{}'.format(url, reverse('challenge-unsub', args=[parent.id]))
elif (com.blog):
parent = com.blog
parent_type = u'blog'
subs = Subscription.objects.filter(blog=parent)
parent_url = u'{}{}'.format(url, reverse('blog', args=[parent.id]))
unsub_url = u'{}{}'.format(url, reverse('blog-unsub', args=[parent.id]))
else:
        # Not a blog, prompt, challenge, or story; something weird is going on,
        # so just bug out here
return None
# Build e-mail text
subject = Template('Ficlatte comment on $parent_title by $comment_user').substitute(parent_title=parent.title, comment_user=com.user.pen_name)
message_template = Template("""Hi.
This is the Ficlatte server. You are currently subscribed to receive notifications of new comments posted to Ficlatte $parent_type "$parent_title".
$comment_user just posted a comment:
$comment_body
To see the comment at Ficlatte, click here:
$parent_url
To stop receiving notifications of comments on this story, click here:
$parent_unsub_url
To adjust your e-mail preferences, update your profile here:
$user_profile_url
Keep writing!
The Ficlatte team""")
message = message_template.substitute(
parent_type=parent_type, parent_title=parent.title,
comment_user=com.user.pen_name, comment_body=com.body,
parent_url=parent_url, parent_unsub_url=unsub_url,
user_profile_url=(url + reverse('profile')))
# Loop through everyone subscribed to this thread
for sub in subs:
# But only send messages to people other than the comment author, and only if there is comment text
if (sub.user != com.user and com.body):
send_notification_email(sub.user, subject, message)
#-----------------------------------------------------------------------------
def send_notification_email_story(story, parent, type_flag):
url = getattr(settings, 'SITE_URL', 'http://www.example.com/')
# Is the story a prequel or a sequel?
child_type = u''
child_type_p = u''
if type_flag == 1:
subs = Subscription.objects.filter(prequel_to=parent)
child_type = u'prequel'
child_type_p = u'prequels'
unsub_url = u'{}{}'.format(url, reverse('prequel-unsub', args=[parent.id]))
elif type_flag == 2:
subs = Subscription.objects.filter(sequel_to=parent)
child_type = u'sequel'
child_type_p = u'sequels'
unsub_url = u'{}{}'.format(url, reverse('sequel-unsub', args=[parent.id]))
else:
        # Neither a prequel nor a sequel; something weird is going on,
        # so just bug out here
return None
child_url = u'{}{}'.format(url, reverse('story', args=[story.id]))
# Build e-mail text
subject = Template(
'Ficlatte $child_type to "$parent_title" by $story_user').substitute(
child_type=child_type, parent_title=parent.title, story_user=story.user.pen_name)
message_template = Template("""Hi.
This is the Ficlatte server. You are currently subscribed to receive notifications of new stories posted to the Ficlatte story "$parent_title".
$child_user just posted a $child_type, "$child_title":
$child_body
To read the story at Ficlatte, click here:
$child_url
To stop receiving notifications of $child_type_p on this story, click here:
$unsub_url
To adjust your e-mail preferences, update your profile here:
$user_profile_url
Keep writing!
The Ficlatte team""")
message = message_template.substitute(
parent_title=parent.title, child_title=story.title,
child_user=story.user.pen_name, child_type=child_type, child_type_p=child_type_p,
child_body=story.body, child_url=child_url, unsub_url=unsub_url,
user_profile_url=(url+reverse('profile')))
# Loop through everyone subscribed to this story
for sub in subs:
# But only send messages to people other than the story author
if sub.user != story.user:
send_notification_email(sub.user, subject, message)
#-----------------------------------------------------------------------------
def send_notification_email_challenge_story(story, challenge):
url = getattr(settings, 'SITE_URL', 'http://www.example.com/')
subs = Subscription.objects.filter(ch_entry=challenge)
story_url = u'{}{}'.format(url, reverse('story', args=[story.id]))
unsub_url = u'{}{}'.format(url, reverse('challenge-entry-unsub', args=[challenge.id]))
# Build e-mail text
subject = Template(
'Ficlatte entry to "$challenge_title" by $story_user').substitute(
challenge_title=challenge.title, story_user=story.user.pen_name)
message_template = Template("""Hi.
This is the Ficlatte server. You are currently subscribed to receive notifications of new stories posted to the Ficlatte challenge "$challenge_title".
$story_user just posted an entry, "$story_title":
$story_body
To read the story at Ficlatte, click here:
$story_url
To stop receiving notifications of entries to this challenge, click here:
$unsub_url
To adjust your e-mail preferences, update your profile here:
$user_profile_url
Keep writing!
The Ficlatte team""")
message = message_template.substitute(
challenge_title=challenge.title, story_title=story.title,
story_user=story.user.pen_name, story_body=story.body,
story_url=story_url, unsub_url=unsub_url,
user_profile_url=(url + reverse('profile')))
# Loop through everyone subscribed to this story
for sub in subs:
# But only send messages to people other than the story author
if sub.user != story.user:
send_notification_email(sub.user, subject, message)
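#-----------------------------------------------------------------------------
# A minimal, self-contained sketch of the string.Template substitution
# pattern the helpers above rely on; the values are hypothetical placeholders.
if __name__ == '__main__':
    demo = Template('$url/confirmation/yes/$profile_id/$token')
    print(demo.substitute(url='http://www.example.com',
                          profile_id=42, token='abc123'))
    # http://www.example.com/confirmation/yes/42/abc123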
|
ficlatte/main
|
castle/mail.py
|
Python
|
agpl-3.0
| 8,920
|
# -*- coding: utf-8 -*-
from parameterized import parameterized
from tests.unittests.utils.base_test_case import BaseTestCase, request_context
from tests.unittests.utils.payload.news import NewsPayload
class TestNewsCollection(BaseTestCase):
"""Test News collection"""
@parameterized.expand([
[None, 200],
['admin', 200],
['user_1', 200], # Owner
['user_2', 200],
])
@request_context
def test_news_collection_can_be_accessed_as(self, username, expected):
news = self.blend_news(author=self.user_1, count=3)
user = getattr(self, username) if username else None
response = NewsPayload()\
.get_collection(user=user, code=expected)\
.assertCount(3)
response.data[0].assertHasPublicAttributes(news[0])
response.data[1].assertHasPublicAttributes(news[1])
response.data[2].assertHasPublicAttributes(news[2])
|
geokrety/geokrety-api
|
tests/unittests/api/news/test_news_collection.py
|
Python
|
gpl-3.0
| 933
|
#
# Copyright 2010-2014 Red Hat, Inc. and/or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Refer to the README and COPYING files for full details of the license.
#
import ctypes
import logging
import os
import socket
import subprocess
import threading
import time
from OVirtAgentLogic import AgentLogicBase, DataRetriverBase
from hooks import Hooks
# avoid pep8 warnings
def import_json():
try:
import json
return json
except ImportError:
import simplejson
return simplejson
json = import_json()
CredServer = None
try:
from CredServer import CredServer as CredServerImported
CredServer = CredServerImported
except ImportError:
    # The CredServer doesn't exist in RHEL-5, so we provide a
    # fake server that does nothing.
class CredServerFake(threading.Thread):
def user_authenticated(self, credentials):
pass
CredServer = CredServerFake
_GUEST_SCRIPTS_INSTALL_PATH = '/usr/share/ovirt-guest-agent'
_GUEST_HOOKS_CONFIG_PATH = '/etc/ovirt-guest-agent/hooks.d'
def _get_script_path(name):
return os.path.join(_GUEST_SCRIPTS_INSTALL_PATH, name)
def _readLinesFromProcess(cmdline):
try:
process = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
logging.exception("Failed to run process %s", cmdline)
return []
out, err = process.communicate()
if process.returncode != 0:
logging.error("Process %s returned err code %d", cmdline,
process.returncode)
return []
return out.splitlines()
class Container(object):
def __init__(self):
self.container = False
if 'container' in os.environ:
self.container = os.environ['container'] == 'docker'
if self.container:
self.libc = ctypes.CDLL('libc.so.6')
try:
self.selffd = os.open('/hostproc/self/ns/mnt', os.O_RDONLY)
self.hostfd = os.open('/hostproc/1/ns/mnt', os.O_RDONLY)
except (IOError, OSError):
# We can't leave anyway, so no need to even try
self.container = False
logging.warning('Failed to open mounts for container')
def resetns(self):
if self.container:
self.libc.setns(self.selffd, 0)
def setns(self):
if self.container:
self.libc.setns(self.hostfd, 0)
def make_procfs(self, path):
if self.container:
return path.replace('/proc/', '/hostproc/')
return path
class PkgMgr(object):
def rpm_list_packages(self, app_list):
""" Implementes the package retrieval for rpm based environments """
apps = set()
for name in app_list.split():
ts = self.rpm.TransactionSet()
for app in ts.dbMatch('name', name):
apps.add("%s-%s-%s" %
(app['name'], app['version'], app['release']))
return apps
def apt_list_packages(self, app_list):
""" Implementes the package retrieval for apt based environments """
INSTALLED_STATE = self.apt_pkg.CURSTATE_INSTALLED
apps = set()
cache = self.apt_pkg.Cache()
for app in app_list.split():
if app in cache:
pkg = cache[app]
# Normal package
if pkg.current_state == INSTALLED_STATE:
detail = (app, pkg.current_ver.ver_str)
apps.add("%s-%s" % (detail))
# virtual package
elif len(pkg.provides_list) > 0:
for _, _, pkg in pkg.provides_list:
if pkg.parent_pkg.current_state == INSTALLED_STATE:
detail = (app, pkg.parent_pkg.current_ver.ver_str)
apps.add("%s-%s" % (detail))
return apps
def list_pkgs(self, app_list):
""" Implements the package retrieval for apt and rpm if present and
returns a joined list of packages installed on the system. """
apps = set()
try:
if self.rpm:
apps.update(self.rpm_list_packages(app_list))
if self.apt_pkg:
apps.update(self.apt_list_packages(app_list))
except Exception:
logging.exception("Failed to get package list")
apps = list(apps)
logging.debug("PkgMgr: list_pkgs returns [%s]" % (str(apps)))
return apps
def __init__(self):
self.rpm = None
self.apt_pkg = None
try:
import rpm
self.rpm = rpm
except ImportError:
pass
try:
from apt import apt_pkg
apt_pkg.init()
self.apt_pkg = apt_pkg
except ImportError:
pass
if not self.rpm and not self.apt_pkg:
logging.info("Unknown package management. Application list "
"report is disabled.")
class NicMgr(object):
def __init__(self):
try:
import ethtool
except ImportError:
raise NotImplementedError
self.ethtool = ethtool
self.list_nics = self.ethtool_list_nics
def _get_ipv4_addresses(self, dev):
if hasattr(dev, 'get_ipv4_addresses'):
ipv4_addrs = []
for ip in dev.get_ipv4_addresses():
ipv4_addrs.append(ip.address)
return ipv4_addrs
if dev.ipv4_address is not None:
return [dev.ipv4_address]
else:
return []
def _get_ipv6_addresses(self, dev):
ipv6_addrs = []
for ip in dev.get_ipv6_addresses():
ipv6_addrs.append(ip.address)
return ipv6_addrs
def ethtool_list_nics(self):
interfaces = list()
try:
for dev in self.ethtool.get_devices():
flags = self.ethtool.get_flags(dev)
if flags & self.ethtool.IFF_UP and \
not(flags & self.ethtool.IFF_LOOPBACK):
devinfo = self.ethtool.get_interfaces_info(dev)[0]
interfaces.append(
{'name': dev,
'inet': self._get_ipv4_addresses(devinfo),
'inet6': self._get_ipv6_addresses(devinfo),
'hw': self.ethtool.get_hwaddr(dev)})
except:
logging.exception("Error retrieving network interfaces.")
return interfaces
class CommandHandlerLinux:
def __init__(self, agent):
self.agent = agent
def lock_screen(self):
cmd = [_get_script_path('ovirt-locksession')]
logging.debug("Executing lock session command: '%s'", cmd)
subprocess.call(cmd)
def login(self, credentials):
self.agent.cred_server.user_authenticated(credentials)
def logoff(self):
CMD = ['/usr/share/ovirt-guest-agent/ovirt-logout']
logging.debug("Executing logout command: '%s'", CMD)
subprocess.call(CMD)
def shutdown(self, timeout, msg, reboot=False):
        # The shutdown command works with minutes while vdsm sends the value
        # in seconds, so we round up the value to minutes.
delay = (int(timeout) + 59) / 60
param = '-h'
action = 'shutdown'
if reboot:
param = '-r'
action = 'reboot'
cmd = [_get_script_path('ovirt-shutdown'), param,
"+%d" % (delay), "\"%s\"" % (msg)]
logging.debug("Executing %s command: %s", action, cmd)
subprocess.call(cmd)
def hibernate(self, state):
cmd = [_get_script_path('ovirt-hibernate'), state]
logging.debug("Executing hibernate command: %s", cmd)
subprocess.call(cmd)
def set_number_of_cpus(self, count):
pass # currently noop
class LinuxDataRetriver(DataRetriverBase):
def __init__(self):
self.container = Container()
try:
pkgmgr = PkgMgr()
except NotImplementedError:
self.list_pkgs = lambda app_list: []
else:
self.list_pkgs = pkgmgr.list_pkgs
try:
nicmgr = NicMgr()
except NotImplementedError:
self.list_nics = lambda: []
else:
self.list_nics = nicmgr.list_nics
self.app_list = ""
self.ignored_fs = ""
self.ignore_zero_size_fs = True
self._init_vmstat()
DataRetriverBase.__init__(self)
def getMachineName(self):
return socket.getfqdn()
def getOsVersion(self):
return os.uname()[2]
def getContainerList(self):
cmd = [_get_script_path('ovirt-container-list')]
# skip if not available
if not os.path.exists(cmd[0]):
return []
logging.debug('Executing ovirt-container-list command')
result = []
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
result = json.loads(p.stdout.read())
except Exception:
logging.exception('ovirt-container-list invocation failed')
return result
def getOsInfo(self):
cmd = [_get_script_path('ovirt-osinfo')]
logging.debug('Executing ovirt-osinfo command: %s', cmd)
result = {
'version': '',
'distribution': '',
'codename': '',
'arch': '',
'type': 'linux',
'kernel': ''}
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
for line in p.stdout.read().split('\n'):
line = line.strip()
if line:
k, v = line.split('=', 1)
if v and k in result:
result[k] = v
except Exception:
logging.exception('ovirt-osinfo invocation failed')
return result
def getAllNetworkInterfaces(self):
return self.list_nics()
def getApplications(self):
return self.list_pkgs(self.app_list)
def getAvailableRAM(self):
free = 0
for line in open(self.container.make_procfs('/proc/meminfo')):
var, value = line.strip().split()[0:2]
if var in ('MemFree:', 'Buffers:', 'Cached:'):
free += long(value)
return str(free / 1024)
def getUsers(self):
self.container.setns()
users = ''
try:
cmdline = '/usr/bin/users | /usr/bin/tr " " "\n" | /usr/bin/uniq'
users = ' '.join(os.popen(cmdline).read().split())
except:
logging.exception("Error retrieving logged in users.")
self.container.resetns()
return users
def getActiveUser(self):
self.container.setns()
users = os.popen('/usr/bin/users').read().split()
try:
user = users[0]
except:
user = 'None'
self.container.resetns()
return user
def getDisksUsage(self):
usages = list()
try:
mounts = open(self.container.make_procfs('/proc/mounts'), 'r')
for mount in mounts:
try:
(device, path, fs) = mount.split()[:3]
if fs not in self.ignored_fs and not os.path.isfile(path):
# path might include spaces.
path = path.decode("string-escape")
statvfs = os.statvfs(path)
total = statvfs.f_bsize * statvfs.f_blocks
used = total - statvfs.f_bsize * statvfs.f_bfree
if self.ignore_zero_size_fs and used == total == 0:
continue
usages.append({'path': path, 'fs': fs, 'total': total,
'used': used})
except:
logging.exception("Error retrieving disks usages.")
mounts.close()
except Exception:
logging.exception("Error during reading mounted devices")
if mounts:
mounts.close()
return usages
def getDiskMapping(self):
CMD = '/usr/share/ovirt-guest-agent/diskmapper'
mapping = {}
for line in _readLinesFromProcess([CMD]):
try:
name, serial = line.split('|', 1)
except ValueError:
logging.exception("diskmapper tool used an invalid format")
return {}
mapping[serial] = {'name': name}
return mapping
def getMemoryStats(self):
try:
self._get_meminfo()
self._get_vmstat()
except:
logging.exception("Error retrieving memory stats.")
return self.memStats
def _init_vmstat(self):
self.vmstat = {}
self.vmstat['timestamp_prev'] = time.time()
fields = ['swap_in', 'swap_out', 'pageflt', 'majflt']
for field in fields:
self.vmstat[field + '_prev'] = None
self.vmstat[field + '_cur'] = None
def _get_meminfo(self):
fields = {'MemTotal:': 0, 'MemFree:': 0, 'Buffers:': 0,
'Cached:': 0, 'SwapFree:': 0, 'SwapTotal:': 0}
free = 0
for line in open(self.container.make_procfs('/proc/meminfo')):
(key, value) = line.strip().split()[0:2]
if key in fields.keys():
fields[key] = int(value)
if key in ('MemFree:', 'Buffers:', 'Cached:'):
free += int(value)
self.memStats['mem_total'] = fields['MemTotal:']
self.memStats['mem_unused'] = fields['MemFree:']
self.memStats['mem_free'] = free
self.memStats['mem_buffers'] = fields['Buffers:']
self.memStats['mem_cached'] = fields['Cached:']
swap_used = fields['SwapTotal:'] - fields['SwapFree:']
self.memStats['swap_usage'] = swap_used
self.memStats['swap_total'] = fields['SwapTotal:']
def _get_vmstat(self):
"""
/proc/vmstat reports cumulative statistics so we must subtract the
previous values to get the difference since the last collection.
"""
fields = {'pswpin': 'swap_in', 'pswpout': 'swap_out',
'pgfault': 'pageflt', 'pgmajfault': 'majflt'}
self.vmstat['timestamp_cur'] = time.time()
interval = self.vmstat['timestamp_cur'] - self.vmstat['timestamp_prev']
self.vmstat['timestamp_prev'] = self.vmstat['timestamp_cur']
for line in open(self.container.make_procfs('/proc/vmstat')):
(key, value) = line.strip().split()[0:2]
if key in fields.keys():
name = fields[key]
self.vmstat[name + '_prev'] = self.vmstat[name + '_cur']
self.vmstat[name + '_cur'] = int(value)
if self.vmstat[name + '_prev'] is None:
self.vmstat[name + '_prev'] = self.vmstat[name + '_cur']
self.memStats[name] = int((self.vmstat[name + '_cur'] -
self.vmstat[name + '_prev']) /
interval)
class LinuxVdsAgent(AgentLogicBase):
def __init__(self, config):
AgentLogicBase.__init__(self, config)
self.dr = LinuxDataRetriver()
self.dr.app_list = config.get("general", "applications_list")
self.dr.ignored_fs = set(config.get("general", "ignored_fs").split())
self.dr.ignore_zero_size_fs = config.get("general",
"ignore_zero_size_fs")
self.commandHandler = CommandHandlerLinux(self)
self.cred_server = CredServer()
self.hooks = Hooks(logging.getLogger('Hooks'),
_GUEST_HOOKS_CONFIG_PATH)
def run(self):
self.cred_server.start()
AgentLogicBase.run(self)
def stop(self):
self.cred_server.join()
AgentLogicBase.stop(self)
def test():
from pprint import pprint
dr = LinuxDataRetriver()
dr.app_list = "kernel kernel-headers aspell"
dr.ignored_fs = set("rootfs tmpfs autofs cgroup selinuxfs udev mqueue "
"nfsd proc sysfs devtmpfs hugetlbfs rpc_pipefs devpts "
"securityfs debugfs binfmt_misc fuse.gvfsd-fuse "
"fuse.gvfs-fuse-daemon fusectl usbfs".split())
print "Machine Name:", dr.getMachineName()
print "Fully Qualified Domain Name:", dr.getFQDN()
print "OS Version:", dr.getOsVersion()
print "Network Interfaces:",
pprint(dr.getAllNetworkInterfaces())
print "Installed Applications:",
pprint(dr.getApplications())
print "Available RAM:", dr.getAvailableRAM()
print "Logged in Users:", dr.getUsers()
print "Active User:", dr.getActiveUser()
print "Disks Usage:",
pprint(dr.getDisksUsage())
print "Disk Mapping:",
pprint(dr.getDiskMapping())
print "Memory Stats:", dr.getMemoryStats()
if __name__ == '__main__':
test()
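# A minimal sketch of the /proc/meminfo parsing idiom used by
# getAvailableRAM() above; Linux only, and the helper name is illustrative.
def _demo_available_ram():
    free = 0
    for line in open('/proc/meminfo'):
        key, value = line.strip().split()[0:2]
        if key in ('MemFree:', 'Buffers:', 'Cached:'):
            free += int(value)
    return str(free // 1024)  # MiB, mirroring getAvailableRAM()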
|
Seitanas/kvm-vdi
|
guest_agent/ovirt-guest-agent/GuestAgentLinux2.py
|
Python
|
mit
| 17,581
|
from __future__ import print_function
from random import randint
from .utils import clear_screen
from . import colors
class Board(object):
def __init__(self, size_x, size_y, color_range):
self.size_x = size_x
self.size_y = size_y
self.area = size_x * size_y
self.board = [[randint(1, color_range) for _ in range(size_x)] for _ in range(size_y)]
def get(self, cell):
x, y = cell
return self.board[y][x]
def illustrate(self, players):
clear_screen()
for y, row in enumerate(self.board):
for x, cell in enumerate(row):
for player in players:
if (x, y) in player.domain:
print(colors.color_text(colors.keys[player.color], player.symbol), end=' ')
break
else:
print(colors.color_text(colors.keys[cell], cell), end=' ')
print()
print()
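# A minimal, self-contained sketch of the grid-construction idiom used by
# Board.__init__: a size_y x size_x nested list of random color indices in
# [1, color_range]; the lookup mirrors Board.get((x, y)).
if __name__ == '__main__':
    size_x, size_y, color_range = 4, 3, 5
    grid = [[randint(1, color_range) for _ in range(size_x)]
            for _ in range(size_y)]
    x, y = 2, 1
    print(grid[y][x])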
|
CSZakides/colors
|
src/board.py
|
Python
|
gpl-2.0
| 966
|
# -*- coding: utf-8 -*-
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
ProjectQ-Framework/ProjectQ
|
projectq/tests/__init__.py
|
Python
|
apache-2.0
| 644
|
import numpy as np
from sklearn.cluster import KMeans, AffinityPropagation, SpectralClustering
from sklearn.manifold import Isomap, MDS
from plotting import plot_clusters
__all__ = [ 'Manifold' ]
class Manifold(object):
def __init__(self, dismat, labels=None, cls=[], pos=[]):
self.dismat = np.asarray(dismat)
self.labels = labels
self._cls = cls # Clusters info
self.pos = pos
def __str__(self):
return self.dismat.__str__()
@property
def cls(self):
"""
views clusters as lists
"""
return [[self.labels[i] for i,lab in enumerate(self._cls) if lab == x]
for x in set(self._cls)]
#
# Clustering methods
#
def KMeans(self, n_clusters=10, init='k-means++', max_iter=100,
n_init=1, verbose=1, show=True):
"""
        Clusters the objects in `dismat` using the k-means algorithm. This
        requires `pos` to be precomputed by `mds` or `isomap`. For the
        parameters of the algorithm, see:
http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.
html#sklearn.cluster.KMeans
:param n_clusters: Number of clusters used as the parameter for K-means.
:type n_clusters: int, optional
:param show: Shows the resulting clusters if true.
        :type show: boolean, optional
"""
if len(self.pos)==0:
            raise Exception('K-Means requires low-dimensional coordinates. Try mds() or isomap() first.')
model = KMeans(n_clusters=n_clusters, init=init, max_iter=max_iter,
n_init=n_init,verbose=verbose).fit(self.pos)
self._cls = model.labels_
if show:
return self.cls
def AffinityPropagation(self, show=True):
"""
Clusters objects in `dismat` using affinity propagation algorithm.
:param show: Shows the resulting clusters if true.
        :type show: boolean, optional
"""
model = AffinityPropagation(affinity='precomputed').fit(self.dismat)
self._cls = model.labels_
if show:
return self.cls
def SpectralClustering(self, n_clusters=10, show=True):
"""
Clusters objects in `dismat` using spectral clustering.
:param n_clusters: Number of clusters used as the parameter for K-means.
:type n_clusters: int, optional
:param show: Shows the resulting clusters if true.
        :type show: boolean, optional
"""
model = SpectralClustering(n_clusters=n_clusters,
affinity='precomputed').fit(self.dismat)
self._cls = model.labels_
if show:
return self.cls
#
# Manifold learning methods
#
def mds(self, n_components=2, dissimilarity='precomputed', show=False):
"""
        Calculates lower-dimensional coordinates using the MDS algorithm.
        This requires sklearn version 0.14 or later due to the dissimilarity argument.
        :param n_components: dimensionality of the reduced space.
:type n_components: int, optional
:param show: Shows the calculated coordinates if true.
:type show: boolean, optional
"""
model = MDS(n_components=n_components, dissimilarity=dissimilarity, max_iter=100)
self.pos = model.fit_transform(self.dismat)
if show:
return self.pos
def isomap(self, n_components=2, n_neighbors=3, show=False):
"""
        Calculates lower-dimensional coordinates using the Isomap algorithm.
        :param n_components: dimensionality of the reduced space
:type n_components: int, optional
:param n_neighbors: Used by isomap to determine the number of neighbors
for each point. Large neighbor size tends to produce a denser map.
:type n_neighbors: int, optional
:param show: Shows the calculated coordinates if true.
:type show: boolean, optional
"""
model = Isomap(n_components=n_components, n_neighbors=n_neighbors)
self.pos = model.fit(self.dismat).embedding_
if show:
return self.pos
def plot(self, xy = (0,1)):
"""
        Outputs a 2D embedded plot based on `pos`.
        :param xy: specifies the dimensions of `pos` to be plotted.
:type xy: tuple, optional
"""
return plot_clusters(self.pos[:,[xy[0],xy[1]]], self.labels, clusters=self._cls)
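#
# A minimal usage sketch, assuming scikit-learn and the sibling `plotting`
# module are importable; the 3x3 distance matrix and labels are hypothetical.
#
if __name__ == '__main__':
    dismat = [[0.0, 1.0, 2.0],
              [1.0, 0.0, 1.5],
              [2.0, 1.5, 0.0]]
    m = Manifold(dismat, labels=['a', 'b', 'c'])
    m.mds(n_components=2)                     # embed first to populate `pos`
    print(m.KMeans(n_clusters=2, verbose=0))  # then cluster the embedding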
|
iSumitG/vsm
|
vsm/extensions/clustering/manifold.py
|
Python
|
mit
| 4,501
|
"""
========================
Cycle finding algorithms
========================
"""
# Copyright (C) 2010-2012 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import *
from collections import defaultdict
__all__ = ['cycle_basis','simple_cycles']
__author__ = "\n".join(['Jon Olav Vik <jonovik@gmail.com>',
'Aric Hagberg <hagberg@lanl.gov>'])
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def cycle_basis(G,root=None):
""" Returns a list of cycles which form a basis for cycles of G.
A basis for cycles of a network is a minimal collection of
cycles such that any cycle in the network can be written
as a sum of cycles in the basis. Here summation of cycles
is defined as "exclusive or" of the edges. Cycle bases are
useful, e.g. when deriving equations for electric circuits
using Kirchhoff's Laws.
Parameters
----------
G : NetworkX Graph
root : node, optional
Specify starting node for basis.
Returns
-------
A list of cycle lists. Each cycle list is a list of nodes
which forms a cycle (loop) in G.
Examples
--------
>>> G=nx.Graph()
>>> G.add_cycle([0,1,2,3])
>>> G.add_cycle([0,3,4,5])
>>> print(nx.cycle_basis(G,0))
[[3, 4, 5, 0], [1, 2, 3, 0]]
Notes
-----
This is adapted from algorithm CACM 491 [1]_.
References
----------
.. [1] Paton, K. An algorithm for finding a fundamental set of
cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518.
See Also
--------
simple_cycles
"""
gnodes=set(G.nodes())
cycles=[]
while gnodes: # loop over connected components
if root is None:
root=gnodes.pop()
stack=[root]
pred={root:root}
used={root:set()}
while stack: # walk the spanning tree finding cycles
z=stack.pop() # use last-in so cycles easier to find
zused=used[z]
for nbr in G[z]:
if nbr not in used: # new node
pred[nbr]=z
stack.append(nbr)
used[nbr]=set([z])
elif nbr == z: # self loops
cycles.append([z])
elif nbr not in zused:# found a cycle
pn=used[nbr]
cycle=[nbr,z]
p=pred[z]
while p not in pn:
cycle.append(p)
p=pred[p]
cycle.append(p)
cycles.append(cycle)
used[nbr].add(z)
gnodes-=set(pred)
root=None
return cycles
@not_implemented_for('undirected')
def simple_cycles(G):
"""Find simple cycles (elementary circuits) of a directed graph.
    A simple cycle, or elementary circuit, is a closed path where no
node appears twice, except that the first and last node are the same.
Two elementary circuits are distinct if they are not cyclic permutations
of each other.
Parameters
----------
G : NetworkX DiGraph
A directed graph
Returns
-------
A list of circuits, where each circuit is a list of nodes, with the first
and last node being the same.
Example:
>>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
>>> nx.simple_cycles(G)
[[0, 0], [0, 1, 2, 0], [0, 2, 0], [1, 2, 1], [2, 2]]
See Also
--------
cycle_basis (for undirected graphs)
Notes
-----
The implementation follows pp. 79-80 in [1]_.
The time complexity is O((n+e)(c+1)) for n nodes, e edges and c
elementary circuits.
References
----------
.. [1] Finding all the elementary circuits of a directed graph.
D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
http://dx.doi.org/10.1137/0204007
See Also
--------
cycle_basis
"""
# Jon Olav Vik, 2010-08-09
def _unblock(thisnode):
"""Recursively unblock and remove nodes from B[thisnode]."""
if blocked[thisnode]:
blocked[thisnode] = False
while B[thisnode]:
_unblock(B[thisnode].pop())
def circuit(thisnode, startnode, component):
closed = False # set to True if elementary path is closed
path.append(thisnode)
blocked[thisnode] = True
for nextnode in component[thisnode]: # direct successors of thisnode
if nextnode == startnode:
result.append(path + [startnode])
closed = True
elif not blocked[nextnode]:
if circuit(nextnode, startnode, component):
closed = True
if closed:
_unblock(thisnode)
else:
for nextnode in component[thisnode]:
if thisnode not in B[nextnode]: # TODO: use set for speedup?
B[nextnode].append(thisnode)
path.pop() # remove thisnode from path
return closed
path = [] # stack of nodes in current path
blocked = defaultdict(bool) # vertex: blocked from search?
B = defaultdict(list) # graph portions that yield no elementary circuit
result = [] # list to accumulate the circuits found
# Johnson's algorithm requires some ordering of the nodes.
# They might not be sortable so we assign an arbitrary ordering.
ordering=dict(zip(G,range(len(G))))
for s in ordering:
# Build the subgraph induced by s and following nodes in the ordering
subgraph = G.subgraph(node for node in G
if ordering[node] >= ordering[s])
# Find the strongly connected component in the subgraph
# that contains the least node according to the ordering
strongcomp = nx.strongly_connected_components(subgraph)
mincomp=min(strongcomp,
key=lambda nodes: min(ordering[n] for n in nodes))
component = G.subgraph(mincomp)
if component:
# smallest node in the component according to the ordering
startnode = min(component,key=ordering.__getitem__)
for node in component:
blocked[node] = False
B[node][:] = []
dummy=circuit(startnode, startnode, component)
return result
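# A minimal usage sketch mirroring the doctests above; requires networkx.
if __name__ == '__main__':
    G = nx.Graph()
    G.add_cycle([0, 1, 2, 3])
    G.add_cycle([0, 3, 4, 5])
    print(cycle_basis(G, 0))   # [[3, 4, 5, 0], [1, 2, 3, 0]]
    D = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
    print(simple_cycles(D))    # elementary circuits via Johnson's algorithm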
|
jtorrents/networkx
|
networkx/algorithms/cycles.py
|
Python
|
bsd-3-clause
| 6,479
|
from lib.checker import Checker
from parameterizedtestcase import ParameterizedTestCase
class TestPrivateKeyChecker(ParameterizedTestCase):
def test_provider(self):
err1 = "\"PRIVATE KEY\" found, but not allowed (privatekey)."
err2 = "\"ssh-rsa\" found, but not allowed (privatekey)."
return (
# Nothing wrong here
({"changes": "Hello"}, []),
# PRIVATE KEY
({"changes": "foo; PRIVATE KEY is there!"}, [err1]),
# ssh-rsa
({"changes": "ssh-rsa follows..."}, [err2])
)
@ParameterizedTestCase.parameterize(
("opts", "expected"),
test_provider(Checker)
)
def test_private_key_checker(self, opts, expected):
opts.update({
"key": "privatekey",
"forbidden": ["PRIVATE KEY", "ssh-rsa"],
"filepath": "fizzbuzz.js",
"enabled": True
})
checker = Checker(opts)
self.assertEqual(checker.err_messages, expected)
|
artburkart/gitpatrol
|
tests/TestPrivateKeyChecker.py
|
Python
|
mit
| 1,017
|
"""Tests for the cryptolib library"""
import unittest
import cryptolib
class TestNoSet(unittest.TestCase):
"""No set tests"""
def test_hex_to_bytes(self):
"""No challenge"""
test = cryptolib.hex_to_bytes('42')
result = b'B'
self.assertEqual(test, result)
def test_bytes_to_hex(self):
"""No challenge"""
test = cryptolib.bytes_to_hex(b'B')
result = '42'
self.assertEqual(test, result)
def test_string_to_bytes(self):
"""No challenge"""
test = cryptolib.string_to_bytes('42')
result = b'42'
self.assertEqual(test, result)
def test_bytes_to_string(self):
"""No challenge"""
test = cryptolib.bytes_to_string(b'42')
result = '42'
self.assertEqual(test, result)
class TestSet1(unittest.TestCase):
"""Set 1 tests"""
# X to Y functions
def test_hex_to_base64(self):
"""Set 1 Challenge 1"""
test = ('49276d206b696c6c696e6720796f757220627261696e206c696b652061207'
'06f69736f6e6f7573206d757368726f6f6d')
test = cryptolib.hex_to_base64(test)
result = (b'SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2'
b'hyb29t')
self.assertEqual(test, result)
def test_base64_to_hex(self):
"""Set 1 Challenge 1 (continued)"""
test = ('SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb'
'29t')
test = cryptolib.bytes_to_hex(cryptolib.base64_to_bytes(test))
result = ('49276d206b696c6c696e6720796f757220627261696e206c696b6520612'
'0706f69736f6e6f7573206d757368726f6f6d')
self.assertEqual(test, result)
# XOR
def test_xor(self):
"""Set 1 Challenge 2"""
buffer = cryptolib.hex_to_bytes('1c0111001f010100061a024b53535009181c')
key = cryptolib.hex_to_bytes('686974207468652062756c6c277320657965')
result = cryptolib.hex_to_bytes('746865206b696420646f6e277420706c6179')
test = cryptolib.xor(buffer, key)
self.assertEqual(test, result)
def test_generate_keys(self):
"""Set 1 Challenge 3"""
self.assertEqual(len(cryptolib.generate_keys()), 256)
def test_decrypt_xor(self):
"""Set 1 Challenge 3"""
buffer = ('1b37373331363f78151b7f2b783431333d78397828372d363c78373e783'
'a393b3736')
byte_buffer = cryptolib.hex_to_bytes(buffer)
keys = cryptolib.generate_keys()
test = cryptolib.decrypt_xor(byte_buffer, keys)
result = [[b'\x1b77316?x\x15\x1b\x7f+x413=x9x(7-6<x7>x:9;76',
[88],
18,
bytearray(b"Cooking MC\'s like a pound of bacon")]]
self.assertEqual(test, result)
def test_seek_xor(self):
"""Set 1 Challenge 4"""
with open('tests/test_seek_xor.txt') as file_descriptor:
candidates = [
cryptolib.hex_to_bytes(line.strip())
for line in file_descriptor
]
keys = cryptolib.generate_keys()
results = cryptolib.seek_xor(candidates, keys, 3)
test = [b'{ZB\x15A]TA\x15A]P\x15ETGAL\x15\\F\x15_@XE\\[R?',
[53],
16,
bytearray(b'Now that the party is jumping\n')]
self.assertEqual(results[2], test)
def test_hamming_distance(self):
"""Set 1 Challenge 6"""
buffer1 = cryptolib.string_to_bytes('this is a test')
buffer2 = cryptolib.string_to_bytes('wokka wokka!!!')
distance = cryptolib.hamming_distance(buffer1, buffer2)
self.assertEqual(distance, 37)
def test_encrypt_multibyte_xor(self):
"""Set 1 Challenge 5"""
buffer = ('Burning \'em, if you ain\'t quick and nimble'
'\nI go crazy when I hear a cymbal')
buffer = cryptolib.string_to_bytes(buffer)
test = ('0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a262'
'26324272765272a282b2f20430a652e2c652a3124333a653e2b2027630c69'
'2b20283165286326302e27282f')
test = cryptolib.hex_to_bytes(test)
key = cryptolib.string_to_bytes('ICE')
result = cryptolib.xor(buffer, key)
self.assertEqual(test, result)
def test_divide_blocks(self):
"""Set 1 Challenge 6"""
buffer = cryptolib.string_to_bytes('1234567890')
test = [b'123', b'456', b'789']
keysize = 3
result = cryptolib.divide_in_blocks(buffer, keysize)
self.assertEqual(result, test)
def test_transpose_blocks(self):
"""Set 1 Challenge 7"""
blocks = [b'123', b'456', b'789']
test = [b'147', b'258', b'369']
result = cryptolib.transpose_blocks(blocks)
self.assertEqual(test, result)
if __name__ == '__main__':
unittest.main()
|
MagicTrucy/cryptolib
|
src/cryptotest.py
|
Python
|
mit
| 4,897
|
# Copyright (C) 2020 Renato Lima - Akretion <renato.lima@akretion.com.br>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import api, fields, models
from ..tools import misc
class Nbm(models.Model):
_name = 'l10n_br_fiscal.nbm'
_inherit = 'l10n_br_fiscal.data.product.abstract'
_description = 'NBM'
code = fields.Char(
size=12)
code_unmasked = fields.Char(
size=10)
name = fields.Text(
string='Name',
required=True,
index=True)
product_tmpl_ids = fields.One2many(
inverse_name='nbm_id')
ncms = fields.Char(
string='NCM')
ncm_ids = fields.Many2many(
comodel_name='l10n_br_fiscal.ncm',
relation='fiscal_nbm_ncm_rel',
        column1='nbm_id',
        column2='ncm_id',
readonly=True,
string='NCMs')
@api.model
def create(self, values):
create_super = super(Nbm, self).create(values)
if 'ncms' in values.keys():
create_super.with_context(do_not_write=True).action_search_ncms()
return create_super
@api.multi
def write(self, values):
write_super = super(Nbm, self).write(values)
do_not_write = self.env.context.get('do_not_write')
if 'ncms' in values.keys() and not do_not_write:
self.with_context(do_not_write=True).action_search_ncms()
return write_super
@api.multi
def action_search_ncms(self):
ncm = self.env['l10n_br_fiscal.ncm']
for r in self:
if r.ncms:
domain = misc.domain_field_codes(field_codes=r.ncms)
r.ncm_ids = ncm.search(domain)
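# A minimal usage sketch; it needs a running Odoo environment, so it is shown
# as comments, and the field values are hypothetical. Writing `ncms` triggers
# action_search_ncms(), which resolves the code expression into `ncm_ids`:
#
#   nbm = env['l10n_br_fiscal.nbm'].create({
#       'code': '0101.21.00.00',            # hypothetical NBM code
#       'name': 'Hypothetical NBM record',
#       'ncms': '0101.21.00',               # NCM expression to resolve
#   })
#   nbm.ncm_ids  # NCM records matched via misc.domain_field_codes()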
|
kmee/l10n-brazil
|
l10n_br_fiscal/models/nbm.py
|
Python
|
agpl-3.0
| 1,671
|
"""
Django settings for boarding_school_2 project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+6h5t1yp05s5golf!4+&i7$bqi97jx4rp+f6nq*z4ud3^vbflf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'boardinghouse',
'boardinghouse.contrib.template',
'boardinghouse.contrib.demo',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'school',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'boardinghouse.middleware.SchemaMiddleware',
]
ROOT_URLCONF = 'boarding_school.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'boardinghouse.context_processors.schemata',
],
},
},
]
WSGI_APPLICATION = 'boarding_school.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'boardinghouse.backends.postgres',
'NAME': os.environ.get('DB_NAME'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# Swappable model is not currently working.
BOARDINGHOUSE_SCHEMA_MODEL = 'school.School'
|
schinckel/django-boardinghouse
|
examples/boarding_school/boarding_school/settings.py
|
Python
|
bsd-3-clause
| 3,496
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from .abstract_event_listener import AbstractEventListener
def _wrap_elements(result, ef_driver):
    # handle the case where another wrapper wraps an EventFiringWebElement
if isinstance(result, EventFiringWebElement):
return result
elif isinstance(result, WebElement):
return EventFiringWebElement(result, ef_driver)
elif isinstance(result, list):
return [_wrap_elements(item, ef_driver) for item in result]
    # result is a built-in type.
else:
return result
class EventFiringWebDriver(object):
"""
A wrapper around an arbitrary WebDriver instance which supports firing events
"""
def __init__(self, driver, event_listener):
"""
Creates a new instance of the EventFiringWebDriver
:Args:
- driver : A WebDriver instance
- event_listener : Instance of a class that subclasses AbstractEventListener and implements it fully or partially
Example:
::
from selenium.webdriver import Firefox
from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener
class MyListener(AbstractEventListener):
def before_navigate_to(self, url, driver):
print("Before navigate to %s" % url)
def after_navigate_to(self, url, driver):
print("After navigate to %s" % url)
driver = Firefox()
ef_driver = EventFiringWebDriver(driver, MyListener())
ef_driver.get("http://www.google.co.in/")
"""
if not isinstance(driver, WebDriver):
raise WebDriverException("A WebDriver instance must be supplied")
if not isinstance(event_listener, AbstractEventListener):
raise WebDriverException("Event listener must be a subclass of AbstractEventListener")
self._driver = driver
self._driver._wrap_value = self._wrap_value
self._listener = event_listener
@property
def wrapped_driver(self):
"""Returns the WebDriver instance wrapped by this EventsFiringWebDriver"""
return self._driver
def get(self, url):
self._dispatch("navigate_to", (url, self._driver), "get", (url, ))
def back(self):
self._dispatch("navigate_back", (self._driver,), "back", ())
def forward(self):
self._dispatch("navigate_forward", (self._driver,), "forward", ())
def execute_script(self, script, *args):
unwrapped_args = (script,) + self._unwrap_element_args(args)
return self._dispatch("execute_script", (script, self._driver), "execute_script", unwrapped_args)
def execute_async_script(self, script, *args):
unwrapped_args = (script,) + self._unwrap_element_args(args)
return self._dispatch("execute_script", (script, self._driver), "execute_async_script", unwrapped_args)
def close(self):
self._dispatch("close", (self._driver,), "close", ())
def quit(self):
self._dispatch("quit", (self._driver,), "quit", ())
def find_element(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_element", (by, value))
def find_elements(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_elements", (by, value))
def find_element_by_id(self, id_):
warnings.warn("find_element_by_id is deprecated. Please use find_element(by=By.ID, value=id_) instead")
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
warnings.warn("find_elements_by_id is deprecated. Please use find_elements(by=By.ID, value=id_) instead")
return self.find_elements(by=By.ID, value=id_)
def find_element_by_xpath(self, xpath):
warnings.warn("find_element_by_xpath is deprecated. Please use find_element(by=By.XPATH, value=xpath) instead")
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
warnings.warn("find_elements_by_xpath is deprecated. Please use find_elements(by=By.XPATH, value=xpath) instead")
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_link_text(self, link_text):
warnings.warn("find_element_by_link_text is deprecated. Please use find_element(by=By.LINK_TEXT, value=link_text) instead")
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, text):
warnings.warn("find_elements_by_link_text is deprecated. Please use find_elements(by=By.LINK_TEXT, value=text) instead")
return self.find_elements(by=By.LINK_TEXT, value=text)
def find_element_by_partial_link_text(self, link_text):
warnings.warn("find_element_by_partial_link_text is deprecated. Please use find_element(by=By.PARTIAL_LINK_TEXT, value=link_text) instead")
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
warnings.warn("find_elements_by_partial_link_text is deprecated. Please use find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text) instead")
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_name(self, name):
warnings.warn("find_element_by_name is deprecated. Please use find_element(by=By.NAME, value=name) instead")
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
warnings.warn("find_elements_by_name is deprecated. Please use find_elements(by=By.NAME, value=name) instead")
return self.find_elements(by=By.NAME, value=name)
def find_element_by_tag_name(self, name):
warnings.warn("find_element_by_tag_name is deprecated. Please use find_element(by=By.TAG_NAME, value=name) instead")
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
warnings.warn("find_elements_by_tag_name is deprecated. Please use find_elements(by=By.TAG_NAME, value=name) instead")
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_class_name(self, name):
warnings.warn("find_element_by_class_name is deprecated. Please use find_element(by=By.CLASS_NAME, value=name) instead")
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
warnings.warn("find_elements_by_class_name is deprecated. Please use find_elements(by=By.CLASS_NAME, value=name) instead")
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
warnings.warn("find_element_by_css_selector is deprecated. Please use find_element(by=By.CSS_SELECTOR, value=css_selector) instead")
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
warnings.warn("find_elements_by_css_selector is deprecated. Please use find_elements(by=By.CSS_SELECTOR, value=css_selector) instead")
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def _dispatch(self, l_call, l_args, d_call, d_args):
getattr(self._listener, "before_%s" % l_call)(*l_args)
try:
result = getattr(self._driver, d_call)(*d_args)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
getattr(self._listener, "after_%s" % l_call)(*l_args)
return _wrap_elements(result, self)
def _unwrap_element_args(self, args):
if isinstance(args, EventFiringWebElement):
return args.wrapped_element
elif isinstance(args, tuple):
return tuple([self._unwrap_element_args(item) for item in args])
elif isinstance(args, list):
return [self._unwrap_element_args(item) for item in args]
else:
return args
def _wrap_value(self, value):
if isinstance(value, EventFiringWebElement):
return WebDriver._wrap_value(self._driver, value.wrapped_element)
return WebDriver._wrap_value(self._driver, value)
def __setattr__(self, item, value):
if item.startswith("_") or not hasattr(self._driver, item):
object.__setattr__(self, item, value)
else:
try:
object.__setattr__(self._driver, item, value)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
def __getattr__(self, name):
def _wrap(*args, **kwargs):
try:
result = attrib(*args, **kwargs)
return _wrap_elements(result, self)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise
try:
attrib = getattr(self._driver, name)
return _wrap if callable(attrib) else attrib
except Exception as e:
self._listener.on_exception(e, self._driver)
raise
class EventFiringWebElement(object):
""""
A wrapper around WebElement instance which supports firing events
"""
def __init__(self, webelement, ef_driver):
"""
Creates a new instance of the EventFiringWebElement
"""
self._webelement = webelement
self._ef_driver = ef_driver
self._driver = ef_driver.wrapped_driver
self._listener = ef_driver._listener
@property
def wrapped_element(self):
"""Returns the WebElement wrapped by this EventFiringWebElement instance"""
return self._webelement
def click(self):
self._dispatch("click", (self._webelement, self._driver), "click", ())
def clear(self):
self._dispatch("change_value_of", (self._webelement, self._driver), "clear", ())
def send_keys(self, *value):
self._dispatch("change_value_of", (self._webelement, self._driver), "send_keys", value)
def find_element(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_element", (by, value))
def find_elements(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_elements", (by, value))
def find_element_by_id(self, id_):
warnings.warn("find_element_by_id is deprecated. Please use find_element(by=By.ID, value=id_) instead")
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
warnings.warn("find_elements_by_id is deprecated. Please use find_elements(by=By.ID, value=id_) instead")
return self.find_elements(by=By.ID, value=id_)
def find_element_by_name(self, name):
warnings.warn("find_element_by_name is deprecated. Please use find_element(by=By.NAME, value=name) instead")
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
warnings.warn("find_elements_by_name is deprecated. Please use find_elements(by=By.NAME, value=name)=By.NAME, value=name) instead")
return self.find_elements(by=By.NAME, value=name)
def find_element_by_link_text(self, link_text):
warnings.warn("find_element_by_link_text is deprecated. Please use find_element(by=By.LINK_TEXT, value=link_text) instead")
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, link_text):
warnings.warn("find_elements_by_link_text is deprecated. Please use find_elements(by=By.LINK_TEXT, value=text) instead")
return self.find_elements(by=By.LINK_TEXT, value=link_text)
def find_element_by_partial_link_text(self, link_text):
warnings.warn("find_element_by_partial_link_text is deprecated. Please use find_element(by=By.PARTIAL_LINK_TEXT, value=link_text) instead")
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
warnings.warn("find_elements_by_partial_link_text is deprecated. Please use find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text) instead")
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_tag_name(self, name):
warnings.warn("find_element_by_tag_name is deprecated. Please use find_element(by=By.TAG_NAME, value=name) instead")
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
warnings.warn("find_elements_by_tag_name is deprecated. Please use find_elements(by=By.TAG_NAME, value=name) instead")
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_xpath(self, xpath):
warnings.warn("find_element_by_xpath is deprecated. Please use find_element(by=By.XPATH, value=xpath) instead")
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
warnings.warn("find_elements_by_xpath is deprecated. Please use find_elements(by=By.XPATH, value=xpath) instead")
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_class_name(self, name):
warnings.warn("find_element_by_class_name is deprecated. Please use find_element(by=By.CLASS_NAME, value=name) instead")
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
warnings.warn("find_elements_by_class_name is deprecated. Please use find_elements(by=By.CLASS_NAME, value=name) instead")
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
warnings.warn("find_element_by_css_selector is deprecated. Please use find_element(by=By.CSS_SELECTOR, value=css_selector) instead")
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
warnings.warn("find_elements_by_css_selector is deprecated. Please use find_elements(by=By.CSS_SELECTOR, value=css_selector) instead")
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def _dispatch(self, l_call, l_args, d_call, d_args):
getattr(self._listener, "before_%s" % l_call)(*l_args)
try:
result = getattr(self._webelement, d_call)(*d_args)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
getattr(self._listener, "after_%s" % l_call)(*l_args)
return _wrap_elements(result, self._ef_driver)
def __setattr__(self, item, value):
if item.startswith("_") or not hasattr(self._webelement, item):
object.__setattr__(self, item, value)
else:
try:
object.__setattr__(self._webelement, item, value)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
def __getattr__(self, name):
def _wrap(*args, **kwargs):
try:
result = attrib(*args, **kwargs)
return _wrap_elements(result, self._ef_driver)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise
try:
attrib = getattr(self._webelement, name)
return _wrap if callable(attrib) else attrib
except Exception as e:
self._listener.on_exception(e, self._driver)
raise
WebElement.register(EventFiringWebElement)
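# --- Illustrative usage sketch (editor's addition, not part of the upstream
# module): a listener that logs element lookups. Assumes Firefox with
# geckodriver on PATH; the URL is a placeholder.
if __name__ == "__main__":
    from selenium.webdriver import Firefox

    class FindLogger(AbstractEventListener):
        def before_find(self, by, value, driver):
            print("looking up %s=%s" % (by, value))

        def after_find(self, by, value, driver):
            print("found %s=%s" % (by, value))

    ef_driver = EventFiringWebDriver(Firefox(), FindLogger())
    ef_driver.get("https://example.com")
    heading = ef_driver.find_element(By.TAG_NAME, "h1")  # an EventFiringWebElement
    print(heading.wrapped_element.tag_name)
    ef_driver.quit()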
|
titusfortner/selenium
|
py/selenium/webdriver/support/event_firing_webdriver.py
|
Python
|
apache-2.0
| 16,700
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import threading
import time
logger = logging.getLogger(__name__)
class Blacklist(object):
def __init__(self, path):
self._blacklist_lock = threading.RLock()
self._path = path
def Read(self):
"""Reads the blacklist from the blacklist file.
Returns:
A dict containing bad devices.
"""
with self._blacklist_lock:
blacklist = dict()
if not os.path.exists(self._path):
return blacklist
try:
with open(self._path, 'r') as f:
blacklist = json.load(f)
except (IOError, ValueError) as e:
logger.warning('Unable to read blacklist: %s', str(e))
os.remove(self._path)
if not isinstance(blacklist, dict):
logger.warning('Ignoring %s: %s (a dict was expected instead)',
self._path, blacklist)
blacklist = dict()
return blacklist
def Write(self, blacklist):
"""Writes the provided blacklist to the blacklist file.
Args:
blacklist: list of bad devices to write to the blacklist file.
"""
with self._blacklist_lock:
with open(self._path, 'w') as f:
json.dump(blacklist, f)
def Extend(self, devices, reason='unknown'):
"""Adds devices to blacklist file.
Args:
devices: list of bad devices to be added to the blacklist file.
reason: string specifying the reason for blacklist (eg: 'unauthorized')
"""
timestamp = time.time()
event_info = {
'timestamp': timestamp,
'reason': reason,
}
device_dicts = {device: event_info for device in devices}
logger.info('Adding %s to blacklist %s for reason: %s', ','.join(devices),
self._path, reason)
with self._blacklist_lock:
blacklist = self.Read()
blacklist.update(device_dicts)
self.Write(blacklist)
def Reset(self):
"""Erases the blacklist file if it exists."""
logger.info('Resetting blacklist %s', self._path)
with self._blacklist_lock:
if os.path.exists(self._path):
os.remove(self._path)
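# --- Minimal usage sketch (editor's addition; the path and device serial are
# made up for illustration):
if __name__ == '__main__':
    import tempfile
    bl = Blacklist(os.path.join(tempfile.mkdtemp(), 'blacklist.json'))
    bl.Extend(['0123456789abcdef'], reason='unauthorized')
    print(bl.Read())  # -> {'0123456789abcdef': {'timestamp': ..., 'reason': 'unauthorized'}}
    bl.Reset()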
|
endlessm/chromium-browser
|
third_party/catapult/devil/devil/android/device_blacklist.py
|
Python
|
bsd-3-clause
| 2,246
|
# -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <lxnay@sabayon.org>
@contact: lxnay@sabayon.org
@copyright: Fabio Erculiani
@license: GPL-2
B{Entropy Infrastructure Toolkit}.
"""
import os
import sys
import argparse
from entropy.i18n import _, ngettext
from entropy.output import bold, purple, darkgreen, blue, teal
import entropy.tools
from _entropy.eit.commands.descriptor import EitCommandDescriptor
from _entropy.eit.commands.command import EitCommand
class EitBranch(EitCommand):
"""
Main Eit files command.
"""
NAME = "branch"
ALIASES = []
def __init__(self, args):
EitCommand.__init__(self, args)
self._packages = []
self._from_branch = None
self._to_branch = None
self._repository_id = None
self._ask = True
self._copy = True
def _get_parser(self):
""" Overridden from EitCommand """
descriptor = EitCommandDescriptor.obtain_descriptor(
EitBranch.NAME)
parser = argparse.ArgumentParser(
description=descriptor.get_description(),
formatter_class=argparse.RawDescriptionHelpFormatter,
prog="%s %s" % (sys.argv[0], EitBranch.NAME))
parser.add_argument("--quick", action="store_true",
default=not self._ask,
help=_("no stupid questions"))
parser.add_argument("branch", nargs='?',
metavar="<branch>",
help=_("switch to given branch"))
parser.add_argument("repo", nargs='?',
metavar="<repo>",
help=_("repository"))
parser.add_argument("--from", metavar="<branch>",
help=_("from branch"),
dest="frombranch", default=None)
parser.add_argument("--no-copy", action="store_true",
default=not self._copy, dest="nocopy",
help=_("don't copy packages from branch"))
return parser
INTRODUCTION = """\
Switch to given branch. This will cause the creation of a
separate repository database on disk and remotely, taking the
name of the branch argument passed.
Only one branch should be used at the same time, but nothing
will prevent you from interleaving them.
Generally, this feature is used to switch the repository to a
new branch, copying all the packages over (default behaviour).
To switch to an empty branch without copying the packages
over, use the *--no-copy* switch.
"""
SEE_ALSO = ""
def man(self):
"""
Overridden from EitCommand.
"""
return self._man()
def parse(self):
""" Overridden from EitCommand """
parser = self._get_parser()
try:
nsargs = parser.parse_args(self._args)
except IOError:
return parser.print_help, []
self._from_branch = nsargs.frombranch
self._to_branch = nsargs.branch
self._repository_id = nsargs.repo
self._ask = not nsargs.quick
self._copy = not nsargs.nocopy
return self._call_exclusive, [self._branch, self._repository_id]
def _branch(self, entropy_server):
"""
Eit branch code.
"""
if self._to_branch is None:
# show status then
return self._status(entropy_server)
repository_id = entropy_server.repository()
from_branch = self._from_branch
if from_branch is None:
from_branch = self._settings()['repositories']['branch']
else:
if not entropy.tools.validate_branch_name(from_branch):
entropy_server.output(
"%s: %s" % (
purple(_("Invalid branch")),
from_branch),
importance=1, level="error")
return 1
# validate to_branch
if not entropy.tools.validate_branch_name(self._to_branch):
entropy_server.output(
"%s: %s" % (
purple(_("Invalid branch")),
self._to_branch),
importance=1, level="error")
return 1
dbconn_old = entropy_server.open_server_repository(repository_id,
read_only = True, no_upload = True, use_branch = from_branch,
do_treeupdates = False)
pkglist = dbconn_old.listAllPackageIds()
if not pkglist:
if self._copy:
entropy_server.output(
purple(_("No packages to copy")),
importance=1, level="error")
else:
if self._copy:
entropy_server.output(
"%s %s %s: %s" % (
len(pkglist),
darkgreen(ngettext("package", "packages", len(pkglist))),
blue(_("would be copied to branch")),
bold(self._to_branch),
),
header=darkgreen(" @@ "))
if self._ask and pkglist and self._copy:
resp = entropy_server.ask_question(
_("Would you like to continue ?"))
if resp == _("No"):
return 1
# set branch to new branch first
entropy_server.set_branch(self._to_branch)
if (not pkglist) or (not self._copy):
entropy_server.output(
"[%s] %s: %s" % (
blue(entropy_server.repository()),
teal(_("switched to branch")),
purple(self._to_branch),
),
header=darkgreen(" @@ "))
return 0
status = None
try:
status = entropy_server._switch_packages_branch(
repository_id, from_branch, self._to_branch)
if status is None:
return 1
finally:
if status is None:
entropy_server.set_branch(from_branch)
switched, already_switched, ignored, \
not_found, no_checksum = status
if not_found or no_checksum:
return 1
return 0
def _status(self, entropy_server):
"""
Show branch information (list of branches, current branch)
"""
repository_id = entropy_server.repository()
branch_dir = entropy_server._get_local_repository_dir(
repository_id, branch="")
branches = []
if os.path.isdir(branch_dir):
branches += [x for x in os.listdir(branch_dir) if \
os.path.isdir(os.path.join(branch_dir, x))]
current_branch = self._settings()['repositories']['branch']
branches.sort()
for branch in branches:
cur_txt = ""
if branch == current_branch:
cur_txt = purple("*") + " "
entropy_server.output("%s%s" % (cur_txt, branch))
return 0
EitCommandDescriptor.register(
EitCommandDescriptor(
EitBranch,
EitBranch.NAME,
_('manage repository branches'))
)
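# Editor's usage sketch (repository name hypothetical), matching the argument
# parser and the INTRODUCTION above:
#   eit branch new-branch myrepo            # switch, copying packages over
#   eit branch --no-copy new-branch myrepo  # switch to an empty branch
#   eit branch                              # list branches; '*' marks current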
|
Sabayon/entropy
|
server/eit/commands/branch.py
|
Python
|
gpl-2.0
| 7,185
|
import sys
import pytest
from flask import current_app
from unifispot.core.models import Wifisite,Device,Guesttrack
from unifispot.core.guestutils import validate_track,redirect_guest
from .test_baseviews import TestView
## Keep all fixtures here that need to register a view on the app
#-------------------------------for test_guestutils----------------------
@pytest.fixture(scope='session')
def register_testvalidateview(app):
#create a test view for testing validate_track
@current_app.route('/validate_track/<trackid>')
@validate_track
def trackview(trackid,*args,**kwargs):
assert isinstance(kwargs['wifisite'],Wifisite)
assert isinstance(kwargs['guesttrack'],Guesttrack)
assert isinstance(kwargs['guestdevice'],Device)
return 'OK'
@pytest.fixture(scope='session')
def register_testredirectguest(app):
    #create a test view for testing redirect_guest
@current_app.route('/redirectlanding/<trackid>')
def redirectguest(trackid,*args,**kwargs):
guesttrack = Guesttrack.query.filter_by(trackid=trackid).first()
wifisite = Wifisite.query.filter_by(id=guesttrack.siteid).first()
return redirect_guest(wifisite,guesttrack)
###----------------------------for test_redirect_to_landing----------------
@pytest.fixture(scope='session')
def register_testview(app):
TestView.register(current_app, route_base='/test')
|
Spotipo/spotipo
|
tests/core/conftest.py
|
Python
|
agpl-3.0
| 1,419
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("delft3dworker", "0072_auto_20160830_2334"),
]
operations = [
migrations.AlterField(
model_name="container",
name="container_type",
field=models.CharField(
default="preprocess",
max_length=16,
choices=[
("preprocess", "preprocess"),
("delft3d", "delft3d"),
("process", "process"),
("postprocess", "postprocess"),
("export", "export"),
("sync_cleanup", "sync_cleanup"),
],
),
),
]
|
openearth/delft3d-gt-server
|
delft3dworker/migrations/0073_auto_20160907_1301.py
|
Python
|
gpl-3.0
| 819
|
# Created by Benjamin J. Thompson <bjt@rabidquill.com>
from os.path import dirname
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill
from mycroft.util.log import getLogger
from datetime import datetime, timedelta
from time import mktime
import parsedatetime
import re
import requests
from bs4 import BeautifulSoup
__author__ = 'bjt'
LOGGER = getLogger(__name__)
def match_class(target):
def do_match(tag):
classes = tag.get('class', [])
return all(c in classes for c in target)
return do_match
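# Editor's sketch: match_class builds a predicate for BeautifulSoup's find_all
# that requires *all* of the given CSS classes to be present, e.g.:
#   soup = BeautifulSoup('<div class="datename mission">x</div>', 'html.parser')
#   soup.find_all(match_class(['datename', 'mission']))  # -> [that <div>]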
class NextLaunchSkill(MycroftSkill):
def __init__(self):
super(NextLaunchSkill, self).__init__(name="NextLaunchSkill")
def initialize(self):
# self.load_data_files(dirname(__file__))
next_launch_intent = IntentBuilder("NextLaunchIntent").\
require("NextLaunchKeyword").build()
self.register_intent(next_launch_intent, self.handle_next_launch_intent)
def handle_next_launch_intent(self, message):
html = requests.get("http://spaceflightnow.com/launch-schedule/").content
soup = BeautifulSoup(html, 'html.parser')
schedule = []
cal = parsedatetime.Calendar()
datenames = soup.find_all(match_class(['datename']))
missiondatas = soup.find_all(match_class(['missiondata']))
missdescrips = soup.find_all(match_class(['missdescrip']))
for n in range(len(datenames)):
try:
launch = dict(launch_date=datenames[n].find('span', attrs={'class': 'launchdate'}).text,
rocket_name=datenames[n].find('span', attrs={'class': 'mission'}).text.replace(u"\u2022 ", ""),
#launch_description=missdescrips[n].text,
launch_time=missiondatas[n].text.split("\n")[0].replace("Launch window: ", "").replace("Launch time: ", ""),
launch_location=missiondatas[n].text.split("\n")[1].replace("Launch site: ", ""))
launch['launch_date'] = launch['launch_date'].replace("Sept.", "Sep.") # For some reason the parser doesn't like Sept
gmt_time = re.search(r"^\d{4}", launch['launch_time']).group(0)
launch['launch_date'] += ' ' + ":".join([gmt_time[:2], gmt_time[2:]]) + ' GMT'
time_struct, parse_status = cal.parse(launch['launch_date'])
if parse_status == 0:
print("Could not parse for {0} - {1}".format(launch['launch_date'], launch['rocket_name']))
continue # If this didn't work it's probably not the upcoming launch
sched_date = datetime.fromtimestamp(mktime(time_struct))
time_till = sched_date - datetime.now()
launch['time_till'] = time_till
launch['launch_date'] = sched_date.strftime("%B %d")
launch['launch_time'] = re.search(r"(\d+:.+EDT)", launch['launch_time']).group(0).replace("EDT", "Eastern Daylight Time").replace("EST", "Eastern Standard Time")
schedule.append(launch)
except Exception as e:
print(e.message)
continue
sorted_schedule = sorted(schedule, key=lambda k: k['time_till'])
        print(sorted_schedule)
self.speak_dialog("next.launch", sorted_schedule[0])
self.add_result("launch_date", sorted_schedule[0]["launch_date"])
self.add_result("launch_time", sorted_schedule[0]["launch_time"])
self.add_result("time_till_launch", sorted_schedule[0]["time_till"])
self.add_result("schedule", sorted_schedule)
self.emit_results()
def stop(self):
pass
def create_skill():
return NextLaunchSkill()
|
JarbasAI/jarbas-core
|
mycroft/jarbas-skills/skill_next_rocket_launch/__init__.py
|
Python
|
gpl-3.0
| 3,779
|
#!/usr/bin/python
# Copyright 2016 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: gcpubsub
version_added: "2.3"
short_description: Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
description:
- Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
See U(https://cloud.google.com/pubsub/docs) for an overview.
requirements:
- "python >= 2.6"
- "google-auth >= 0.5.0"
- "google-cloud-pubsub >= 0.22.0"
notes:
- Subscription pull happens before publish. You cannot publish and pull in the same task.
author:
- "Tom Melendez (@supertom) <tom@supertom.com>"
options:
topic:
description:
- GCP pubsub topic name. Only the name, not the full path, is required.
required: True
subscription:
description:
      - Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull. For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediately are available as subfields. See subfields name, push_endpoint and ack_deadline for more information.
required: False
name:
description: Subfield of subscription. Required if subscription is specified. See examples.
required: False
ack_deadline:
description: Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples.
required: False
pull:
    description: Subfield of subscription. Not required. If specified, messages will be retrieved from topic via the provided subscription name. max_messages (int; default None; max number of messages to pull), message_ack (bool; default False; acknowledge the message) and return_immediately (bool; default False; if True, don't wait for messages to appear). If the messages are acknowledged, changed is set to True, otherwise, changed is False.
push_endpoint:
description: Subfield of subscription. Not required. If specified, message will be sent to an endpoint. See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information.
required: False
publish:
description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format. Only message is required.
required: False
state:
    description: State of the topic or queue (absent, present). Applies to the most granular resource: if subscription is specified, it is removed; if only topic is specified, the topic is removed. Note that a topic can be removed without first removing the subscription.
required: False
default: "present"
'''
EXAMPLES = '''
# Create a topic and publish a message to it
# (Message will be pushed; there is no check to see if the message was pushed before.)
# Topics:
## Create Topic
gcpubsub:
topic: ansible-topic-example
state: present
## Delete Topic
### Subscriptions associated with topic are not deleted.
gcpubsub:
topic: ansible-topic-example
state: absent
## Messages: publish multiple messages, with attributes (key:value available with the message)
### setting absent will keep the messages from being sent
gcpubsub:
topic: "{{ topic_name }}"
state: present
publish:
- message: "this is message 1"
attributes:
mykey1: myvalue
mykey2: myvalu2
mykey3: myvalue3
- message: "this is message 2"
attributes:
server: prod
sla: "99.9999"
owner: fred
# Subscriptions
## Create Subscription (pull)
gcpubsub:
topic: ansible-topic-example
subscription:
- name: mysub
state: present
## Create Subscription with ack_deadline and push endpoint
### pull is default, ack_deadline is not required
gcpubsub:
topic: ansible-topic-example
subscription:
- name: mysub
ack_deadline: "60"
push_endpoint: http://pushendpoint.example.com
state: present
## Subscription change from push to pull
### setting push_endpoint to "None" converts subscription to pull.
gcpubsub:
topic: ansible-topic-example
subscription:
name: mysub
push_endpoint: "None"
## Delete subscription
### Topic will not be deleted
gcpubsub:
topic: ansible-topic-example
subscription:
- name: mysub
state: absent
## Pull messages from subscription
### only pull keyword is required.
gcpubsub:
topic: ansible-topic-example
subscription:
name: ansible-topic-example-sub
pull:
message_ack: yes
max_messages: "100"
'''
RETURN = '''
publish:
description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format. Only message is required.
returned: Only when specified
type: list of dictionary
sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]"
pulled_messages:
description: list of dictionaries containing message info. Fields are ack_id, attributes, data, message_id.
returned: Only when subscription.pull is specified
type: list of dictionary
sample: [{ "ack_id": "XkASTCcYREl...","attributes": {"key1": "val1",...}, "data": "this is message 1", "message_id": "49107464153705"},..]
state:
description: The state of the topic or subscription. Value will be either 'absent' or 'present'.
returned: Always
type: str
sample: "present"
subscription:
description: Name of subscription.
returned: When subscription fields are specified
type: str
sample: "mysubscription"
topic:
description: Name of topic.
returned: Always
type: str
sample: "mytopic"
'''
CLOUD_CLIENT = 'google-cloud-pubsub'
CLOUD_CLIENT_MINIMUM_VERSION = '0.22.0'
CLOUD_CLIENT_USER_AGENT = 'ansible-pubsub-0.1'
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
from google.cloud import pubsub
HAS_GOOGLE_CLOUD_PUBSUB = True
except ImportError as e:
HAS_GOOGLE_CLOUD_PUBSUB = False
def publish_messages(message_list, topic):
with topic.batch() as batch:
for message in message_list:
msg = message['message']
attrs = {}
if 'attributes' in message:
attrs = message['attributes']
batch.publish(bytes(msg), **attrs)
return True
def pull_messages(pull_params, sub):
"""
:rtype: tuple (output, changed)
"""
changed = False
    max_messages = pull_params.get('max_messages', None)
    message_ack = pull_params.get('message_ack', False)
    return_immediately = pull_params.get('return_immediately', False)
    output = []
pulled = sub.pull(return_immediately=return_immediately,
max_messages=max_messages)
for ack_id, msg in pulled:
msg_dict = {'message_id': msg.message_id,
'attributes': msg.attributes,
'data': msg.data,
'ack_id': ack_id }
output.append(msg_dict)
if message_ack:
ack_ids = [m['ack_id'] for m in output]
if ack_ids:
sub.acknowledge(ack_ids)
changed = True
return (output, changed)
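# Editor's sketch of the expected call shape ('sub' is a google-cloud-pubsub
# Subscription object; the dict mirrors the module's subscription.pull subfield):
#   msgs, changed = pull_messages(
#       {'max_messages': 10, 'message_ack': True, 'return_immediately': True},
#       sub)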
def main():
module = AnsibleModule(argument_spec=dict(
topic=dict(required=True),
state=dict(choices=['absent', 'present'], default='present'),
publish=dict(type='list', default=None),
subscription=dict(type='dict', default=None),
service_account_email=dict(),
credentials_file=dict(),
project_id=dict(), ),)
if not HAS_PYTHON26:
module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_GOOGLE_CLOUD_PUBSUB:
module.fail_json(msg="Please install google-cloud-pubsub library.")
if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
module.fail_json(msg="Please install %s client version %s" % (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
mod_params = {}
mod_params['publish'] = module.params.get('publish')
mod_params['state'] = module.params.get('state')
mod_params['topic'] = module.params.get('topic')
mod_params['subscription'] = module.params.get('subscription')
creds, params = get_google_cloud_credentials(module)
pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
pubsub_client.user_agent = CLOUD_CLIENT_USER_AGENT
changed = False
json_output = {}
t = None
if mod_params['topic']:
t = pubsub_client.topic(mod_params['topic'])
s = None
if mod_params['subscription']:
# Note: default ack deadline cannot be changed without deleting/recreating subscription
s = t.subscription(mod_params['subscription']['name'],
ack_deadline=mod_params['subscription'].get('ack_deadline', None),
push_endpoint=mod_params['subscription'].get('push_endpoint', None))
if mod_params['state'] == 'absent':
        # Remove the most granular resource. If subscription is specified
# we remove it. If only topic is specified, that is what is removed.
# Note that a topic can be removed without first removing the subscription.
# TODO(supertom): Enhancement: Provide an option to only delete a topic
# if there are no subscriptions associated with it (which the API does not support).
if s is not None:
if s.exists():
s.delete()
changed = True
else:
if t.exists():
t.delete()
changed = True
elif mod_params['state'] == 'present':
if not t.exists():
t.create()
changed = True
if s:
if not s.exists():
s.create()
s.reload()
changed = True
else:
# Subscription operations
# TODO(supertom): if more 'update' operations arise, turn this into a function.
s.reload()
                push_endpoint = mod_params['subscription'].get('push_endpoint', None)
if push_endpoint is not None:
if push_endpoint != s.push_endpoint:
if push_endpoint == 'None':
push_endpoint = None
s.modify_push_configuration(push_endpoint=push_endpoint)
s.reload()
changed = push_endpoint == s.push_endpoint
if 'pull' in mod_params['subscription']:
if s.push_endpoint is not None:
module.fail_json(msg="Cannot pull messages, push_endpoint is configured.")
(json_output['pulled_messages'], changed) = pull_messages(
mod_params['subscription']['pull'], s)
# publish messages to the topic
if mod_params['publish'] and len(mod_params['publish']) > 0:
changed = publish_messages(mod_params['publish'], t)
json_output['changed'] = changed
json_output.update(mod_params)
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gcp import *
if __name__ == '__main__':
main()
|
grimmjow8/ansible
|
lib/ansible/modules/cloud/google/gcpubsub.py
|
Python
|
gpl-3.0
| 11,841
|
'''
Created on May 9, 2013
@author: Bartosz Alchimowicz
'''
import copy
import clone
import format.model
def actor(target, source):
target.name = source.name
target.identifier = source.identifier
target.type = source.type
target.communication = source.communication
target.description = source.description
target.properties = source.properties
return target
def business_object(target, source):
target.name = source.name
target.identifier = source.identifier
target.description = source.description
target.attributes = source.attributes
target.properties = source.properties
target.state_diagram = source.state_diagram
return target
def usecase(target, source):
def structure(target, source):
        # reuse existing objects so references do not have to be fixed up
refs = source.refs
target.scenario.items = items = []
for step_id, step_co in enumerate(source.scenario.items):
if refs.has_key(step_co):
items.append(refs[step_co])
del refs[step_co]
else:
items.append(format.model.Step())
items[step_id].events = events = []
for event_id, event_co in enumerate(step_co.events):
if refs.has_key(event_co):
events.append(refs[event_co])
del refs[event_co]
else:
events.append(format.model.Event())
items[step_id].events[event_id].scenario.items = ssteps = []
for sstep_id, sstep_co in enumerate(event_co.scenario.items):
if refs.has_key(sstep_co):
ssteps.append(refs[sstep_co])
del refs[sstep_co]
else:
ssteps.append(format.model.Step())
structure(target, source) # reuse structure
clone.usecase_content(target, source, None) # copy content
target.setParent()
return target
|
wafaast/afefuc-project
|
src/utils/update.py
|
Python
|
mit
| 1,700
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# photo_taker.py
#
# Copyright 2010 Manuel Martín Ortiz <manuel.martin@itrblabs.eu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
# -- Photo Taker --
#
# Simple program to take pictures of the camera while turning on itself
#
from ePuck import ePuck
import sys
import re
import time
# You can use this dictionary to associate an ePuck ID with its MAC Address
epucks = {
'1797' : '10:00:E8:6C:A2:B6',
'1903' : '10:00:E8:6C:A1:C7'
}
def log(text):
""" Show @text in standart output with colors """
blue = '\033[1;34m'
off = '\033[1;m'
print(''.join((blue, '[Log] ', off, str(text))))
def error(text):
red = '\033[1;31m'
off = '\033[1;m'
print(''.join((red, '[Error] ', off, str(text))))
def main(mac):
global_speed = 180
fs_speed = 0.6
threshold = 1000
print('Connecting with the ePuck')
try:
# First, create an ePuck object.
# If you want debug information:
#~ robot = ePuck(mac, debug = True)
        # else:
robot = ePuck(mac)
# Second, connect to it
robot.connect()
# You can enable various sensors at the same time. Take a look to
# to DIC_SENSORS for know the name of the sensors
robot.enable('camera', 'motor_position')
# We have to set the camera parameters
robot.set_camera_parameters('RGB_365', 40, 40, 8)
log('Conection complete. CTRL+C to stop')
log('Library version: ' + robot.version)
except Exception, e:
error(e)
sys.exit(1)
try:
counter = 0
while True:
            # Important: when you execute 'step()', all sensors
# and actuators are updated. All changes you do on the ePuck
# will be effectives after this method, not before
robot.step()
image = robot.get_image()
if image != None:
# Do something with the image
robot.save_image('ePuck-' + str(counter) + '.png')
counter += 1
# Set the motors speed and position
robot.set_motors_speed(100,-100)
robot.set_motor_position(0,0)
# Make a 'step()' and the robot will move
robot.step()
while robot.get_motor_position()[0] < 270:
# Keep turning on itself
robot.step()
# Stop the robot (don't forget the 'step()')
robot.stop()
robot.step()
# Sleep, otherwise we will not see the stop
time.sleep(1)
except KeyboardInterrupt:
        log('Stopping the robot. Bye!')
robot.close()
sys.exit()
except Exception, e:
error(e)
return 0
if __name__ == '__main__':
X = '([a-fA-F0-9]{2}[:|\-]?){6}'
if len(sys.argv) < 2:
error("Usage: " + sys.argv[0] + " ePuck_ID | MAC Address")
sys.exit()
robot_id = sys.argv[1]
if epucks.has_key(robot_id):
main(epucks[robot_id])
    elif re.match(X, robot_id) is not None:
main(robot_id)
else:
        error('You have to indicate the MAC address of the robot')
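# Editor's usage sketch: pass either a known ePuck ID from the 'epucks' dict
# above or a raw MAC address, e.g.:
#   python photo_taker.py 1797
#   python photo_taker.py 10:00:E8:6C:A2:B6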
|
RL-LDV-TUM/epucklib
|
examples/photo_taker.py
|
Python
|
gpl-3.0
| 3,531
|
import re
bold = re.compile(r'\*{2}(.*?)\*{2}')
text = 'Make this **bold**. This **too**.'
print('Text:', text)
print('Bold:', bold.sub(r'<b>\1</b>', text))
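# Editor's addition: the same substitution using a replacement function instead
# of a backreference string; the output is identical.
print('Bold (callable):', bold.sub(lambda m: '<b>%s</b>' % m.group(1), text))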
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_text/re_sub.py
|
Python
|
apache-2.0
| 162
|
'''OpenGL extension OES.required_internalformat
This module customises the behaviour of the
OpenGL.raw.GLES1.OES.required_internalformat to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/required_internalformat.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.required_internalformat import *
from OpenGL.raw.GLES1.OES.required_internalformat import _EXTENSION_NAME
def glInitRequiredInternalformatOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
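# Editor's sketch: a typical guard before relying on the extension's entry
# points (meaningful only with a current GLES1 context):
#   if glInitRequiredInternalformatOES():
#       pass  # the OES_required_internalformat entry points are available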
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/GLES1/OES/required_internalformat.py
|
Python
|
lgpl-3.0
| 822
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-20 08:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('chat', '0003_pezoneschat'),
]
operations = [
migrations.RenameModel(
old_name='pezonesChat',
new_name='futbolChat',
),
]
|
AtenrevCode/scChat
|
chat/migrations/0004_auto_20170420_1028.py
|
Python
|
mit
| 394
|
""" """
# Standard library modules.
import unittest
import logging
# Third party modules.
import numpy as np
# Local modules.
from pyhmsa.spec.datum.analysislist import \
AnalysisList0D, AnalysisList1D, AnalysisList2D
from pyhmsa.spec.condition.condition import _Condition
# Globals and constants variables.
class TestAnalysisList0D(unittest.TestCase):
def setUp(self):
super().setUp()
self.datum = AnalysisList0D(3)
self.datum.conditions['Test'] = _Condition()
self.datum[0] = 5.0
def tearDown(self):
unittest.TestCase.tearDown(self)
def testskeleton(self):
self.assertEqual(3, len(self.datum))
self.assertEqual(3, self.datum.analysis_count)
self.assertEqual(1, len(self.datum.conditions))
self.assertAlmostEqual(5.0, self.datum[0, 0], 4)
self.assertEqual(3, self.datum.collection_dimensions['Analysis'])
def testtoanalysis(self):
analysis = self.datum.toanalysis(0)
self.assertAlmostEqual(5.0, analysis, 4)
self.assertEqual(1, len(analysis.conditions))
def testxlabel(self):
self.assertEqual('Analysis', self.datum.get_xlabel())
self.datum.set_xlabel("Test", 'cps/nA')
self.assertEqual('Test (cps/nA)', self.datum.get_xlabel())
def testylabel(self):
self.assertEqual('Values', self.datum.get_ylabel())
self.datum.set_ylabel("Test", 'cps/nA')
self.assertEqual('Test (cps/nA)', self.datum.get_ylabel())
class TestAnalysisList1D(unittest.TestCase):
def setUp(self):
super().setUp()
self.datum = AnalysisList1D(3, 5)
self.datum.conditions['Test'] = _Condition()
self.datum[0] = [1.0, 2.0, 3.0, 4.0, 5.0]
def tearDown(self):
unittest.TestCase.tearDown(self)
def testskeleton(self):
self.assertEqual(3, len(self.datum))
self.assertEqual(3, self.datum.analysis_count)
self.assertEqual(1, len(self.datum.conditions))
self.assertAlmostEqual(1.0, self.datum[0, 0], 4)
self.assertAlmostEqual(5.0, self.datum[0, -1], 4)
self.assertEqual(3, self.datum.collection_dimensions['Analysis'])
self.assertEqual(5, self.datum.datum_dimensions['Channel'])
def testtoanalysis(self):
analysis = self.datum.toanalysis(0)
self.assertAlmostEqual(1.0, analysis[0], 4)
self.assertAlmostEqual(5.0, analysis[-1], 4)
self.assertEqual(5, analysis.channels)
self.assertEqual(1, len(analysis.conditions))
class TestAnalysisList2D(unittest.TestCase):
def setUp(self):
super().setUp()
self.datum = AnalysisList2D(3, 5, 5)
self.datum.conditions['Test'] = _Condition()
self.datum[0] = np.ones((5, 5))
def tearDown(self):
unittest.TestCase.tearDown(self)
def testskeleton(self):
self.assertEqual(3, len(self.datum))
self.assertEqual(3, self.datum.analysis_count)
self.assertEqual(1, len(self.datum.conditions))
self.assertEqual(5, self.datum.u)
self.assertEqual(5, self.datum.v)
self.assertAlmostEqual(1.0, self.datum[0, 0, 0], 4)
self.assertEqual(3, self.datum.collection_dimensions['Analysis'])
self.assertEqual(5, self.datum.datum_dimensions['U'])
self.assertEqual(5, self.datum.datum_dimensions['V'])
def testtoanalysis(self):
analysis = self.datum.toanalysis(0)
self.assertAlmostEqual(1.0, analysis[0, 0], 4)
self.assertEqual(5, analysis.u)
self.assertEqual(5, analysis.v)
self.assertEqual(1, len(analysis.conditions))
if __name__ == '__main__': # pragma: no cover
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
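# Editor's sketch, inferred from the setUp methods above: AnalysisList1D
# behaves like an (analysis_count x channels) array of spectra, e.g.:
#   datum = AnalysisList1D(2, 3)
#   datum[0] = [1.0, 2.0, 3.0]
#   datum.toanalysis(0)[1]  # -> 2.0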
|
pyhmsa/pyhmsa
|
pyhmsa/spec/datum/test_analysislist.py
|
Python
|
mit
| 3,730
|
from django.contrib import admin
from ..models.foo import Foo
admin.site.register(Foo)
|
yephper/django
|
tests/admin_scripts/complex_app/admin/foo.py
|
Python
|
bsd-3-clause
| 94
|
from __future__ import absolute_import
from . import submodule
# Just Dummy class for testing
class TestClassInSubPkg:
pass
|
asmodehn/filefinder2
|
tests/test_filefinder2/pkg/__init__.py
|
Python
|
mit
| 130
|
import os
import numpy as np
from scipy.optimize import curve_fit
def gauss(x, A, mu, sigma):
return A * np.exp(-(x - mu)**2 / (2. * sigma**2))
scriptmode = True
SDM_name = 'test' # The prefix to use for all output files
# SDM_name = '13A-213.sb20685305.eb20706999.56398.113012800924'
# Set up some useful variables (these will be altered later on)
msfile = SDM_name + '.ms'
hisplitms = SDM_name + '.hi.ms'
splitms = SDM_name + '.hi.src.split.ms'
contsubms = SDM_name + '.hi.src.split.ms.contsub'
rawcleanms = SDM_name + '.hi.src.split.ms.contsub.rawcleanimg'
cleanms = SDM_name + '.hi.src.split.ms.contsub.cleanimg'
pathname = os.environ.get('CASAPATH').split()[0]
pipepath = '/home/dcolombo/pipe_scripts/'
# pipepath = '/home/dario/pipe_scripts/'
source = 'SextansA'
# VOS stuff
vos_dir = '../vos/'
vos_proc = './'
vos_link = '../vos_link/'
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%%&%&%&%&%&%&%%&%
# Find the 21cm spw and check if the obs
# is single pointing or mosaic
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%%&%&%&%&%&%&%%&%
print "Find HI spw..."
# But first find the spw corresponding to it
tb.open(vos_dir + msfile + '/SPECTRAL_WINDOW')
freqs = tb.getcol('REF_FREQUENCY')
nchans = tb.getcol('NUM_CHAN')
tb.close()
spws = range(0, len(freqs))
# Select the 21cm
sel = np.where((freqs > 1.40 * 10**9) & (freqs < 1.43 * 10**9))
hispw = str(spws[sel[0][0]])
freq = freqs[sel[0][0]]
nchan = nchans[sel[0][0]]
print "Selected spw ", hispw, "with frequency ", freq, "and ", nchan, " channels"
print "Starting split the HI line"
# Mosaic or single pointing?
tb.open(vos_dir + msfile + '/FIELD')
names = tb.getcol('NAME')
tb.close()
moscount = 0
for name in names:
chsrc = name.find(source)
if chsrc != -1:
moscount = moscount + 1
if moscount > 1:
imagermode = "mosaic"
else:
imagermode = "csclean"
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Split the corrected source data from the rest
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
print "Starting source split..."
os.system('rm -rf ' + vos_proc + splitms)
default('split')
vis = vos_dir + hisplitms
outputvis = vos_proc + splitms
field = source
spw = ''
datacolumn = 'corrected'
keepflags = False
split()
print "Created splitted-source .ms " + splitms
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# UV continuum subtraction
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# 1) Save a .txt file of the amplitude vs
# channels, plotms runs only to get the
# ASCII file
print "Estimating channels with signal..."
real_amps = []
imag_amps = []
default('visstat')
vis = vos_proc + splitms
field = '0'
datacolumn = 'data'
selectdata = True
useflags = False
for nc in range(nchan):
spw = '0:' + str(nc)
axis = 'real'
pdata = visstat()
real_amps.append(pdata['DATA']['mean'])
axis = 'imag'
pdata = visstat()
imag_amps.append(pdata['DATA']['mean'])
real_amps = np.asarray(real_amps)
imag_amps = np.asarray(imag_amps)
amps = np.sqrt(real_amps**2 + imag_amps**2)
chans = np.arange(nchan) + 1
# Guessing parameters for fitting
A = max(amps)
mu = chans[amps.tolist().index(A)]
hm = chans[amps > A / 2]
sigma = float(hm[-1] - hm[0]) / 2.35
opar, _ = curve_fit(gauss, chans, amps, p0=[A, mu, sigma])
# Bracket the line at +/- 3.5 sigma so those channels are
# excluded from the continuum fit
chan1 = int(mu - 3.5 * opar[2])
chan2 = int(mu + 3.5 * opar[2])
fitspws = str(chan1) + '~' + str(chan2)
print "Signal within channels " + fitspws
print "Starting contsub..."
# Run the routine
os.system('rm -rf ' + vos_proc + contsubms)
default('uvcontsub')
vis = vos_proc + splitms
fitspw = '0:' + fitspws
excludechans = True
solint = 0.0
fitorder = 0
fitmode = 'subtract'
splitdata = True
uvcontsub()
print "Created continum subtracted image" + contsubms
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# CLEANing
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
print "Starting CLEANing..."
os.system('rm -rf ' + vos_proc + rawcleanms + '*')
# First generate a 0-iterations
# image to estimate the noise level
# (threshold)
# Get max baseline and dish size
bline_max = au.getBaselineExtrema(vos_proc + splitms)[0]
tb.open(vos_proc + splitms + '/ANTENNA')
dishs = tb.getcol('DISH_DIAMETER')
dish_min = min(dishs)
tb.close()
# Find the beam
hi_lambda = 299792458.0 / (freq)
min_lambda = 299792458.0 / (min(freqs))
syn_beam = (hi_lambda / bline_max) * 180 / np.pi * 3600
prim_beam = (min_lambda / dish_min) * 180 / np.pi * 3600
# Setting CLEANing parameters
sel_cell = str(round(syn_beam / 5)) + 'arcsec'
sel_imsize = int(round(prim_beam / (syn_beam / 5)))
# Increase sel_imsize by a couple of beams
# to be safe
dx = int(round(syn_beam / prim_beam * sel_imsize))
sel_imsize = sel_imsize + 1 * dx
# The image size should factor into powers of
# 2, 3 and 5 to work well with clean, so:
sel_imsize = sel_imsize - 1
pnum = 1 * sel_imsize
while pnum != 1:
sel_imsize = sel_imsize + 1
pnum = 1 * sel_imsize
while pnum % 2 == 0:
pnum = pnum / 2
while pnum % 3 == 0:
pnum = pnum / 3
while pnum % 5 == 0:
pnum = pnum / 5
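# (Editor's note: the loops above bump sel_imsize until it factors entirely
# into 2s, 3s and 5s, the image sizes that FFT-based clean handles
# efficiently; e.g. 250 = 2 * 5**3 passes, while a prime such as 251 is
# bumped further.)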
print "Image size:", sel_imsize
print "Cell size:", sel_cell
# First generate a 0-iterations
# image to estimate the noise level
# (threshold)
default('clean')
vis = vos_proc + contsubms
imagename = vos_proc + rawcleanms
cell = [sel_cell, sel_cell]
imsize = [sel_imsize, sel_imsize]
imagermode = imagermode
mode = "channel"
nchan = 4
start = chan1 - 5
width = 1
field = '0'
spw = '0'
interactive = False
pbcor = False
minpb = 0.25
restfreq = '1.420405752GHz'
niter = 0
clean()
print "Estimating sigma..."
default('imstat')
imagename = vos_proc + rawcleanms + '.image'
chans = '0~3'
rawclean_stat = imstat()
rms = rawclean_stat['sigma'][0] * 1000
rms = round(rms)
rms = str(int(rms)) + 'mJy'
print "Sigma=", rms, ". Now the real CLEANing..."
# Now run the real cleaning
os.system('rm -rf ' + cleanms + '*')
default('clean')
vis = vos_proc + contsubms
imagename = vos_proc + cleanms
cell = [sel_cell, sel_cell]
imsize = [sel_imsize, sel_imsize]
imagermode = imagermode
mode = "channel"
start = chan1
nchan = chan2 - chan1
width = 1
field = ''
spw = ''
interactive = False
restfreq = '1.420405752GHz'
outframe = 'LSRK'
niter = 10000
threshold = rms
usescratch = True
clean()
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Moment maps 0,1,2
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
default("immoments")
imagename = vos_proc + cleanms + '.image'
moments = [0, 1, 2]
outfile = vos_proc + cleanms
immoments()
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Convert everything to fits file
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
print "Exporting the image fits..."
default('exportfits')
imagename = vos_proc + cleanms + '.image'
fitsimage = vos_proc + source + '_21cm.fits'
velocity = True
optical = False
overwrite = True
dropstokes = True
exportfits()
print "Exporting moment maps..."
default('exportfits')
# Moment 0
imagename = vos_proc + cleanms + '.integrated'
fitsimage = vos_proc + source + '_21cm_mom0.fits'
velocity = True
optical = False
overwrite = True
dropstokes = True
exportfits()
default('exportfits')
# Moment 1
imagename = vos_proc + cleanms + '.weighted_coord'
fitsimage = vos_proc + source + '_21cm_mom1.fits'
velocity = True
optical = False
overwrite = True
dropstokes = True
exportfits()
default('exportfits')
# Moment 2
imagename = vos_proc + cleanms + '.weighted_dispersion_coord'
fitsimage = vos_proc + source + '_21cm_mom2.fits'
velocity = True
optical = False
overwrite = True
dropstokes = True
exportfits()
|
e-koch/canfar_scripts
|
img_pipe/casanfar_image.py
|
Python
|
mit
| 7,517
|
# python
# This file is generated by a program (mib2py).
import CISCO_ISDNU_IF_MIB
OIDMAP = {
'1.3.6.1.4.1.9.9.18': CISCO_ISDNU_IF_MIB.ciscoIsdnuIfMIB,
'1.3.6.1.4.1.9.9.18.1': CISCO_ISDNU_IF_MIB.ciuIfObjects,
'1.3.6.1.4.1.9.9.18.1.1': CISCO_ISDNU_IF_MIB.ciuInterface,
'1.3.6.1.4.1.9.9.18.1.2': CISCO_ISDNU_IF_MIB.ciuIfExternalSTPort,
'1.3.6.1.4.1.9.9.18.1.3': CISCO_ISDNU_IF_MIB.ciuIfMIBNotificationEnables,
'1.3.6.1.4.1.9.9.18.2': CISCO_ISDNU_IF_MIB.ciuIfMIBNotificationPrefix,
'1.3.6.1.4.1.9.9.18.2.0': CISCO_ISDNU_IF_MIB.ciuIfMIBNotifications,
'1.3.6.1.4.1.9.9.18.3': CISCO_ISDNU_IF_MIB.ciuIfMIBConformance,
'1.3.6.1.4.1.9.9.18.3.1': CISCO_ISDNU_IF_MIB.ciuIfMIBCompliances,
'1.3.6.1.4.1.9.9.18.3.2': CISCO_ISDNU_IF_MIB.ciuIfMIBGroups,
'1.3.6.1.4.1.9.9.18.1.3.1': CISCO_ISDNU_IF_MIB.ciuIfEnableULoopStatusNotification,
'1.3.6.1.4.1.9.9.18.1.1.1.1.1': CISCO_ISDNU_IF_MIB.ciuIfType,
'1.3.6.1.4.1.9.9.18.1.1.2.1.1': CISCO_ISDNU_IF_MIB.ciuIfStatus,
'1.3.6.1.4.1.9.9.18.1.1.2.1.2': CISCO_ISDNU_IF_MIB.ciuIfEocCommand,
'1.3.6.1.4.1.9.9.18.1.1.2.1.3': CISCO_ISDNU_IF_MIB.ciuIfOverHeadBits,
'1.3.6.1.4.1.9.9.18.1.1.2.1.4': CISCO_ISDNU_IF_MIB.ciuIfFebeErrors,
'1.3.6.1.4.1.9.9.18.1.1.2.1.5': CISCO_ISDNU_IF_MIB.ciuIfNebeErrors,
'1.3.6.1.4.1.9.9.18.1.1.2.1.6': CISCO_ISDNU_IF_MIB.ciuIfLoopStatus,
'1.3.6.1.4.1.9.9.18.1.2.1.1.1': CISCO_ISDNU_IF_MIB.ciuIfExternalSTPortNumber,
'1.3.6.1.4.1.9.9.18.1.2.1.1.2': CISCO_ISDNU_IF_MIB.ciuIfExternalSTPortStatus,
'1.3.6.1.4.1.9.9.18.2.0.1': CISCO_ISDNU_IF_MIB.ciuIfLoopStatusNotification,
'1.3.6.1.4.1.9.9.18.3.2.1': CISCO_ISDNU_IF_MIB.ciuIfMIBGroup,
}
|
xiangke/pycopia
|
mibs/pycopia/mibs/CISCO_ISDNU_IF_MIB_OID.py
|
Python
|
lgpl-2.1
| 1,588
|
# $Id$
#
import inc_const as const
PJSUA = ["--null-audio --max-calls=1 --dis-codec=pcma --auto-answer=200"]
PJSUA_EXPECTS = []
|
asterisk/pjproject
|
tests/pjsua/scripts-sipp/uac-ticket-1866-reinv-after-failed-nego.py
|
Python
|
gpl-2.0
| 130
|
import copy
cards = [[0, 'Q'], [2, '6'], [1, 'K'],
[1, '8'], [2, '10'], [2, '4'],
[3, '4'], [0, '4'], [1, '3'],
[2, '5'], [0, 'K'], [3, 'A'],
[1, 'J'], [0, '3'], [0, '9']]
def cardTypeAsInt( card ):
if card[1].isdigit():
return int(card[1])
if card[1] == "J":
return 11
elif card[1] == "Q":
return 12
elif card[1] == "K":
return 13
else:
return 14
def compareCards( card1, card2 ):
    print("comparing cards:", card1, card2)
    if (card1[0] == card2[0]):
        print("same suit")
        if ( cardTypeAsInt( card1 ) < cardTypeAsInt( card2 ) ):
            print("suit1")
            return True
        else:
            print("suit2")
            return False
    else:
        print("else")
        return card1[0] < card2[0]
def bubbleSort( array, swap_fn ):
    sorted = copy.deepcopy(array)
    # Insertion-style sort: bubble each new element left while swap_fn says
    # it should come before its predecessor.
    for i in range( 1, len( sorted ) ):
        j = i
        while j > 0 and swap_fn( sorted[j], sorted[j-1] ):
            sorted[j-1], sorted[j] = sorted[j], sorted[j-1]
            j -= 1
    return sorted
print( cards )
print( bubbleSort( cards, compareCards) )
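# Editor's check: compareCards orders by (suit, rank), so the result should
# match Python's built-in stable sort with an equivalent key.
assert bubbleSort( cards, compareCards ) == sorted( cards, key=lambda c: (c[0], cardTypeAsInt(c)) )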
|
malja/cvut-python
|
cviceni08/01_trideni_karet.py
|
Python
|
mit
| 1,202
|
# -*- coding: utf-8 -*-
"""All prompters registered in this module must have a function signature of
(prompt, *args, **kwargs), and must return an answer. If a back event occurs, the
prompter should raise `QuestionnaireGoBack`.
Extending questionnaire is as simple as writing your own prompter and passing it to
`add`.
"""
from __future__ import print_function
import sys
import curses
import os
import getpass
from contextlib import contextmanager
from pick import Picker
prompters = {}
class QuestionnaireGoBack(Exception):
"""Signals user went back instead of answering question."""
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def is_string(thing):
if sys.version_info < (3, 0):
return isinstance(thing, basestring)
return isinstance(thing, str)
def register(key='function'):
"""Add decorated functions to prompters dict.
"""
def decorate(func):
prompters[key] = func
return func
return decorate
@register(key='one')
def one(prompt, *args, **kwargs):
"""Instantiates a picker, registers custom handlers for going back,
and starts the picker.
"""
indicator = '‣'
if sys.version_info < (3, 0):
indicator = '>'
def go_back(picker):
return None, -1
options, verbose_options = prepare_options(args)
idx = kwargs.get('idx', 0)
picker = Picker(verbose_options, title=prompt, indicator=indicator, default_index=idx)
picker.register_custom_handler(ord('h'), go_back)
picker.register_custom_handler(curses.KEY_LEFT, go_back)
with stdout_redirected(sys.stderr):
option, index = picker.start()
if index == -1:
raise QuestionnaireGoBack
if kwargs.get('return_index', False):
# `one` was called by a special client, e.g. `many`
return index
return options[index]
@register(key='many')
def many(prompt, *args, **kwargs):
"""Calls `pick` in a while loop to allow user to pick many
options. Returns a list of chosen options.
"""
def get_options(options, chosen):
return [options[i] for i, c in enumerate(chosen) if c]
def get_verbose_options(verbose_options, chosen):
no, yes = ' ', '✔'
if sys.version_info < (3, 3):
no, yes = ' ', '@'
opts = ['{} {}'.format(yes if c else no, verbose_options[i]) for i, c in enumerate(chosen)]
return opts + ['{}{}'.format(' ', kwargs.get('done', 'done...'))]
options, verbose_options = prepare_options(args)
chosen = [False] * len(options)
index = kwargs.get('idx', 0)
default = kwargs.get('default', None)
if isinstance(default, list):
for idx in default:
chosen[idx] = True
if isinstance(default, int):
chosen[default] = True
while True:
try:
index = one(prompt, *get_verbose_options(verbose_options, chosen), return_index=True, idx=index)
except QuestionnaireGoBack:
if any(chosen):
raise QuestionnaireGoBack(0)
else:
raise QuestionnaireGoBack
if index == len(options):
return get_options(options, chosen)
chosen[index] = not chosen[index]
def prepare_options(options):
"""Create options and verbose options from strings and non-string iterables in
`options` array.
"""
options_, verbose_options = [], []
for option in options:
if is_string(option):
options_.append(option)
verbose_options.append(option)
else:
options_.append(option[0])
verbose_options.append(option[1])
return options_, verbose_options
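# Editor's sketch: plain strings and (value, verbose_label) pairs can be mixed:
#   prepare_options(['a', ('b', 'B (verbose)')])
#   -> (['a', 'b'], ['a', 'B (verbose)'])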
@register(key='raw')
def raw(prompt, *args, **kwargs):
"""Calls input to allow user to input an arbitrary string. User can go
back by entering the `go_back` string. Works in both Python 2 and 3.
"""
go_back = kwargs.get('go_back', '<')
type_ = kwargs.get('type', str)
default = kwargs.get('default', '')
with stdout_redirected(sys.stderr):
while True:
try:
if kwargs.get('secret', False):
answer = getpass.getpass(prompt)
elif sys.version_info < (3, 0):
answer = raw_input(prompt)
else:
answer = input(prompt)
if not answer:
answer = default
if answer == go_back:
raise QuestionnaireGoBack
return type_(answer)
except ValueError:
eprint('\n`{}` is not a valid `{}`\n'.format(answer, type_))
@contextmanager
def stdout_redirected(to):
"""Lifted from: https://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python
This is the only way I've found to redirect stdout with curses. This way the
output from questionnaire can be piped to another program, without piping
what's written to the terminal by the prompters.
"""
stdout = sys.stdout
stdout_fd = fileno(stdout)
# copy stdout_fd before it is overwritten
with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
stdout.flush() # flush library buffers that dup2 knows nothing about
try:
os.dup2(fileno(to), stdout_fd) # $ exec >&to
except ValueError: # filename
with open(to, 'wb') as to_file:
os.dup2(to_file.fileno(), stdout_fd) # $ exec > to
try:
yield stdout # allow code to be run with the redirected stdout
finally:
# restore stdout to its previous value
stdout.flush()
os.dup2(copied.fileno(), stdout_fd)
def fileno(file_or_fd):
fd = getattr(file_or_fd, 'fileno', lambda: file_or_fd)()
if not isinstance(fd, int):
raise ValueError('Expected a file (`.fileno()`) or a file descriptor')
return fd
|
kylebebak/questionnaire
|
questionnaire/prompters.py
|
Python
|
mit
| 5,931
|
from django.contrib import admin
from .models import ProjectStatus, ProjectStatusAdmin, Gallery, GalleryAdmin, ProgramLinks,ProgramLinksAdmin, Link, LinkAdmin
class GalleryAdmin(admin.ModelAdmin):
change_form_template = 'customdashboard/admin/change_form.html'
admin.site.register(ProjectStatus, ProjectStatusAdmin)
admin.site.register(Gallery, GalleryAdmin)
admin.site.register(ProgramLinks, ProgramLinksAdmin)
admin.site.register(Link, LinkAdmin)
|
mercycorps/tola-activity
|
htdocs/customdashboard/admin.py
|
Python
|
gpl-2.0
| 464
|
import unittest
from openmdao.main.api import VariableTree, Component, Assembly, set_as_top
from openmdao.lib.datatypes.api import VarTree, Float, List
class RegionVT(VariableTree):
c0 = Float()
c1 = Float()
class SectionVT(VariableTree):
s0 = Float()
s1 = Float()
regions = List()
def add_region(self, name):
self.add(name, VarTree(RegionVT()))
self.regions.append(name)
class BladeVT(VariableTree):
def add_section(self, name):
self.add(name, VarTree(SectionVT()))
class SplinedRegion(Component):
region = VarTree(RegionVT(), iotype='out')
def execute(self):
#print 'computing splines for c0 and c1'
self.region.c0 = 1.0 #np.sin(np.linspace(0,1,10))
self.region.c1 = 2.0 #np.cos(np.linspace(0,1,10))
class BladeSection(Assembly):
section = VarTree(SectionVT(), iotype='out')
def __init__(self, nr):
super(BladeSection, self).__init__()
for i in range(nr):
name = 'region%02d' % i
#print 'adding region ', name
self.add(name, SplinedRegion())
self.driver.workflow.add(name)
self.section.add_region(name)
self.connect(name + '.region', 'section.' + name)
class BladeStructure(Assembly):
st3d = VarTree(BladeVT(), iotype='out')
def configure(self):
self.add('section0', BladeSection(5))
self.driver.workflow.add('section0')
self.st3d.add_section('section0')
# Copy vartree before connecting to make sure it matches.
self.st3d.section0 = self.section0.section.copy()
self.connect('section0.section', 'st3d.section0')
class Builder(Component):
st3d = VarTree(BladeVT(), iotype='in')
def execute(self):
for region in self.st3d.section0.regions:
#print region, getattr(self.st3d.section0, region)
getattr(self.st3d.section0, region)
class Blade(Assembly):
def configure(self):
self.add('blade_st', BladeStructure())
self.add('builder', Builder())
self.driver.workflow.add(['blade_st','builder'])
# Copy vartree before connecting to make sure it matches.
self.builder.st3d = self.blade_st.st3d.copy()
self.connect('blade_st.st3d', 'builder.st3d')
class VTreeCopyTestCase(unittest.TestCase):
def test_copy(self):
top = set_as_top(Blade())
top.run() # this raised an Exception when the bug was present
if __name__ == '__main__':
unittest.main()
|
HyperloopTeam/FullOpenMDAO
|
lib/python2.7/site-packages/openmdao.main-0.13.0-py2.7.egg/openmdao/main/test/test_vartree_copy.py
|
Python
|
gpl-2.0
| 2,557
|
from mock import Mock
import pytest
@pytest.fixture
def dummy_input_layer():
from lasagne.layers.input import InputLayer
input_layer = InputLayer((2, 3, 4))
mock = Mock(input_layer)
mock.shape = input_layer.shape
mock.input_var = input_layer.input_var
mock.output_shape = input_layer.output_shape
return mock
|
jordipons/EUSIPCO2017
|
src/lasagne/tests/layers/conftest.py
|
Python
|
mit
| 339
|
from pyqtgraph.Qt import QtGui, QtCore
__all__ = ['JoystickButton']
class JoystickButton(QtGui.QPushButton):
sigStateChanged = QtCore.Signal(object, object) ## self, state
def __init__(self, parent=None):
QtGui.QPushButton.__init__(self, parent)
self.radius = 200
self.setCheckable(True)
self.state = None
self.setState(0,0)
self.setFixedWidth(50)
self.setFixedHeight(50)
def mousePressEvent(self, ev):
self.setChecked(True)
self.pressPos = ev.pos()
ev.accept()
def mouseMoveEvent(self, ev):
dif = ev.pos()-self.pressPos
self.setState(dif.x(), -dif.y())
def mouseReleaseEvent(self, ev):
self.setChecked(False)
self.setState(0,0)
def wheelEvent(self, ev):
ev.accept()
def doubleClickEvent(self, ev):
ev.accept()
def getState(self):
return self.state
def setState(self, *xy):
xy = list(xy)
d = (xy[0]**2 + xy[1]**2)**0.5
nxy = [0,0]
for i in [0,1]:
if xy[i] == 0:
nxy[i] = 0
else:
nxy[i] = xy[i]/d
if d > self.radius:
d = self.radius
d = (d/self.radius)**2
xy = [nxy[0]*d, nxy[1]*d]
w2 = self.width()/2.
h2 = self.height()/2
self.spotPos = QtCore.QPoint(w2*(1+xy[0]), h2*(1-xy[1]))
self.update()
if self.state == xy:
return
self.state = xy
self.sigStateChanged.emit(self, self.state)
def paintEvent(self, ev):
QtGui.QPushButton.paintEvent(self, ev)
p = QtGui.QPainter(self)
p.setBrush(QtGui.QBrush(QtGui.QColor(0,0,0)))
p.drawEllipse(self.spotPos.x()-3,self.spotPos.y()-3,6,6)
def resizeEvent(self, ev):
self.setState(*self.state)
QtGui.QPushButton.resizeEvent(self, ev)
if __name__ == '__main__':
app = QtGui.QApplication([])
w = QtGui.QMainWindow()
b = JoystickButton()
w.setCentralWidget(b)
w.show()
w.resize(100, 100)
def fn(b, s):
print("state changed:", s)
b.sigStateChanged.connect(fn)
## Start Qt event loop unless running in interactive mode.
import sys
if sys.flags.interactive != 1:
app.exec_()
|
ibressler/pyqtgraph
|
pyqtgraph/widgets/JoystickButton.py
|
Python
|
mit
| 2,468
|
#!/usr/bin/env python
"""
Select only the longest transcript for each gene in a GFF file.
Usage:
select_longest_mrna.py -i <input_GFF_file> -o <output_GFF_file>
Options:
-h --help Show this screen.
--version Show version.
-i <input_GFF_file> Input GFF file.
-o <output_GFF_file> Output GFF file.
"""
import sys
print
modules = ["docopt", "os"]
exit_flag = False
for module in modules:
try:
__import__(module)
except ImportError:
exit_flag = True
sys.stderr.write("Error: Python module " + module + " is not installed.\n")
if exit_flag:
sys.stderr.write("You can install these modules with a command: pip install <module>\n")
sys.stderr.write("(Administrator privileges may be required.)\n")
sys.exit(1)
from docopt import docopt
from os.path import exists
from os.path import isfile
from re import search
from os import remove
def filter_gff(input_gff_filename, output_gff_filename):
print 'Input GFF file: ', input_gff_filename
print 'Output GFF file: ', output_gff_filename
print
print 'Scan GFF to find the longest transcript for each gene...'
with open(input_gff_filename, 'r') as input_gff_file:
mrna_processed = 0
max_lens = dict() # key: gene_id, value: longest mrna length
max_mrna_id = dict() # key: gene_id, value: longest mrna id
for record in input_gff_file:
if record.startswith('#') or record == '\n':
continue
record_fields = record.split()
record_type = record_fields[2]
record_comment = ''.join(record_fields[8:])
if record_type == 'mRNA' or record_type == 'mrna':
mrna_processed += 1
if mrna_processed % 100 == 0:
print mrna_processed, 'mRNA processed.'
gene_id = search(r'Parent=([a-zA-Z0-9]*)', record_comment).group(1)
mrna_id = search(r'ID=([a-zA-Z0-9]*)', record_comment).group(1)
mrna_start = int(record_fields[3])
mrna_end = int(record_fields[4])
mrna_len = mrna_end - mrna_start + 1
if gene_id in max_lens:
if mrna_len > max_lens[gene_id]:
max_lens[gene_id] = mrna_len
max_mrna_id[gene_id] = mrna_id
else:
max_lens[gene_id] = mrna_len
max_mrna_id[gene_id] = mrna_id
print 'Finished.'
max_mrna_id_list = [value for key, value in max_mrna_id.items()]
max_mrna_id_set = set(max_mrna_id_list)
print 'Exclude mRNA, exon, and CDS records corresponding to the excluded mRNA ID list...',
with open(output_gff_filename, 'w') as output_file, \
open(input_gff_filename, 'r') as input_file:
for record in input_file:
if record.startswith('#') or record == '\n':
output_file.write(record)
continue
record_fields = record.split()
record_type = record_fields[2]
record_comment = ''.join(record_fields[8:])
if record_type == 'mRNA' or record_type == 'mrna':
mrna_id = search(r'ID=([a-zA-Z0-9]*)', record_comment).group(1)
elif record_type == 'CDS' or record_type == 'exon':
mrna_id = search(r'Parent=([a-zA-Z0-9]*)', record_comment).group(1)
else:
output_file.write(record)
continue
if mrna_id not in max_mrna_id_set:
continue
output_file.write(record)
print 'Finished.\n'
print 'Finished selecting.'
print mrna_processed, 'mRNA records were processed.'
if __name__ == '__main__':
arguments = docopt(__doc__, version='select_longest_mrna 0.1')
input_gff_filename = arguments["-i"]
if not exists(input_gff_filename):
print "Error: Can't find an input GFF file: no such file '" + \
input_gff_filename + "'. Exit.\n"
sys.exit(1)
if not isfile(input_gff_filename):
print "Error: Input GFF file must be a regular file. " + \
"Something else given. Exit.\n"
sys.exit(1)
output_gff_filename = arguments["-o"].rstrip('/')
filter_gff(input_gff_filename, output_gff_filename)
|
sidorov-si/ngser
|
select_longest_mrna.py
|
Python
|
gpl-2.0
| 4,346
|
"""client: basic interaction with a daemon"""
|
RoPython/jarvis
|
jarvis/worker/client.py
|
Python
|
mit
| 46
|
# -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports
#+---------------------------------------------------------------------------+
import logging
import os
#+---------------------------------------------------------------------------+
#| Local application imports
#+---------------------------------------------------------------------------+
from netzob.UI.NetzobAbstractView import NetzobAbstractView
class ApplicativeDataManagerView(NetzobAbstractView):
def __init__(self, controller):
gladeFile = os.path.join("traceManager", "applicativeDataManagerDialog.glade")
super(ApplicativeDataManagerView, self).__init__(controller, gladeFile, root="applicativeDataManagerWindow", parent=None)
self._getObjects(['applicativeDataTreeStore', 'applicativeDataTreeView', 'importApplicationDataDialog'])
self.logger = logging.getLogger(__name__)
self.refresh()
def refresh(self):
self.applicativeDataTreeStore.clear()
# Fullfill the treeview with current applicative data of the session
for applicativeData in self.getController().getSession().getApplicativeData():
self.applicativeDataTreeStore.append([str(applicativeData.getID()), applicativeData.getName(), applicativeData.getType(), applicativeData.getValue()])
def getSelectedApplicativeData(self):
"""getSelectedApplicativeData:
Computes user's selection on applicative data treestore and retrives the associated
applicativeData by their ID.
@return the list of selected applicative data (list can be empty)"""
(model, rows) = self.applicativeDataTreeView.get_selection().get_selected_rows()
selectedApplicativeData = []
if rows is not None:
for row in rows:
applicativeData = self.controller.getSession().getApplicativeDataByID(model[row][0])
if applicativeData is not None:
selectedApplicativeData.append(applicativeData)
return selectedApplicativeData
|
nagyistoce/netzob
|
src/netzob/UI/TraceManager/Views/ApplicativeDataManagerView.py
|
Python
|
gpl-3.0
| 4,007
|
"""Extract reference documentation from the NumPy source tree.
Adapted from example here:
https://github.com/richford/cloudknot/tree/master/doc/sphinxext/docscrape.py
Authors
-------
* richford
* arokem
"""
#
# Original Copyright (c) 2017 Adam Richie-Halford, Ariel Rokem. All rights reserved.
#
from __future__ import division, absolute_import, print_function
import inspect
import textwrap
import re
import pydoc
from warnings import warn
import collections
import sys
class Reader(object):
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data, list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l+1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self, n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
class NumpyDocString(collections.Mapping):
def __init__(self, docstring, config={}):
docstring = textwrap.dedent(docstring).split('\n')
self._doc = Reader(docstring)
self._parsed_data = {
'Signature': '',
'Summary': [''],
'Extended Summary': [],
'Parameters': [],
'Returns': [],
'Yields': [],
'Raises': [],
'Warns': [],
'Other Parameters': [],
'Attributes': [],
'Methods': [],
'See Also': [],
'Notes': [],
'Warnings': [],
'References': '',
'Examples': '',
'index': {}
}
self._parse()
def __getitem__(self, key):
return self._parsed_data[key]
def __setitem__(self, key, val):
if key not in self._parsed_data:
warn("Unknown section %s" % key)
else:
self._parsed_data[key] = val
def __iter__(self):
return iter(self._parsed_data)
def __len__(self):
return len(self._parsed_data)
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
def _strip(self, doc):
i = 0
j = 0
for i, line in enumerate(doc):
if line.strip():
break
for j, line in enumerate(doc[::-1]):
if line.strip():
break
return doc[i:len(doc)-j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self, content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
params.append((arg_name, arg_type, desc))
return params
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
if not name:
return
name, role = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = []
for line in content:
if not line.strip():
continue
m = self._name_rgx.match(line)
if m and line[m.end():].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[:m.end()], line[m.end():]
rest = [line.split(':', 1)[1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(' '):
push_item(current_func, rest)
current_func = None
if ',' in line:
for func in line.split(','):
if func.strip():
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
return items
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if len(section) > 1:
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
# If several signatures present, take the last one
while True:
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
continue
break
if summary is not None:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
sections = list(self._read_sections())
section_names = set([section for section, content in sections])
has_returns = 'Returns' in section_names
has_yields = 'Yields' in section_names
# We could do more tests, but we are not. Arbitrarily.
if has_returns and has_yields:
msg = 'Docstring contains both a Returns and Yields section.'
raise ValueError(msg)
for (section, content) in sections:
if not section.startswith('..'):
section = (s.capitalize() for s in section.split(' '))
section = ' '.join(section)
if section in ('Parameters', 'Returns', 'Yields', 'Raises',
'Warns', 'Other Parameters', 'Attributes',
'Methods'):
self[section] = self._parse_param_list(content)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
elif section == 'See Also':
self['See Also'] = self._parse_see_also(content)
else:
self[section] = content
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name)*symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
if self['Signature']:
return [self['Signature'].replace('*', '\*')] + ['']
else:
return ['']
def _str_summary(self):
if self['Summary']:
return self['Summary'] + ['']
else:
return []
def _str_extended_summary(self):
if self['Extended Summary']:
return self['Extended Summary'] + ['']
else:
return []
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param, param_type, desc in self[name]:
if param_type:
out += ['%s : %s' % (param, param_type)]
else:
out += [param]
out += self._str_indent(desc)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
def _str_see_also(self, func_role):
if not self['See Also']:
return []
out = []
out += self._str_header("See Also")
last_had_desc = True
for func, desc, role in self['See Also']:
if role:
link = ':%s:`%s`' % (role, func)
elif func_role:
link = ':%s:`%s`' % (func_role, func)
else:
link = "`%s`_" % func
if desc or last_had_desc:
out += ['']
out += [link]
else:
out[-1] += ", %s" % link
if desc:
out += self._str_indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self, func_role=''):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Yields',
'Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_section('Warnings')
out += self._str_see_also(func_role)
for s in ('Notes', 'References', 'Examples'):
out += self._str_section(s)
for param_list in ('Attributes', 'Methods'):
out += self._str_param_list(param_list)
out += self._str_index()
return '\n'.join(out)
def indent(str, indent=4):
indent_str = ' '*indent
if str is None:
return indent_str
lines = str.split('\n')
return '\n'.join(indent_str + l for l in lines)
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n")
def header(text, style='-'):
return text + '\n' + style*len(text) + '\n'
class FunctionDoc(NumpyDocString):
def __init__(self, func, role='func', doc=None, config={}):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ''
NumpyDocString.__init__(self, doc)
if not self['Signature'] and func is not None:
func, func_name = self.get_func()
try:
# try to read signature
if sys.version_info[0] >= 3:
argspec = inspect.getfullargspec(func)
else:
argspec = inspect.getargspec(func)
argspec = inspect.formatargspec(*argspec)
argspec = argspec.replace('*', '\*')
signature = '%s%s' % (func_name, argspec)
except TypeError as e:
signature = '%s()' % func_name
self['Signature'] = signature
def get_func(self):
func_name = getattr(self._f, '__name__', self.__class__.__name__)
if inspect.isclass(self._f):
func = getattr(self._f, '__call__', self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
out = ''
func, func_name = self.get_func()
signature = self['Signature'].replace('*', '\*')
roles = {'func': 'function',
'meth': 'method'}
if self._role:
if self._role not in roles:
print("Warning: invalid role %s" % self._role)
out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''),
func_name)
out += super(FunctionDoc, self).__str__(func_role=self._role)
return out
class ClassDoc(NumpyDocString):
extra_public_methods = ['__call__']
def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
config={}):
if not inspect.isclass(cls) and cls is not None:
raise ValueError("Expected a class or None, but got %r" % cls)
self._cls = cls
self.show_inherited_members = config.get(
'show_inherited_class_members', True)
if modulename and not modulename.endswith('.'):
modulename += '.'
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
if config.get('show_class_members', True):
def splitlines_x(s):
if not s:
return []
else:
return s.splitlines()
for field, items in [('Methods', self.methods),
('Attributes', self.properties)]:
if not self[field]:
doc_list = []
for name in sorted(items):
try:
doc_item = pydoc.getdoc(getattr(self._cls, name))
doc_list.append((name, '', splitlines_x(doc_item)))
except AttributeError:
pass # method doesn't exist
self[field] = doc_list
@property
def methods(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if ((not name.startswith('_')
or name in self.extra_public_methods)
and isinstance(func, collections.Callable)
and self._is_show_member(name))]
@property
def properties(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if (not name.startswith('_') and
(func is None or isinstance(func, property) or
inspect.isgetsetdescriptor(func))
and self._is_show_member(name))]
def _is_show_member(self, name):
if self.show_inherited_members:
return True # show all class members
if name not in self._cls.__dict__:
return False # class member is inherited, we do not show it
return True
|
ccurtis7/brain-diffusion
|
docs/source/sphinxext/docscrape.py
|
Python
|
bsd-2-clause
| 17,797
|
__author__ = 'Francesco Infante'
import ujson
from dpath import util
class Configuration(dict):
def __init__(self, *args, **kwargs):
super(Configuration, self).__init__(*args, **kwargs)
class Path(str):
def __init__(self, *args, **kwargs):
super(Path, self).__init__(*args, **kwargs)
def extract_from_tuple(data, path):
"""
Args:
data (tuple): tuple of records
path (Path): attribute to extract
Returns:
tuple: one attribute for record
Example:
data: ({'a':2, 'b':3}, {'a':1, 'b':2})
path: 'a'
returns: (2, 1)
"""
result = []
for x in data:
try:
result.append(util.get(x, path))
except:
result.append(None)
return tuple(result)
class JSONSource(object):
"""
Read from a file where each line is a document represented as a JSON object.
Args:
filename (str)
"""
def __init__(self, filename):
self.file = open(filename)
def next(self):
return ujson.loads(self.file.next())
def __iter__(self):
return self
|
francescoinfante/identity
|
identity/common.py
|
Python
|
lgpl-3.0
| 1,124
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-28 15:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20171121_2210'),
]
operations = [
migrations.AlterField(
model_name='projectusers',
name='status',
field=models.IntegerField(choices=[(0, 'Invited'), (1, 'Pending'), (2, 'Active')], default=0),
),
]
|
overture-stack/enrolment
|
enrolment-service/enrol/core/migrations/0003_auto_20171128_1516.py
|
Python
|
agpl-3.0
| 510
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
from django.contrib.contenttypes.models import ContentType
from django.core import urlresolvers
from django.db import models
DEFAULT_WIDTH = 300
DEFAULT_MAX_HEIGHT = 600
class DjangoAdminDialog(models.Model):
class Meta:
verbose_name = 'DjangoAdminDialog'
verbose_name_plural = 'DjangoAdminDialogs'
unique_together = ('url', 'element_id',)
url = models.CharField(max_length=255, blank=False, null=False)
element_id = models.CharField(max_length=255, blank=False, null=False)
title = models.CharField(max_length=255, blank=True, null=False)
body = models.TextField(blank=False, null=False)
active = models.BooleanField(blank=True, null=False, default=True)
width = models.SmallIntegerField(blank=True, null=False, default=DEFAULT_WIDTH)
max_height = models.SmallIntegerField(blank=True, null=False, default=DEFAULT_MAX_HEIGHT)
def __unicode__(self):
return "{} for {}".format(self.element_id, self.url)
def get_admin_url(self):
content_type = ContentType.objects.get_for_model(self.__class__)
return urlresolvers.reverse("admin:%s_%s_change" % (content_type.app_label, content_type.model), args=(self.id,))
|
quiqueporta/django-admin-dialog
|
django_admin_dialog/models.py
|
Python
|
gpl-2.0
| 1,297
|
# BSD 3-Clause License
#
# Copyright (c) 2016-21, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module containing code for caching all parsers
Description
-----------
This module is an import caching facility used for the I/O package in ConKit.
To allow fast access to individual modules required for :func:`read <conkit.io.read>`, :func:`write <conkit.io.write>`
and :func:`convert <conkit.io.convert>` functions, we don't want to import everything every time.
Thus, we only cache the results to ultimately import the bits we really require.
"""
__author__ = "Felix Simkovic"
__date__ = "19 Jun 2017"
__version__ = "0.13.1"
import collections
import copy
import glob
import importlib
import os
import re
RE_CLASS_DECLARATION = re.compile(r"^class\s+([A-Za-z0-9]+)\s*(.*):$")
CacheObj = collections.namedtuple("CacheObj", ["id", "module", "object", "group"])
class ParserCache(object):
"""Cache to hold handlers to each file parser"""
# This mask is for backward-compatibility and extensions to avoid re-writing the same algorithms
MASKS = {
"a2m": ["a2m", "jones"],
"a3m": ["a3m", "a3m-inserts"],
"aleigen": ["aleigen"],
"casp": ["casp", "casprr"],
"mapalign": ["mapalign"],
"pcons": ["flib", "pconsc", "pconsc2", "pconsc3", "saint2"],
"psicov": ["psicov", "metapsicov", "nebcon"],
}
BLINDFOLD = {"ContactFileParser", "GenericStructureParser", "DistanceFileParser", "SequenceFileParser"}
def __init__(self):
self._parsers = []
self.__construct()
def __contains__(self, item):
return item in self.file_parsers
def __getitem__(self, item):
return self.file_parsers.get(item, None)
def __repr__(self):
nparsers = len(self.contact_file_parsers) + len(self.sequence_file_parsers)
return "{}(nparsers={})".format(self.__class__.__name__, nparsers)
@property
def contact_file_parsers(self):
return {c.id: c for c in self._parsers if c.group in ["ContactFileParser"]}
@property
def distance_file_parsers(self):
return {c.id: c for c in self._parsers if c.group in
["DistanceFileParser", "GenericStructureParser", "BinaryDistanceFileParser"]}
@property
def sequence_file_parsers(self):
return {c.id: c for c in self._parsers if c.group in ["SequenceFileParser"]}
@property
def binary_file_formats(self):
return {c.id: c for c in self._parsers if c.group in ["BinaryDistanceFileParser"]}
@property
def file_parsers(self):
return {c.id: c for c in self._parsers}
def __construct(self):
path = os.path.abspath(os.path.dirname(__file__))
for m in glob.glob(os.path.join(path, "[!_]*.py")):
with open(m, "r") as f_in:
lines = [RE_CLASS_DECLARATION.match(l.strip()) for l in f_in if RE_CLASS_DECLARATION.match(l.strip())]
for match in lines:
object_ = match.group(1)
ggroup = match.group(2)
if ggroup and ggroup.startswith("(") and ggroup.endswith(")"):
group = ggroup.replace("(", "").replace(")", "")
else:
group = "Ungrouped"
name = os.path.basename(m).replace(".py", "")
module = "conkit.io." + name
objname = object_.lower().replace("parser", "")
if object_ in ParserCache.BLINDFOLD:
continue
elif objname in ParserCache.MASKS:
self._parsers.extend(
[CacheObj(extra, module, object_, group) for extra in ParserCache.MASKS[objname]]
)
else:
self._parsers.append(CacheObj(objname, module, object_, group))
def import_module(self, format):
return importlib.import_module(self[format].module)
def import_class(self, format):
return getattr(self.import_module(format), PARSER_CACHE[format].object)
# Only allow this to be seen from outside
PARSER_CACHE = ParserCache()
|
rigdenlab/conkit
|
conkit/io/_cache.py
|
Python
|
bsd-3-clause
| 5,592
|
# -*- coding: utf-8 -*-
import unittest
import phandim
class TestPhanDim(unittest.TestCase):
"""
Unit tests to check phantom dimensions
"""
def test_constructor1(self):
bx = [1,2,3,4,5]
by = [3,2,5,1,6]
bz = [8,5,23,9,4,3]
pd = phandim.phandim(bx, by, bz)
self.assertTrue(phandim.phandim.check_sorted(pd.bx()))
self.assertTrue(phandim.phandim.check_sorted(pd.by()))
self.assertTrue(phandim.phandim.check_sorted(pd.bz()))
def test_constructor2(self):
bx = None
by = [3,2,5,1,2]
bz = [8,5,23,8,4,3]
with self.assertRaises(RuntimeError):
phandim.phandim(bx, by, bz)
def test_constructor3(self):
bx = [3,2,5,1,2]
by = None
bz = [8,5,23,8,4,3]
with self.assertRaises(RuntimeError):
phandim.phandim(bx, by, bz)
def test_constructor4(self):
bx = [3,2,5,1,2]
by = [8,5,23,8,4,3]
bz = None
with self.assertRaises(RuntimeError):
phandim.phandim(bx, by, bz)
def test_access1(self):
bx = [3,2,5,1,2]
by = [8,5,23,8,4,3]
bz = [8,5,23,9,4,3,90]
pd = phandim.phandim(bx, by, bz)
self.assertTrue( pd.nx() == len(bx)-1 )
self.assertTrue( pd.ny() == len(by)-1 )
self.assertTrue( pd.nz() == len(bz)-1 )
if __name__ == '__main__':
unittest.main()
|
Iwan-Zotow/runEGS
|
XcMCCore_Tests/test_phandim.py
|
Python
|
apache-2.0
| 1,516
|
"""
Universidad Nacional de La Matanza
Catedra Lenguajes y Compiladores - 2013
Mariano Francischini, Alejandro Giorgi, Roberto Bravo
TP Compilador - Analizador Lexico
"""
# coding=utf8
import re
from ctypes import c_float
tokens = [
'ID',
'OP_AS',
'OP_SUMA',
'PR_WHILE',
'OP_DISTINTO',
'CTE_ENT',
'CTE_REAL',
'CTE_STRING',
'FIN_LINEA',
'ABRE_BLOQUE',
'CIERRA_BLOQUE',
'PR_IF',
'PR_ELSE',
'DOS_PUNTOS',
'OP_MAYOR',
'OP_MAYORIGUAL',
'OP_MENOR',
'OP_MENORIGUAL',
'OP_MUL',
'OP_DIV',
'OP_RESTA',
'OP_IGUALDAD',
'OP_RESTO',
'PR_AND',
'PR_OR',
'PR_INT',
'PR_FLOAT',
'PR_DEC',
'PR_ENDEC',
'PR_DEF',
'PR_RETURN',
'PR_STRING',
'PR_TECLA',
'PR_BREAK',
'PR_STDIN',
'PR_CONTINUE',
'PAREN_ABRE',
'PAREN_CIERRA',
'COMA',
'PR_PRINT',
'PR_PRINTC',
'PR_PRINTNL',
'PR_BETWEEN',
'PR_PERCENT',
]
class Token(object):
def __init__(self, type, value):
self.type = type
self.value = value
self.lineno = None
self.lexpos = None
def __repr__(self):
return "<Token: %s, %s>" % (self.type, self.value.strip("\n"))
class Val(object):
""" Reg Exps """
CUALQUIER = "."
""" Estados automata """
E_FINAL = "F"
E_FIN_LINEA = "2"
""" Cotas """
MAX_CTE_STRING = 120
MIN_CTE_ENT = -32768
MAX_CTE_ENT = 32767
MAX_TAM_ID = 25
class Lexer(object):
"""
YYLEX
Analizador Lexico.
Automata finito de Terminales
"""
def __init__(self):
self.nivel_bloques = [0] # Nivel de tab del bloque actual
self.nivel_espacios_sentencia = 0 # Nivel de tab de la sentencia actual
"""Cuando se descubren varios tokens juntos se los envia \
a esta cola para irlos devolviendo en sucesivas llamadas
"""
self.cola_tokens = []
from automata import matriz
self.matriz = matriz
def input(self, text):
""" Metodo requerido por yyparse """
# Se apendea una marca de finde de fuente
self.text = text.strip('\n ') + "\x00"
self.generate = self.generator()
def iterar_estado(self, estado_actual, input_char):
for (simbolo, accion) in estado_actual.items():
if accion[0] == Val.E_FINAL:
self.estado = "0"
accion[3](self, simbolo)
return Token(type=accion[1], value=self.cadena[0:-1])
elif re.match(simbolo, input_char) is not None:
if accion[2] and re.match(accion[2], input_char):
continue # es un excepto, entonces continue
resultado = accion[3](self, simbolo)
if resultado is not None:
self.estado = "0"
return resultado
self.estado = accion[0]
return "NEXT" # Se pide el proximo caracter
# Fin de archivo
# Revisamos si es necesario cerrar bloques abiertos
tokens = []
if len(self.nivel_bloques) > 1:
for _ in self.nivel_bloques[1:]: # se ignora el primero (nivel 0)
token = Token(type="CIERRA_BLOQUE", value="}\n")
tokens.append(token)
fin_archivo = Token(type="$end", value="")
tokens.append(fin_archivo)
return tokens # Devolvemos todos los cierres juntos + fin de archivo
def generator(self):
"""
Automata
"""
self.estado = "0" # estado actual del automata. Inicial: cero
self.cadena = "" # Cadena que se acumula a medida que entran caracteres
i = 0
while i < len(self.text):
""" Primero nos fijamos si hay tokens encolados"""
if len(self.cola_tokens):
yield self.cola_tokens.pop()
continue
"""
Itera caracter por caracter
"""
input_char = self.text[i]
if self.estado == '0' and input_char == ' ':
""" Ignormos espacios como inicio de un token """
i += 1
continue
if input_char == '\r':
""" Se ignoran completamente esos caracteres """
i += 1
continue
self.cadena += input_char
estado_actual = self.matriz[self.estado]
""" Avanza por la matriz de estados """
token = self.iterar_estado(estado_actual, input_char)
if token == "NEXT":
""" Cuando se necesita consumir mas
input_char para determinar el token
"""
i += 1
continue
elif token == "IGNORE":
""" Por ej los comentarios"""
self.cadena = ""
continue
elif token == "ENCOLADOS":
""" Por ej cuando se encuentran
varios CIERRA_BLOQUE juntos
"""
self.cadena = ""
continue
elif isinstance(token, Token) and token.type == 'ID':
### Cotas de ID ####
if len(token.value) > Val.MAX_TAM_ID:
raise TypeError("ID supera cota maxima (%s): %s"
% (Val.MAX_TAM_ID, token.value))
"""
#######################
Palabras reservadas PR_
#######################
"""
if token.value == 'if':
token = Token(type="PR_IF", value="if")
elif token.value == 'while':
token = Token(type="PR_WHILE", value="while")
elif token.value == 'int':
token = Token(type="PR_INT", value="int")
elif token.value == 'float':
token = Token(type="PR_FLOAT", value="float")
elif token.value == 'dec':
token = Token(type="PR_DEC", value="dec")
elif token.value == 'endec':
token = Token(type="PR_ENDEC", value="endec")
elif token.value == 'def':
token = Token(type="PR_DEF", value="def")
elif token.value == 'return':
token = Token(type="PR_RETURN", value="return")
elif token.value == 'string':
token = Token(type="PR_STRING", value="string")
elif token.value == 'print':
token = Token(type="PR_PRINT", value="print")
elif token.value == 'between':
token = Token(type="PR_BETWEEN", value="between")
elif token.value == 'percent':
token = Token(type="PR_PERCENT", value="percent")
elif token.value == 'else':
token = Token(type="PR_ELSE", value="else")
elif token.value == 'tecla':
token = Token(type="PR_TECLA", value="tecla")
elif token.value == 'break':
token = Token(type="PR_BREAK", value="break")
elif token.value == 'printc':
token = Token(type="PR_PRINTC", value="printc")
elif token.value == 'stdin':
token = Token(type="PR_STDIN", value="stdin")
elif token.value == 'printnl':
token = Token(type="PR_PRINTNL", value="printnl")
elif token.value == 'continue':
token = Token(type="PR_CONTINUE", value="continue")
self.cadena = ""
# retorno del/de los token/s
if isinstance(token, list):
for tk in token:
yield tk
else:
yield token
def token(self):
try:
token = self.generate.next()
print token
return token
except StopIteration:
return None
"""
Metodos de acciones ejecutadas en cada estado del automata
"""
def acc_NADA(self, simbolo):
pass
def acc_RESET_NIVEL_SENTENCIA(self, simbolo):
self.nivel_espacios_sentencia = 0
##########################################################
# Cotas
def acc_CTE_STRING(self, simbolo):
# Ignoramos las comillas de abrir y cerrar
self.cadena = self.cadena[1:-1]
if len(self.cadena) > Val.MAX_CTE_STRING:
raise TypeError("CTE_STRING muy larga. Limite %s, largo: %s" % (Val.MAX_CTE_STRING, len(self.cadena)))
def acc_CTE_ENT(self, simbolo):
entero = int(self.cadena[:-1])
if entero > Val.MAX_CTE_ENT or entero < Val.MIN_CTE_ENT:
raise TypeError("CTE_ENT fuera de rango: %s a %s" % (Val.MAX_CTE_ENT, Val.MIN_CTE_ENT))
def acc_CTE_REAL(self, simbolo):
real = float(self.cadena[:-1])
# Validacion verdadera contra un flotante de C.
if str(c_float(real).value) == 'inf':
raise TypeError("CTE_REAL %s fuera de rango: " % real)
##########################################################
def acc_FIN_LINEA(self, simbolo):
if simbolo == " ": # Bloque (tab)
self.nivel_espacios_sentencia += 1
else:
# [-1] es el ultimo elemento de la lista
if self.nivel_bloques[-1] < self.nivel_espacios_sentencia:
self.nivel_bloques.append(self.nivel_espacios_sentencia),
token = Token(type="ABRE_BLOQUE", value=" {\n")
elif self.nivel_bloques[-1] > self.nivel_espacios_sentencia:
bloque = self.nivel_bloques.pop()
while bloque != self.nivel_espacios_sentencia:
token = Token(type="CIERRA_BLOQUE", value="}\n")
self.cola_tokens.append(token)
bloque = self.nivel_bloques.pop()
""" Si consumio todos, agregamos el nivel 0"""
self.nivel_bloques.append(bloque) # Agrego el ultimo bloque
return "ENCOLADOS"
else:
token = Token(type="FIN_LINEA", value="\n") # Se ignoran los epacios de tabulacion
# si no cambian el bloque
self.nivel_espacios_sentencia = 0 # Reset nivel
return token
return None
def acc_COMENTARIO(self, simbolo):
return "IGNORE"
|
xbx/compilador_yacc
|
lexer.py
|
Python
|
mit
| 10,492
|
#####################################################################################
# #
# Script to update Hostname #
# #
# Usage : wsadmin -lang jython -f updateHostName.py <node name > < host name > #
# #
#####################################################################################
def updateHostName(nodename,hostname):
nlist = AdminConfig.list('ServerIndex')
attr=[["hostName", hostname ]]
AdminConfig.modify(nlist,attr)
AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName BOOTSTRAP_ADDRESS -host '+ hostname +' -port 2809 -modifyShared true]')
AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName CSIV2_SSL_MUTUALAUTH_LISTENER_ADDRESS -host '+ hostname +' -port 9202 -modifyShared true]')
AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName CSIV2_SSL_SERVERAUTH_LISTENER_ADDRESS -host '+ hostname +' -port 9201 -modifyShared true]')
AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName ORB_LISTENER_ADDRESS -host '+ hostname +' -port 9900 -modifyShared true]')
AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName SAS_SSL_SERVERAUTH_LISTENER_ADDRESS -host '+ hostname +' -port 9901 -modifyShared true]')
AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName SOAP_CONNECTOR_ADDRESS -host '+ hostname +' -port 8878 -modifyShared true]')
AdminConfig.save()
updateHostName(sys.argv[0], sys.argv[1])
|
davidcurrie/ci.docker.websphere-traditional
|
network-deployment/appserver/updateHostName.py
|
Python
|
apache-2.0
| 1,804
|
from .iview import IView, IViewModel
import asyncio
import websockets
import subprocess
import os
from .model import Model
import json
from collections import OrderedDict
class WebView(IView):
"""
Web-based browser window.
"""
def __init__(self, address, port):
super(WebView, self).__init__()
self.address = address
self.port = port
def update(self):
# open internet browser window with JS code here
page = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'webviewclient.html')
subprocess.call(['open', page])
class WebViewModel(IViewModel):
def __init__(self, model, view):
super(WebViewModel, self).__init__(model=model, view=view)
self.address = view.address
self.port = view.port
self.json_str = ''
def build(self):
print('Building WebViewModel')
items = []
if self.model is not None and self.model.items is not None:
for fname, tree in self.model.items.items():
line = OrderedDict()
line['Filename'] = fname
for key, value in tree.items():
line[key] = value
items.append(line)
self.json_str = json.dumps(items)
print('Composed JSON string: ' + self.json_str)
async def hello(self, websocket, path):
while True:
print('Processing incoming message on server')
arg = await websocket.recv()
print('Message on server: ' + arg)
self.model = Model(arg, self.model.select_tags)
self.model.viewmodels.append(self)
print('Found %d items' % (len(self.model.items)))
self.build()
# await websocket.send('clear')
await websocket.send(self.json_str)
# for line in self.items:
# await websocket.send(line)
print('Finish')
def start_server(self):
start_server_async_function = websockets.serve(self.hello, self.address, self.port)
asyncio.get_event_loop().run_until_complete(start_server_async_function)
print('Started server')
def run_forever(self):
loop = asyncio.get_event_loop()
loop.run_forever()
print("Eternal loop interrupted")
|
pdyban/dicombrowser
|
dicomviewer/webview.py
|
Python
|
apache-2.0
| 2,327
|
'''
SAMPLE INPUT
Hello World
PESSE ACM
Good Day
OUTPUT
World Hello
ACM PESSE
Day Good
'''
import sys
for line in sys.stdin:
line=line.strip("\n")
s=list(line.split(' '))
s.reverse()
for x in s:
print x,
print
|
yusufshakeel/Python-Project
|
example/commanLine_read_file.py
|
Python
|
mit
| 219
|
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
from math import *
TESTPREAMBLE()
try:
# DEFAULT CONSTRUCTOR AND STRING CONVERTER
print("test 0 : default constructor and string converter")
# Default constructor
matrix0 = ComplexMatrix()
# String converter
print("matrix0 = ", repr(matrix0))
# CONSTRUCTOR WITH SIZE, OPERATOR() AND STRING CONVERTER
print(
"test number one : constructor with size, operator() and string converter")
# Constructor with size
matrix1 = ComplexMatrix(2, 2)
# Check operator() methods
matrix1[0, 0] = 1. + 1j
matrix1[1, 0] = 2. + 4j
matrix1[0, 1] = 3. - 1j
matrix1[1, 1] = 4.
# String converter
print("matrix1 = ", repr(matrix1))
# COPY CONSTRUCTOR AND STRING CONVERTER
print("test 2 : copy constructor and string converter")
# Copy constructor
matrix2 = ComplexMatrix(matrix1)
# String converter
print("matrix2 = ", repr(matrix2))
# GET DIMENSIONS METHODS
print("test 3 : dimension methods")
# Get dimension methods
print("matrix1's nbRows = ", matrix1.getNbRows())
print("matrix1's nbColumns = ", matrix1.getNbColumns())
# CONSTRUCTOR WITH COLLECTION
print("test 4 : constructor with collection method")
# Create the collection of values
elementsValues = NumericalComplexCollection()
elementsValues.add(1. - 1j)
elementsValues.add(2. - 1j)
elementsValues.add(3. - 1j)
elementsValues.add(4. + 1j)
elementsValues.add(5. + 1j)
elementsValues.add(6. + 1j)
# Check the content of the collection
print("elementsValues = ", repr(elementsValues))
# Check the constructor with collection
matrix0bis = ComplexMatrix(2, 2, elementsValues)
print("matrix0bis = ", repr(matrix0bis))
# TRANSPOSITION METHOD AND CONJUGATE METHOD
print("test 5 : transposition / conjugate method")
# Check transpose method
matrix4 = matrix1.transpose()
matrix5 = matrix1.conjugate()
print("matrix1 transposed = ", repr(matrix4))
print("matrix1 conjugated = ", repr(matrix5))
# TRANSPOSITION AND CONJUGATE COUPLED METHOD
print("transposition and conjugate method")
# Check transpose method
matrix6 = matrix1.conjugateTranspose()
print("matrix1 conjugated and transposed = ", repr(matrix6))
# ADDITION METHOD
print("test 6 : addition method")
# Check addition method : we check the operator and the symmetry of the
# operator, thus testing the comparison operator
sum1 = matrix1 + matrix4
sum2 = matrix4 + matrix1
print("sum1 = ", repr(sum1))
print("sum2 = ", repr(sum2))
print("sum1 equals sum2 = ", sum1 == sum2)
# SUBSTRACTION METHOD
print("test 7 : substraction method")
# Check substraction method
diff = matrix1 - matrix4
print("diff = ", repr(diff))
# MATRIX MULTIPLICATION METHOD
print("test 8 : matrix multiplication method")
# Check multiplication method
prod = matrix1 * matrix4
print("prod = ", repr(prod))
# MULTIPLICATION WITH A NUMERICAL POINT METHOD
print("test 9 : multiplication with a numerical point method")
# Create the numerical point
pt = NumericalPoint()
pt.add(1.)
pt.add(2.)
print("pt = ", repr(pt))
# Check the product method
ptResult = matrix1 * pt
print("ptResult = ", repr(ptResult))
# MULTIPLICATION AND DIVISION BY A NUMERICAL SCALAR METHODS
print(
"test 10 : multiplication and division by a numerical scalar methods")
# Check the multiplication method
s = 3. + 1j
scalprod1 = matrix1 * s
print("scalprod1 = ", repr(scalprod1))
# Check the division method
scaldiv1 = matrix1 / s
print("scaldiv1 = ", repr(scaldiv1))
# ISEMPTY METHOD
print("test 10 : isEmpty method")
# Check method isEmpty
matrix7 = ComplexMatrix()
matrix8 = ComplexMatrix()
print("matrix1 is empty = ", matrix1.isEmpty())
print("matrix5 is empty = ", matrix7.isEmpty())
print("matrix6 is empty = ", matrix8.isEmpty())
print("matrix0 is empty = ", matrix0.isEmpty())
# MULTIPLICATION WITH A NUMERICAL POINT METHOD
print("test 11 : multiplication with a numerical point method")
# Create the numerical point
pt_test = NumericalPoint()
pt_test.add(1.)
pt_test.add(2.)
print("pt_test = ", repr(pt_test))
A = ComplexMatrix(2, 2)
A[0, 0] = 0.5
A[1, 0] = -(sqrt(3.) / 2)
A[0, 1] = (sqrt(3.) / 2)
A[1, 1] = 0.5
B = A.transpose()
identity = B * A
# Check the product method
ptResult2 = identity * pt_test
print("A = ", repr(A))
print("B = ", repr(B))
print("identity = ", repr(identity))
print("ptResult2 = ", repr(ptResult2))
except:
import sys
print("t_ComplexMatrix_std.py", sys.exc_info()[0], sys.exc_info()[1])
|
dubourg/openturns
|
python/test/t_ComplexMatrix_std.py
|
Python
|
gpl-3.0
| 4,896
|
import numpy as np
import json
from query import *
def timeline_json(events, dates, query):
"""
"""
tl_json = { "events": [] }
num = 0
for event in events:
num = num+1
d_start = dates[event[0]-1]
d_end = dates[event[1]-1]
# Ajout de l'article marquant de l'intervalle
tmp = {}
res_req = query_articles(query, d_start, d_end)
tmp["start"] = res_req["hits"]["hits"][0]["_source"]["date_art"]
tmp["title"] = num
tmp["description"] = res_req["hits"]["hits"][0]["_source"]["article"].replace('\t','\n')
tmp["link"] = "http://localhost:5601/app/kibana#/doc/spliine/spliine/article?id=" + res_req["hits"]["hits"][0]["_source"]["id_art"]
tl_json['events'].append(tmp)
# Ajout de l'intervalle
tmp = {}
tmp["start"] = d_start
tmp["end"] = d_end
tmp["title"] = ""
tmp["durationEvent"] = 'true'
tmp["description"] = "Evenement entre " + d_start + " et " + d_end
tmp["link"] = ""
tl_json['events'].append(tmp)
return json.dumps(tl_json)
# Sauvegarde du `js`
# f = open('results.json', 'w')
# json.dump(tl_json, f, indent=1)
|
jcrouzet/temporal-search-engine
|
src/timeline_json.py
|
Python
|
gpl-3.0
| 1,218
|
# BlenderBIM Add-on - OpenBIM Blender Add-on
# Copyright (C) 2020, 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of BlenderBIM Add-on.
#
# BlenderBIM Add-on is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BlenderBIM Add-on is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BlenderBIM Add-on. If not, see <http://www.gnu.org/licenses/>.
import bpy
from . import ui, operator, prop
classes = (
operator.LoadProjectSpaceBoundaries,
operator.LoadSpaceBoundaries,
operator.LoadBoundary,
operator.SelectProjectBoundaries,
operator.ColourByRelatedBuildingElement,
operator.EnableEditingBoundary,
operator.DisableEditingBoundary,
operator.EditBoundaryAttributes,
operator.UpdateBoundaryGeometry,
ui.BIM_PT_Boundary,
ui.BIM_PT_SpaceBoundaries,
ui.BIM_PT_SceneBoundaries,
prop.BIMBoundaryProperties,
)
def register():
bpy.types.Object.bim_boundary_properties = bpy.props.PointerProperty(type=prop.BIMBoundaryProperties)
def unregister():
del bpy.types.Object.bim_boundary_properties
|
IfcOpenShell/IfcOpenShell
|
src/blenderbim/blenderbim/bim/module/boundary/__init__.py
|
Python
|
lgpl-3.0
| 1,503
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from setuptools import find_packages
from setuptools import setup
install_requires = [
'GitPython >= 2.0.8, < 2.1',
'MarkupSafe >= 0.23, < 1.0',
'PyYAML >= 3.11, < 4.0',
'decorator >= 4.0.10, < 5.0',
'docker-py >= 1.9.0, < 2.0',
'jsonpath-rw >= 1.4.0, < 2.0',
'pluggy >= 0.3.1, < 1.0',
'ply >= 3.8, < 4.0',
'py >= 1.4.31, < 2.0',
'requests >= 2.11.0, < 3.0',
'six >= 1.10.0, < 2.0',
'websocket-client >= 0.37.0, < 1.0'
]
tests_require = [
'pytest',
]
setup(
name='Master-Builder Build System',
version='1.0.0',
description='A Universal/Plugable Build System',
url='https://github.com/silverbp/master-builder',
author='Silver Blueprints LLC',
license='Apache License 2.0',
packages=find_packages(exclude=['tests.*', 'tests']),
include_package_data=True,
test_suite='nose.collector',
install_requires=install_requires,
tests_require=tests_require,
entry_points="""
[console_scripts]
mb=mb.cli.main:main
""",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
)
|
silverbp/master-builder
|
setup.py
|
Python
|
apache-2.0
| 1,465
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
# The following keys are used in the segment dictionaries passed via
# the driver API. These are defined separately from similar keys in
# neutron.extensions.providernet so that drivers don't need to change
# if/when providernet moves to the core API.
#
ID = 'id'
NETWORK_TYPE = 'network_type'
PHYSICAL_NETWORK = 'physical_network'
SEGMENTATION_ID = 'segmentation_id'
# The following keys are used in the binding level dictionaries
# available via the binding_levels and original_binding_levels
# PortContext properties.
BOUND_DRIVER = 'bound_driver'
BOUND_SEGMENT = 'bound_segment'
@six.add_metaclass(abc.ABCMeta)
class TypeDriver(object):
"""Define stable abstract interface for ML2 type drivers.
ML2 type drivers each support a specific network_type for provider
and/or tenant network segments. Type drivers must implement this
abstract interface, which defines the API by which the plugin uses
the driver to manage the persistent type-specific resource
allocation state associated with network segments of that type.
Network segments are represented by segment dictionaries using the
NETWORK_TYPE, PHYSICAL_NETWORK, and SEGMENTATION_ID keys defined
above, corresponding to the provider attributes. Future revisions
of the TypeDriver API may add additional segment dictionary
keys. Attributes not applicable for a particular network_type may
either be excluded or stored as None.
"""
@abc.abstractmethod
def get_type(self):
"""Get driver's network type.
:returns network_type value handled by this driver
"""
pass
@abc.abstractmethod
def initialize(self):
"""Perform driver initialization.
Called after all drivers have been loaded and the database has
been initialized. No abstract methods defined below will be
called prior to this method being called.
"""
pass
@abc.abstractmethod
def is_partial_segment(self, segment):
"""Return True if segment is a partially specified segment.
:param segment: segment dictionary
:returns: boolean
"""
@abc.abstractmethod
def validate_provider_segment(self, segment):
"""Validate attributes of a provider network segment.
:param segment: segment dictionary using keys defined above
:raises: neutron.common.exceptions.InvalidInput if invalid
Called outside transaction context to validate the provider
attributes for a provider network segment. Raise InvalidInput
if:
- any required attribute is missing
- any prohibited or unrecognized attribute is present
- any attribute value is not valid
The network_type attribute is present in segment, but
need not be validated.
"""
pass
@abc.abstractmethod
def reserve_provider_segment(self, session, segment):
"""Reserve resource associated with a provider network segment.
:param session: database session
:param segment: segment dictionary
:returns: segment dictionary
Called inside transaction context on session to reserve the
type-specific resource for a provider network segment. The
segment dictionary passed in was returned by a previous
validate_provider_segment() call.
"""
pass
@abc.abstractmethod
def allocate_tenant_segment(self, session):
"""Allocate resource for a new tenant network segment.
:param session: database session
:returns: segment dictionary using keys defined above
Called inside transaction context on session to allocate a new
tenant network, typically from a type-specific resource
pool. If successful, return a segment dictionary describing
the segment. If a tenant network segment cannot be allocated
(i.e. tenant networks not supported or resource pool is
exhausted), return None.
"""
pass
@abc.abstractmethod
def release_segment(self, session, segment):
"""Release network segment.
:param session: database session
:param segment: segment dictionary using keys defined above
Called inside transaction context on session to release a
tenant or provider network's type-specific resource. Runtime
errors are not expected, but raising an exception will result
in rollback of the transaction.
"""
pass
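# A minimal sketch of the TypeDriver interface above, for a
# hypothetical network type that needs no per-segment resource
# allocation (illustrative only, not an in-tree driver):
class NoAllocTypeDriver(TypeDriver):

    def get_type(self):
        return 'noalloc'  # hypothetical network_type value

    def initialize(self):
        pass  # no persistent allocation state to set up

    def is_partial_segment(self, segment):
        return False  # segments of this type are always fully specified

    def validate_provider_segment(self, segment):
        # A real driver would raise InvalidInput for missing,
        # prohibited, or invalid attributes; nothing to check here.
        pass

    def reserve_provider_segment(self, session, segment):
        return segment  # nothing to reserve

    def allocate_tenant_segment(self, session):
        return {NETWORK_TYPE: 'noalloc',
                PHYSICAL_NETWORK: None,
                SEGMENTATION_ID: None}

    def release_segment(self, session, segment):
        pass  # nothing to release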
@six.add_metaclass(abc.ABCMeta)
class NetworkContext(object):
"""Context passed to MechanismDrivers for changes to network resources.
A NetworkContext instance wraps a network resource. It provides
helper methods for accessing other relevant information. Results
from expensive operations are cached so that other
MechanismDrivers can freely access the same information.
"""
@abc.abstractproperty
def current(self):
"""Return the network in its current configuration.
Return the network, as defined by
NeutronPluginBaseV2.create_network and all extensions in the
ml2 plugin, with all its properties 'current' at the time the
context was established.
"""
pass
@abc.abstractproperty
def original(self):
"""Return the network in its original configuration.
Return the network, with all its properties set to their
original values prior to a call to update_network. Method is
only valid within calls to update_network_precommit and
update_network_postcommit.
"""
pass
@abc.abstractproperty
def network_segments(self):
"""Return the segments associated with this network resource."""
pass
@six.add_metaclass(abc.ABCMeta)
class SubnetContext(object):
"""Context passed to MechanismDrivers for changes to subnet resources.
A SubnetContext instance wraps a subnet resource. It provides
helper methods for accessing other relevant information. Results
from expensive operations are cached so that other
MechanismDrivers can freely access the same information.
"""
@abc.abstractproperty
def current(self):
"""Return the subnet in its current configuration.
Return the subnet, as defined by
NeutronPluginBaseV2.create_subnet and all extensions in the
ml2 plugin, with all its properties 'current' at the time the
context was established.
"""
pass
@abc.abstractproperty
def original(self):
"""Return the subnet in its original configuration.
Return the subnet, with all its properties set to their
original values prior to a call to update_subnet. Method is
only valid within calls to update_subnet_precommit and
update_subnet_postcommit.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class PortContext(object):
"""Context passed to MechanismDrivers for changes to port resources.
A PortContext instance wraps a port resource. It provides helper
methods for accessing other relevant information. Results from
expensive operations are cached so that other MechanismDrivers can
freely access the same information.
"""
@abc.abstractproperty
def current(self):
"""Return the port in its current configuration.
Return the port, as defined by
NeutronPluginBaseV2.create_port and all extensions in the
ml2 plugin, with all its properties 'current' at the time the
context was established.
"""
pass
@abc.abstractproperty
def original(self):
"""Return the port in its original configuration.
Return the port, with all its properties set to their
original values prior to a call to update_port. Method is
only valid within calls to update_port_precommit and
update_port_postcommit.
"""
pass
@abc.abstractproperty
def status(self):
"""Return the status of the current port."""
pass
@abc.abstractproperty
def original_status(self):
"""Return the status of the original port.
The method is only valid within calls to update_port_precommit and
update_port_postcommit.
"""
pass
@abc.abstractproperty
def network(self):
"""Return the NetworkContext associated with this port."""
pass
@abc.abstractproperty
def binding_levels(self):
"""Return dictionaries describing the current binding levels.
This property returns a list of dictionaries describing each
binding level if the port is bound or partially bound, or None
if the port is unbound. Each returned dictionary contains the
name of the bound driver under the BOUND_DRIVER key, and the
bound segment dictionary under the BOUND_SEGMENT key.
The first entry (index 0) describes the top-level binding,
which always involves one of the port's network's static
segments. In the case of a hierarchical binding, subsequent
entries describe the lower-level bindings in descending order,
which may involve dynamic segments. Adjacent levels where
different drivers bind the same static or dynamic segment are
possible. The last entry (index -1) describes the bottom-level
binding that supplied the port's binding:vif_type and
binding:vif_details attribute values.
Within calls to MechanismDriver.bind_port, descriptions of the
levels above the level currently being bound are returned.
"""
pass
@abc.abstractproperty
def original_binding_levels(self):
"""Return dictionaries describing the original binding levels.
This property returns a list of dictionaries describing each
original binding level if the port was previously bound, or
None if the port was unbound. The content is as described for
the binding_levels property.
This property is only valid within calls to
update_port_precommit and update_port_postcommit. It returns
None otherwise.
"""
pass
@abc.abstractproperty
def top_bound_segment(self):
"""Return the current top-level bound segment dictionary.
This property returns the current top-level bound segment
dictionary, or None if the port is unbound. For a bound port,
top_bound_segment is equivalent to
binding_levels[0][BOUND_SEGMENT], and returns one of the
port's network's static segments.
"""
pass
@abc.abstractproperty
def original_top_bound_segment(self):
"""Return the original top-level bound segment dictionary.
This property returns the original top-level bound segment
dictionary, or None if the port was previously unbound. For a
previously bound port, original_top_bound_segment is
equivalent to original_binding_levels[0][BOUND_SEGMENT], and
returns one of the port's network's static segments.
This property is only valid within calls to
update_port_precommit and update_port_postcommit. It returns
None otherwise.
"""
pass
@abc.abstractproperty
def bottom_bound_segment(self):
"""Return the current bottom-level bound segment dictionary.
This property returns the current bottom-level bound segment
dictionary, or None if the port is unbound. For a bound port,
bottom_bound_segment is equivalent to
binding_levels[-1][BOUND_SEGMENT], and returns the segment
whose binding supplied the port's binding:vif_type and
binding:vif_details attribute values.
"""
pass
@abc.abstractproperty
def original_bottom_bound_segment(self):
"""Return the original bottom-level bound segment dictionary.
This property returns the original bottom-level bound segment
dictionary, or None if the port was previously unbound. For a
previously bound port, original_bottom_bound_segment is
equivalent to original_binding_levels[-1][BOUND_SEGMENT], and
returns the segment whose binding supplied the port's previous
binding:vif_type and binding:vif_details attribute values.
This property is only valid within calls to
update_port_precommit and update_port_postcommit. It returns
None otherwise.
"""
pass
@abc.abstractproperty
def host(self):
"""Return the host associated with the 'current' port."""
pass
@abc.abstractproperty
def original_host(self):
"""Return the host associated with the 'original' port.
Method is only valid within calls to update_port_precommit
and update_port_postcommit.
"""
pass
@abc.abstractproperty
def segments_to_bind(self):
"""Return the list of segments with which to bind the port.
This property returns the list of segment dictionaries with
which the mechanism driver may bind the port. When
establishing a top-level binding, these will be the port's
network's static segments. For each subsequent level, these
will be the segments passed to continue_binding by the
mechanism driver that bound the level above.
This property is only valid within calls to
MechanismDriver.bind_port. It returns None otherwise.
"""
pass
@abc.abstractmethod
def host_agents(self, agent_type):
"""Get agents of the specified type on port's host.
:param agent_type: Agent type identifier
:returns: List of agents_db.Agent records
"""
pass
@abc.abstractmethod
def set_binding(self, segment_id, vif_type, vif_details,
status=None):
"""Set the bottom-level binding for the port.
:param segment_id: Network segment bound for the port.
:param vif_type: The VIF type for the bound port.
:param vif_details: Dictionary with details for VIF driver.
:param status: Port status to set if not None.
This method is called by MechanismDriver.bind_port to indicate
success and specify binding details to use for port. The
segment_id must identify an item in the current value of the
segments_to_bind property.
"""
pass
@abc.abstractmethod
def continue_binding(self, segment_id, next_segments_to_bind):
"""Continue binding the port with different segments.
:param segment_id: Network segment partially bound for the port.
:param next_segments_to_bind: Segments to continue binding with.
This method is called by MechanismDriver.bind_port to indicate
it was able to partially bind the port, but that one or more
additional mechanism drivers are required to complete the
binding. The segment_id must identify an item in the current
value of the segments_to_bind property. The list of segment
IDs passed as next_segments_to_bind identifies dynamic (or
static) segments of the port's network that will be used to
populate segments_to_bind for the next lower level of a
hierarchical binding.
"""
pass
@abc.abstractmethod
def allocate_dynamic_segment(self, segment):
"""Allocate a dynamic segment.
:param segment: A partially or fully specified segment dictionary
Called by the MechanismDriver.bind_port, create_port or update_port
to dynamically allocate a segment for the port using the partial
segment specified. The segment dictionary can be a fully or partially
specified segment. At a minimum it needs the network_type populated to
call on the appropriate type driver.
"""
pass
@abc.abstractmethod
def release_dynamic_segment(self, segment_id):
"""Release an allocated dynamic segment.
:param segment_id: UUID of the dynamic network segment.
Called by the MechanismDriver.delete_port or update_port to release
the dynamic segment allocated for this port.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class MechanismDriver(object):
"""Define stable abstract interface for ML2 mechanism drivers.
A mechanism driver is called on the creation, update, and deletion
of networks and ports. For every event, there are two methods that
get called - one within the database transaction (method suffix of
_precommit), one right afterwards (method suffix of _postcommit).
Methods called inside the transaction can raise exceptions to
roll it back, but should not make any blocking calls (for example,
REST requests to an outside controller). Methods called after
transaction commits can make blocking external calls, though these
will block the entire process. Exceptions raised in calls after
the transaction commits may cause the associated resource to be
deleted.
Because rollback outside of the transaction is not done in the
update network/port case, all data validation must be done within
methods that are part of the database transaction.
"""
@abc.abstractmethod
def initialize(self):
"""Perform driver initialization.
Called after all drivers have been loaded and the database has
been initialized. No abstract methods defined below will be
called prior to this method being called.
"""
pass
def create_network_precommit(self, context):
"""Allocate resources for a new network.
:param context: NetworkContext instance describing the new
network.
Create a new network, allocating resources as necessary in the
database. Called inside transaction context on session. Call
cannot block. Raising an exception will result in a rollback
of the current transaction.
"""
pass
def create_network_postcommit(self, context):
"""Create a network.
:param context: NetworkContext instance describing the new
network.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
"""
pass
def update_network_precommit(self, context):
"""Update resources of a network.
:param context: NetworkContext instance describing the new
state of the network, as well as the original state prior
to the update_network call.
Update values of a network, updating the associated resources
in the database. Called inside transaction context on session.
Raising an exception will result in rollback of the
transaction.
update_network_precommit is called for all changes to the
network state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def update_network_postcommit(self, context):
"""Update a network.
:param context: NetworkContext instance describing the new
state of the network, as well as the original state prior
to the update_network call.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
update_network_postcommit is called for all changes to the
network state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def delete_network_precommit(self, context):
"""Delete resources for a network.
:param context: NetworkContext instance describing the current
state of the network, prior to the call to delete it.
Delete network resources previously allocated by this
mechanism driver for a network. Called inside transaction
context on session. Runtime errors are not expected, but
raising an exception will result in rollback of the
transaction.
"""
pass
def delete_network_postcommit(self, context):
"""Delete a network.
:param context: NetworkContext instance describing the current
state of the network, prior to the call to delete it.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Runtime errors are not
expected, and will not prevent the resource from being
deleted.
"""
pass
def create_subnet_precommit(self, context):
"""Allocate resources for a new subnet.
:param context: SubnetContext instance describing the new
subnet.
Create a new subnet, allocating resources as necessary in the
database. Called inside transaction context on session. Call
cannot block. Raising an exception will result in a rollback
of the current transaction.
"""
pass
def create_subnet_postcommit(self, context):
"""Create a subnet.
:param context: SubnetContext instance describing the new
subnet.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
"""
pass
def update_subnet_precommit(self, context):
"""Update resources of a subnet.
:param context: SubnetContext instance describing the new
state of the subnet, as well as the original state prior
to the update_subnet call.
Update values of a subnet, updating the associated resources
in the database. Called inside transaction context on session.
Raising an exception will result in rollback of the
transaction.
update_subnet_precommit is called for all changes to the
subnet state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def update_subnet_postcommit(self, context):
"""Update a subnet.
:param context: SubnetContext instance describing the new
state of the subnet, as well as the original state prior
to the update_subnet call.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
update_subnet_postcommit is called for all changes to the
subnet state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def delete_subnet_precommit(self, context):
"""Delete resources for a subnet.
:param context: SubnetContext instance describing the current
state of the subnet, prior to the call to delete it.
Delete subnet resources previously allocated by this
mechanism driver for a subnet. Called inside transaction
context on session. Runtime errors are not expected, but
raising an exception will result in rollback of the
transaction.
"""
pass
def delete_subnet_postcommit(self, context):
"""Delete a subnet.
:param context: SubnetContext instance describing the current
state of the subnet, prior to the call to delete it.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Runtime errors are not
expected, and will not prevent the resource from being
deleted.
"""
pass
def create_port_precommit(self, context):
"""Allocate resources for a new port.
:param context: PortContext instance describing the port.
Create a new port, allocating resources as necessary in the
database. Called inside transaction context on session. Call
cannot block. Raising an exception will result in a rollback
of the current transaction.
"""
pass
def create_port_postcommit(self, context):
"""Create a port.
:param context: PortContext instance describing the port.
Called after the transaction completes. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
result in the deletion of the resource.
"""
pass
def update_port_precommit(self, context):
"""Update resources of a port.
:param context: PortContext instance describing the new
state of the port, as well as the original state prior
to the update_port call.
Called inside transaction context on session to complete a
port update as defined by this mechanism driver. Raising an
exception will result in rollback of the transaction.
update_port_precommit is called for all changes to the port
state. It is up to the mechanism driver to ignore state or
state changes that it does not know or care about.
"""
pass
def update_port_postcommit(self, context):
"""Update a port.
:param context: PortContext instance describing the new
state of the port, as well as the original state prior
to the update_port call.
Called after the transaction completes. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
result in the deletion of the resource.
update_port_postcommit is called for all changes to the port
state. It is up to the mechanism driver to ignore state or
state changes that it does not know or care about.
"""
pass
def delete_port_precommit(self, context):
"""Delete resources of a port.
:param context: PortContext instance describing the current
state of the port, prior to the call to delete it.
Called inside transaction context on session. Runtime errors
are not expected, but raising an exception will result in
rollback of the transaction.
"""
pass
def delete_port_postcommit(self, context):
"""Delete a port.
:param context: PortContext instance describing the current
state of the port, prior to the call to delete it.
Called after the transaction completes. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Runtime errors are not
expected, and will not prevent the resource from being
deleted.
"""
pass
def bind_port(self, context):
"""Attempt to bind a port.
:param context: PortContext instance describing the port
This method is called outside any transaction to attempt to
establish a port binding using this mechanism driver. Bindings
may be created at each of multiple levels of a hierarchical
network, and are established from the top level downward. At
each level, the mechanism driver determines whether it can
bind to any of the network segments in the
context.segments_to_bind property, based on the value of the
context.host property, any relevant port or network
attributes, and its own knowledge of the network topology. At
the top level, context.segments_to_bind contains the static
segments of the port's network. At each lower level of
binding, it contains static or dynamic segments supplied by
the driver that bound at the level above. If the driver is
able to complete the binding of the port to any segment in
context.segments_to_bind, it must call context.set_binding
with the binding details. If it can partially bind the port,
it must call context.continue_binding with the network
segments to be used to bind at the next lower level.
If the binding results are committed after bind_port returns,
they will be seen by all mechanism drivers as
update_port_precommit and update_port_postcommit calls. But if
some other thread or process concurrently binds or updates the
port, these binding results will not be committed, and
update_port_precommit and update_port_postcommit will not be
called on the mechanism drivers with these results. Because
binding results can be discarded rather than committed,
drivers should avoid making persistent state changes in
bind_port, or else must ensure that such state changes are
eventually cleaned up.
"""
pass
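# A minimal sketch of how a mechanism driver typically uses the
# PortContext API in bind_port (illustrative only; the network type,
# vif_type, and vif_details values below are hypothetical):
class SketchMechanismDriver(MechanismDriver):

    def initialize(self):
        pass

    def bind_port(self, context):
        for segment in context.segments_to_bind:
            if segment[NETWORK_TYPE] == 'vlan':  # type this driver handles
                context.set_binding(segment[ID],
                                    'ovs',                  # hypothetical vif_type
                                    {'port_filter': True})  # hypothetical vif_details
                return
        # No usable segment: leave the port unbound for other drivers.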
@six.add_metaclass(abc.ABCMeta)
class ExtensionDriver(object):
"""Define stable abstract interface for ML2 extension drivers.
An extension driver extends the core resources implemented by the
ML2 plugin with additional attributes. Methods that process create
and update operations for these resources validate and persist
values for extended attributes supplied through the API. Other
methods extend the resource dictionaries returned from the API
operations with the values of the extended attributes.
"""
@abc.abstractmethod
def initialize(self):
"""Perform driver initialization.
Called after all drivers have been loaded and the database has
been initialized. No abstract methods defined below will be
called prior to this method being called.
"""
pass
@abc.abstractproperty
def extension_alias(self):
"""Supported extension alias.
Return the alias identifying the core API extension supported
by this driver.
"""
pass
def process_create_network(self, session, data, result):
"""Process extended attributes for create network.
:param session: database session
:param data: dictionary of incoming network data
:param result: network dictionary to extend
Called inside transaction context on session to validate and
persist any extended network attributes defined by this
driver. Extended attribute values must also be added to
result.
"""
pass
def process_create_subnet(self, session, data, result):
"""Process extended attributes for create subnet.
:param session: database session
:param data: dictionary of incoming subnet data
:param result: subnet dictionary to extend
Called inside transaction context on session to validate and
persist any extended subnet attributes defined by this
driver. Extended attribute values must also be added to
result.
"""
pass
def process_create_port(self, session, data, result):
"""Process extended attributes for create port.
:param session: database session
:param data: dictionary of incoming port data
:param result: port dictionary to extend
Called inside transaction context on session to validate and
persist any extended port attributes defined by this
driver. Extended attribute values must also be added to
result.
"""
pass
def process_update_network(self, session, data, result):
"""Process extended attributes for update network.
:param session: database session
:param data: dictionary of incoming network data
:param result: network dictionary to extend
Called inside transaction context on session to validate and
update any extended network attributes defined by this
driver. Extended attribute values, whether updated or not,
must also be added to result.
"""
pass
def process_update_subnet(self, session, data, result):
"""Process extended attributes for update subnet.
:param session: database session
:param data: dictionary of incoming subnet data
:param result: subnet dictionary to extend
Called inside transaction context on session to validate and
update any extended subnet attributes defined by this
driver. Extended attribute values, whether updated or not,
must also be added to result.
"""
pass
def process_update_port(self, session, data, result):
"""Process extended attributes for update port.
:param session: database session
:param data: dictionary of incoming port data
:param result: port dictionary to extend
Called inside transaction context on session to validate and
update any extended port attributes defined by this
driver. Extended attribute values, whether updated or not,
must also be added to result.
"""
pass
def extend_network_dict(self, session, result):
"""Add extended attributes to network dictionary.
:param session: database session
:param result: network dictionary to extend
Called inside transaction context on session to add any
extended attributes defined by this driver to a network
dictionary to be used for mechanism driver calls and/or
returned as the result of a network operation.
"""
pass
def extend_subnet_dict(self, session, result):
"""Add extended attributes to subnet dictionary.
:param session: database session
:param result: subnet dictionary to extend
Called inside transaction context on session to add any
extended attributes defined by this driver to a subnet
dictionary to be used for mechanism driver calls and/or
returned as the result of a subnet operation.
"""
pass
def extend_port_dict(self, session, result):
"""Add extended attributes to port dictionary.
:param session: database session
:param result: port dictionary to extend
Called inside transaction context on session to add any
extended attributes defined by this driver to a port
dictionary to be used for mechanism driver calls and/or
returned as the result of a port operation.
"""
pass
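# A minimal sketch of an ExtensionDriver that persists one extended
# network attribute (the alias and attribute name are hypothetical; a
# real driver would store values in its own table via the session):
class SketchExtensionDriver(ExtensionDriver):

    def initialize(self):
        self._values = {}  # in-memory stand-in for a database table

    @property
    def extension_alias(self):
        return 'sketch-ext'  # hypothetical extension alias

    def process_create_network(self, session, data, result):
        value = data.get('sketch:flag')  # hypothetical extended attribute
        self._values[result['id']] = value
        result['sketch:flag'] = value

    def extend_network_dict(self, session, result):
        result['sketch:flag'] = self._values.get(result['id'])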
|
blueboxgroup/neutron
|
neutron/plugins/ml2/driver_api.py
|
Python
|
apache-2.0
| 35,825
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import numpy as np
import tensorflow as tf
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
def read_tensor_from_image_file(file_name, input_height=299, input_width=299,
input_mean=0, input_std=255):
input_name = "file_reader"
output_name = "normalized"
file_reader = tf.read_file(file_name, input_name)
if file_name.endswith(".png"):
image_reader = tf.image.decode_png(file_reader, channels = 3,
name='png_reader')
elif file_name.endswith(".gif"):
image_reader = tf.squeeze(tf.image.decode_gif(file_reader,
name='gif_reader'))
elif file_name.endswith(".bmp"):
image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
else:
image_reader = tf.image.decode_jpeg(file_reader, channels = 3,
name='jpeg_reader')
float_caster = tf.cast(image_reader, tf.float32)
dims_expander = tf.expand_dims(float_caster, 0)
resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
with tf.Session() as sess:
    result = sess.run(normalized)
return result
def load_labels(label_file):
label = []
proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
if __name__ == "__main__":
file_name = "tf_files/flower_photos/daisy/3475870145_685a19116d.jpg"
model_file = "tf_files/retrained_graph.pb"
label_file = "tf_files/retrained_labels.txt"
input_height = 224
input_width = 224
input_mean = 128
input_std = 128
input_layer = "input"
output_layer = "final_result"
parser = argparse.ArgumentParser()
parser.add_argument("--image", help="image to be processed")
parser.add_argument("--graph", help="graph/model to be executed")
parser.add_argument("--labels", help="name of file containing labels")
parser.add_argument("--input_height", type=int, help="input height")
parser.add_argument("--input_width", type=int, help="input width")
parser.add_argument("--input_mean", type=int, help="input mean")
parser.add_argument("--input_std", type=int, help="input std")
parser.add_argument("--input_layer", help="name of input layer")
parser.add_argument("--output_layer", help="name of output layer")
args = parser.parse_args()
if args.graph:
model_file = args.graph
if args.image:
file_name = args.image
if args.labels:
label_file = args.labels
if args.input_height:
input_height = args.input_height
if args.input_width:
input_width = args.input_width
if args.input_mean:
input_mean = args.input_mean
if args.input_std:
input_std = args.input_std
if args.input_layer:
input_layer = args.input_layer
if args.output_layer:
output_layer = args.output_layer
graph = load_graph(model_file)
t = read_tensor_from_image_file(file_name,
input_height=input_height,
input_width=input_width,
input_mean=input_mean,
input_std=input_std)
input_name = "import/" + input_layer
output_name = "import/" + output_layer
input_operation = graph.get_operation_by_name(input_name)
output_operation = graph.get_operation_by_name(output_name)
with tf.Session(graph=graph) as sess:
start = time.time()
results = sess.run(output_operation.outputs[0],
{input_operation.outputs[0]: t})
end = time.time()
results = np.squeeze(results)
top_k = results.argsort()[-5:][::-1]
labels = load_labels(label_file)
print('\nEvaluation time (1-image): {:.3f}s\n'.format(end-start))
template = "{} (score={:0.5f})"
for i in top_k:
print(template.format(labels[i], results[i]))
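# Example invocation (a sketch; assumes the codelab's retrained model
# files exist under tf_files/, which are also the defaults above):
#   python label_image.py \
#       --graph=tf_files/retrained_graph.pb \
#       --labels=tf_files/retrained_labels.txt \
#       --input_height=224 --input_width=224 \
#       --image=tf_files/flower_photos/daisy/3475870145_685a19116d.jpg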
|
googlecodelabs/tensorflow-for-poets-2
|
scripts/label_image.py
|
Python
|
apache-2.0
| 4,889
|
from __future__ import print_function
#from unzip import unzip
import os, pdb, glob
def unpack_dir( extns=[ 'tgz', 'tar' ] ):
"""
Unpacks all the archives in the current directory with
the extensions listed in the keyword argument list.
This routine is currently fairly experimental and I tend
to use it on an ad hoc basis.
"""
archives = []
for i in range( len( extns ) ):
extn = extns[i]
archives = archives+glob.glob('*.'+extn)
n_archives = len(archives)
print( 'Now unpacking %i archives:' % n_archives )
finished_dir = 'already_unpacked'
try:
    os.mkdir( finished_dir )
except OSError:
    pass  # directory already exists
for i in range(len(archives)):
current = i+1
print( '\n currently unpacking %i of %i...\n' % (current,n_archives) )
ix = archives[i].rfind( '.' )
extn_i = archives[i][ix+1:]
if extn_i == 'tgz' or extn_i == 'tar':
os.system( 'tar -xzvf {0}'.format( archives[i] ) )
elif extn_i == 'gz' or extn_i == 'Z':
uncompressed = archives[i][:ix]
os.system( 'gunzip {0} -c > {1}'.format( archives[i], uncompressed ) )
os.system( 'mv {0} {1}'.format( archives[i], finished_dir ) )
else:
print( 'file extension {0} not recognised'.format( extn_i ) )
pdb.set_trace()
#unzip(archives[i])
print( 'Finished.\n' )
return None
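# Example usage (run from the directory containing the archives; the
# extension list is whatever suits the files at hand):
#   >>> from unpack_dir import unpack_dir
#   >>> unpack_dir( extns=[ 'tgz', 'tar', 'gz' ] )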
|
tomevans/utils
|
unpack_dir.py
|
Python
|
gpl-2.0
| 1,435
|
import sys
from aaProbSolver import *
import re
import glob
import math
#'L' is the reference codon
aaList = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y']
#coef file and folders
firstOrderCoefFile = '/home/kwang2/scripts/aaCodonProbPred/coefs/aa_sa_coef.txt'
firstCondSecondFiles = glob.glob("/home/kwang2/result/newData_Feb3_2014/aa_firstCondSecond_TD10_coefs/*.txt")
secondCondFirstFiles = glob.glob("/home/kwang2/result/newData_Feb3_2014/aa_secondCondFirst_TD10_coefs/*.txt")
invsqCutoff=math.pow(1.0/10.0, 2)
class AApairwise:
__catPattern = re.compile(r"\_(\w+)\_coef\.txt")
def __init__(self, pwFile):
#init solvers
## first order solver
fFirstOrder = open(firstOrderCoefFile, "r")
firstOrderCoefs = fFirstOrder.readlines()
fFirstOrder.close()
self.firstOrderSolver = aaProbSolver(firstOrderCoefs)
## second order solvers
### first conditioned on second
self.firstCondSecondSolvers = self.__constructSolvers__(firstCondSecondFiles)
### second conditioned on first
self.secondCondFirstSolvers = self.__constructSolvers__(secondCondFirstFiles)
self.aaDict = dict()
self.saDict = dict()
self.firstPosDict = dict()
self.secondPosDict = dict()
self.pairDict = dict()
self.posList = []
fInput = open(pwFile, "r")
allLines = fInput.readlines()
fInput.close()
for line in allLines:
tmpLine = line.split()
firstAA = tmpLine[4]
secondAA = tmpLine[5]
firstPos = tmpLine[6]
secondPos = tmpLine[7]
firstSA = tmpLine[9]
secondSA = tmpLine[13]
#float
invSD = 1.0/(float(secondPos) - float(firstPos))
invTD = 1.0/float(tmpLine[3])
invsqSD = math.pow(invSD,2)
invsqTD = math.pow(invTD,2)
# fill aaDict and saDict
if firstPos not in self.aaDict.keys():
self.aaDict[firstPos] = firstAA
self.saDict[firstPos] = firstSA
if secondPos not in self.aaDict.keys():
self.aaDict[secondPos] = secondAA
self.saDict[secondPos] = secondSA
# fill pos Dicts
if firstPos not in self.firstPosDict.keys():
self.firstPosDict[firstPos] = []
self.firstPosDict[firstPos].append(secondPos)
else:
if secondPos not in self.firstPosDict[firstPos]:
self.firstPosDict[firstPos].append(secondPos)
if secondPos not in self.secondPosDict.keys():
self.secondPosDict[secondPos] = []
self.secondPosDict[secondPos].append(firstPos)
else:
if firstPos not in self.secondPosDict[secondPos]:
self.secondPosDict[secondPos].append(firstPos)
#fill in pairDict
currPair = (firstPos, secondPos)
self.pairDict[currPair] = map(float, ["1", firstSA, secondSA, invsqSD, invsqTD])
self.posList = sorted(map(int, self.aaDict.keys()))
def __constructSolvers__(self, coefFiles):
solvers = {}
for f in coefFiles:
currCatName = AApairwise.__catPattern.search(f).group(1)
currFile = open(f, "r")
currDat = currFile.readlines()
currFile.close()
solvers[currCatName] = aaProbSolver(currDat)
return solvers
def getAAprob_onePos(self, pos):
aaProbs = []
for aa in aaList:
currAAprob = 0
currLogP = 0
#calculate prob for this aa
#first order
firstOrderX = [1, float(self.saDict[pos])]
firstOrderProb = self.firstOrderSolver.getCatProb(aa, firstOrderX)
currLogP += math.log(firstOrderProb)
#second order
beforeAAs = []
afterAAs = []
if pos in self.secondPosDict.keys():
beforeAAs = self.secondPosDict[pos]
#deal with P(pos=second|first) prob calculation
for firstAA_pos in beforeAAs:
tmpX = self.pairDict[(firstAA_pos, pos)]
#TD10 specific
if tmpX[4] > invsqCutoff:
currLogP += math.log(self.firstCondSecondSolvers[aa].getCatProb(self.aaDict[firstAA_pos], tmpX)) - math.log(self.firstOrderSolver.getCatProb(self.aaDict[firstAA_pos], [1, float(self.saDict[firstAA_pos])]))
if pos in self.firstPosDict.keys():
afterAAs = self.firstPosDict[pos]
#deal with P(pos=first|second) prob calculation
for secondAA_pos in afterAAs:
tmpX = self.pairDict[(pos, secondAA_pos)]
if tmpX[4] > invsqCutoff:
currLogP += math.log(self.secondCondFirstSolvers[aa].getCatProb(self.aaDict[secondAA_pos], tmpX)) - math.log(self.firstOrderSolver.getCatProb(self.aaDict[secondAA_pos], [1, float(self.saDict[secondAA_pos])]))
currAAprob = math.exp(currLogP)
aaProbs.append(currAAprob)
sumProb = sum(aaProbs)
aaProbs = [x/sumProb for x in aaProbs]
maxProb = max(aaProbs)
maxAA = aaList[aaProbs.index(maxProb)]
trueAA = self.aaDict[pos]
trueProb = aaProbs[aaList.index(trueAA)]
return pos + "," + maxAA + "," + str(maxProb) + "," + trueAA + "," + str(trueProb) + "," + ",".join(map(str, aaProbs)) + "\n"
def getAAprob_allPos(self, outputFile):
f = open(outputFile, "w")
for i in self.posList:
resultLine = self.getAAprob_onePos(str(i))
f.write(resultLine)
f.close()
if __name__ =='__main__':
inputFile = sys.argv[1]
outputFile = sys.argv[2]
AApw = AApairwise(inputFile)
AApw.getAAprob_allPos(outputFile)
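# Example invocation (file names are hypothetical; the coefficient
# file paths are hard-coded near the top of this script):
#   python predAAprob_hpc_TD10.py pairwise_measurements.txt aa_probs.csv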
|
learking/aaCodonProbPred
|
predAAprob/predAAprob_hpc_TD10.py
|
Python
|
gpl-2.0
| 5,647
|