code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# test for wasanbon/core/plugins/admin/idl_plugin/idl_parser/node.py
import unittest
from unittest.mock import Mock
from wasanbon.core.plugins.admin.idl_plugin.idl_parser import node
class TestPlugin(unittest.TestCase):
    """Unit tests for ``idl_parser.node.IDLNode``.

    Each test builds a ``Mock`` parent restricted by ``spec`` to the
    attributes the node under test actually touches, so unexpected
    attribute access fails loudly.
    """

    def test_init(self):
        """test for init"""
        test_classname = 'test_classname'
        test_name = 'test_name'
        # mock for parent
        test_parent_mock = Mock(spec=['full_path'])
        test_parent_mock.full_path.return_value = 'test_parent'
        # test
        idl_node = node.IDLNode(test_classname, test_name, test_parent_mock)
        self.assertEqual(idl_node._classname, test_classname)
        self.assertEqual(idl_node._parent, test_parent_mock)
        self.assertEqual(idl_node._name, test_name)
        # assertIsNone gives a clearer failure message than assertEqual(x, None)
        self.assertIsNone(idl_node._filepath)

    def test_name_and_type(self):
        """test for _name_and_type"""
        test_classname = 'test_classname'
        test_name = 'test_name'
        # mock for parent
        test_parent_mock = Mock(spec=['full_path'])
        test_parent_mock.full_path.return_value = 'test_parent'
        idl_node = node.IDLNode(test_classname, test_name, test_parent_mock)
        # test: _name_and_type returns (name, type) from a [type, name] pair
        test_blocks = ['test1', 'test2']
        ret = idl_node._name_and_type(test_blocks)
        self.assertEqual(ret, (test_blocks[1], test_blocks[0]))

    def test_refine_typename_no_sequence_and_type(self):
        """test for refine_typename sequence < 0, find_types = '' """
        test_classname = 'test_classname'
        test_name = 'test_name'
        # mock for parent
        test_parent_mock = Mock(spec=['full_path', 'is_root', 'find_types'])
        test_parent_mock.full_path.return_value = 'test_parent'
        test_parent_mock.is_root.return_value = True
        test_parent_mock.find_types.return_value = ''
        idl_node = node.IDLNode(test_classname, test_name, test_parent_mock)
        # test: unknown, non-sequence type names pass through unchanged
        test_typ = 'test1'
        ret = idl_node.refine_typename(test_typ)
        self.assertEqual(ret, test_typ)

    def test_refine_typename_no_sequence_but_find_full_path(self):
        """test for refine_typename sequence < 0, find_types = [Mock] """
        test_classname = 'test_classname'
        test_name = 'test_name'
        # mock for parent
        test_parent = 'test_parent'
        test_parent_mock = Mock(spec=['full_path', 'is_root', 'find_types'])
        test_parent_mock.full_path.return_value = test_parent
        test_parent_mock.is_root.return_value = True
        # mock for find_types: a matching type is replaced by its full path
        test_full_path = 'test_fullpath'
        full_path_mock = Mock(spec=['full_path'])
        full_path_mock.full_path = test_full_path
        test_parent_mock.find_types.return_value = [full_path_mock]
        idl_node = node.IDLNode(test_classname, test_name, test_parent_mock)
        # test
        test_typ = 'test1'
        ret = idl_node.refine_typename(test_typ)
        self.assertEqual(ret, test_full_path)

    def test_refine_typename_with_sequence(self):
        """test for refine_typename sequence > 0"""
        test_classname = 'test_classname'
        test_name = 'test_name'
        # mock for parent
        test_parent_mock = Mock(spec=['full_path', 'is_root', 'find_types'])
        test_parent_mock.full_path.return_value = 'test_parent'
        test_parent_mock.is_root.return_value = True
        test_parent_mock.find_types.return_value = ''
        idl_node = node.IDLNode(test_classname, test_name, test_parent_mock)
        # test: a typename containing 'sequence' is re-wrapped as
        # 'sequence < inner >' where inner is the <...> slice of the input
        test_typ = 'sequence'
        ret = idl_node.refine_typename(test_typ)
        typ_ = test_typ[test_typ.find('<') + 1: test_typ.find('>')]
        self.assertEqual(ret, 'sequence < ' + typ_ + ' >')

    def test_properties(self):
        """test for properties"""
        test_classname = 'test_classname'
        test_name = 'test_name'
        # mock for parent
        test_parent_mock = Mock(spec=['full_path'])
        test_parent_mock.full_path.return_value = 'test_parent'
        idl_node = node.IDLNode(test_classname, test_name, test_parent_mock)
        # test (mirrors the property logic for each is_* accessor)
        self.assertEqual(idl_node.filepath, idl_node._filepath)
        self.assertEqual(idl_node.is_array, test_classname == 'IDLArray')
        self.assertEqual(idl_node.is_void, test_classname == 'IDLVoid')
        self.assertEqual(idl_node.is_struct, test_classname == 'IDLStruct')
        self.assertEqual(idl_node.is_typedef, test_classname == 'IDLTypedef')
        self.assertEqual(idl_node.is_sequence, test_classname == 'IDLSequence')
        self.assertEqual(idl_node.is_primitive,
                         test_classname == 'IDLPrimitive')
        self.assertEqual(idl_node.is_interface,
                         test_classname == 'IDLInterface')
        self.assertEqual(idl_node.is_enum, test_classname == 'IDLEnum')
        self.assertEqual(idl_node.classname, test_classname)
        self.assertEqual(idl_node.name, test_name)
        self.assertEqual(idl_node.parent, test_parent_mock)
        # 'is None' (not '== None') per PEP 8; is_root is True only for
        # nodes without a parent
        self.assertEqual(idl_node.is_root, test_parent_mock is None)

    def test_properties_basename(self):
        """test for properties basename"""
        test_classname = 'test_classname'
        # mock for parent
        test_parent_mock = Mock(spec=['full_path'])
        test_parent_mock.full_path.return_value = 'test_parent'
        # test (with "::"): basename is everything after the last '::'
        test_name = 'test_name::test1'
        idl_node = node.IDLNode(test_classname, test_name, test_parent_mock)
        self.assertEqual(idl_node.basename,
                         test_name[test_name.rfind('::') + 2:])
        # test (without "::"): the whole name is the basename
        test_name = 'test_nametest1'
        idl_node = node.IDLNode(test_classname, test_name, test_parent_mock)
        self.assertEqual(idl_node.basename, test_name)

    def test_properties_pathname(self):
        """test for properties pathname"""
        test_classname = 'test_classname'
        # mock for parent
        test_parent_mock = Mock(spec=['full_path'])
        test_parent_mock.full_path.return_value = 'test_parent'
        # test (with "::"): pathname is everything before the last '::'
        test_name = 'test_name::test1'
        idl_node = node.IDLNode(test_classname, test_name, test_parent_mock)
        self.assertEqual(idl_node.pathname, test_name[:test_name.rfind('::')])
        # test (without "::"): pathname is empty
        test_name = 'test_nametest1'
        idl_node = node.IDLNode(test_classname, test_name, test_parent_mock)
        self.assertEqual(idl_node.pathname, '')
if __name__ == '__main__':
    # Discover and run all TestPlugin tests when executed directly.
    unittest.main()
| sugarsweetrobotics/wasanbon | test/test_admin_idl_plugin/test_idl_parser/test_node.py | Python | gpl-3.0 | 6,661 |
# Generator script: emits to stdout the C source of the SSSE3
# _mm_shuffle_epi8 expressions llvmpipe uses to swizzle a 4x4 tile of
# bytes from four input lines.
#
# NOTE(review): the upstream file contained a truncated, never-used
# statement "basemask = [0x" which was a syntax error; it has been
# dropped.  The script was also Python 2 only ("print", 0L literals);
# it is ported here to Python 3 with byte-identical output.
import sys

# Destination-lane -> source-byte-index table, one row per output line.
tile = [[0, 1, 4, 5],
        [2, 3, 6, 7],
        [8, 9, 12, 13],
        [10, 11, 14, 15]]
shift = 0
align = 1
value = 0
holder = []

fd = sys.stdout
indent = " " * 9
for c in range(4):
    fd.write(indent + "*pdst++ = \n")
    for l, line in enumerate(tile):
        # First line is assigned (' '), the following three are added ('+').
        fd.write(indent + "  %s_mm_shuffle_epi8(line%d, (__m128i){" % (l and '+' or ' ', l))
        for i, pos in enumerate(line):
            # All shuffle lanes stay 0xff (zero the lane) except the byte
            # selected for the current shift position.
            mask = 0x00ffffffff & (~(0xff << shift))
            value = mask | ((pos) << shift)
            holder.append(value)
            # Two 32-bit lane masks are emitted together as one 64-bit
            # literal (low word first).
            if holder and (i + 1) % 2 == 0:
                fd.write("0x%8.0x" % (holder[0] + (holder[1] << 32)))
                holder = []
            if (i) % 4 == 1:
                fd.write(',')
        fd.write("})%s\n" % ((l == 3) and ';' or ''))
    print()
    shift += 8
| zcbenz/cefode-chromium | third_party/mesa/MesaLib/src/gallium/drivers/llvmpipe/lp_tile_shuffle_mask.py | Python | bsd-3-clause | 716 |
# Copyright (C) 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Converters, etc for going between Bazaar and Git ids."""
from bzrlib import errors, foreign
from bzrlib.inventory import ROOT_ID
from bzrlib.foreign import (
ForeignRevision,
)
def escape_file_id(file_id):
    """Encode *file_id* so it contains no spaces.

    '_' is doubled first so that the '_s' escape emitted for a space can
    never collide with a literal underscore.
    """
    doubled = file_id.replace('_', '__')
    return doubled.replace(' ', '_s')
def unescape_file_id(file_id):
    """Decode a file id produced by :func:`escape_file_id`.

    The previous implementation applied two global ``str.replace`` calls
    (``"_s"`` -> ``" "`` then ``"__"`` -> ``"_"``), which is ambiguous:
    ``unescape_file_id(escape_file_id("a_sb"))`` returned ``"a_ b"``
    instead of ``"a_sb"``.  Decoding left-to-right treats ``'_'`` as an
    escape prefix (``"__"`` -> ``'_'``, ``"_s"`` -> ``' '``), which is
    unambiguous and agrees with the old behaviour on all inputs the old
    code decoded correctly.
    """
    result = []
    i = 0
    n = len(file_id)
    while i < n:
        ch = file_id[i]
        if ch == '_' and i + 1 < n:
            nxt = file_id[i + 1]
            if nxt == '_':
                result.append('_')
                i += 2
                continue
            if nxt == 's':
                result.append(' ')
                i += 2
                continue
        # Any other character (or a trailing lone '_') is literal.
        result.append(ch)
        i += 1
    return "".join(result)
class BzrGitMapping(foreign.VcsMapping):
    """Class that maps between Git and Bazaar semantics.

    Subclasses must define ``revid_prefix`` (the string prepended to git
    SHAs to form Bazaar revision ids).
    """
    experimental = False
    def revision_id_foreign_to_bzr(self, git_rev_id):
        """Convert a git revision id handle to a Bazaar revision id."""
        return "%s:%s" % (self.revid_prefix, git_rev_id)
    def revision_id_bzr_to_foreign(self, bzr_rev_id):
        """Convert a Bazaar revision id to a git revision id handle.

        :raises InvalidRevisionId: if *bzr_rev_id* does not carry this
            mapping's prefix.
        """
        if not bzr_rev_id.startswith("%s:" % self.revid_prefix):
            raise errors.InvalidRevisionId(bzr_rev_id, self)
        return bzr_rev_id[len(self.revid_prefix)+1:]
    def show_foreign_revid(self, foreign_revid):
        # Mapping used by bzr to display the foreign id in log output.
        return { "git commit": foreign_revid }
    def generate_file_id(self, path):
        # The tree root always maps to bzr's fixed ROOT_ID; other paths
        # are escaped so the file id contains no spaces.
        if path == "":
            return ROOT_ID
        return escape_file_id(path.encode('utf-8'))
    def import_commit(self, commit):
        """Convert a git commit to a bzr revision.
        :return: a `bzrlib.revision.Revision` object.
        """
        if commit is None:
            raise AssertionError("Commit object can't be None")
        rev = ForeignRevision(commit.id, self, self.revision_id_foreign_to_bzr(commit.id))
        rev.parent_ids = tuple([self.revision_id_foreign_to_bzr(p) for p in commit.parents])
        # Git metadata is raw bytes; decode leniently to avoid failing on
        # commits with broken encodings.
        rev.message = commit.message.decode("utf-8", "replace")
        rev.committer = str(commit.committer).decode("utf-8", "replace")
        # Only record a separate author when it differs from the committer
        # (mirrors git's author/committer distinction).
        if commit.committer != commit.author:
            rev.properties['author'] = str(commit.author).decode("utf-8", "replace")
        rev.timestamp = commit.commit_time
        # NOTE(review): timezone information from the git commit is
        # discarded here; timestamps are stored as UTC offset 0.
        rev.timezone = 0
        return rev
class BzrGitMappingv1(BzrGitMapping):
    # Stable v1 mapping: revision ids look like "git-v1:<sha1>".
    revid_prefix = 'git-v1'
    experimental = False
class BzrGitMappingExperimental(BzrGitMappingv1):
    # Unstable mapping for testing new behaviour; ids are not guaranteed
    # to remain valid across versions.
    revid_prefix = 'git-experimental'
    experimental = True
default_mapping = BzrGitMappingv1()
| harsh-a1/repeater-testing | mapping.py | Python | gpl-2.0 | 3,000 |
import idautils
import idaapi
import idc
import socket
import select
# remote control for IDA using a text protocol
# by default listens on localhost:56789
# tested with IDA7.3, IDA7.4
# to stop, run 'idaremote.quit()' from within IDA
class IdaRemote:
    """TCP text-protocol server for remote-controlling IDA.

    A listening socket is polled from IDA's own timer loop
    (``main_iter``): requests are handled on the main thread because
    modifying the IDB from another thread may corrupt it.  Each request
    is a single line ``"<command> <args...>"`` dispatched to the
    matching ``cmd_<command>`` method; the return value (always ``str``)
    is sent back to the client.
    """
    sock = None            # listening socket; None once the plugin quits
    sock_client = None     # socket of the client currently being served
    ida_timer_delay = 50   # polling period (ms) returned to IDA's timer
    debug = False          # log incoming requests to IDA's message window
    # open a network socket for incoming connections
    def listen(self, host="localhost", port=56789):
        idaapi.msg("IdaRemote listening on {}:{}\n".format(host, str(port)))
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((host, port))
        self.sock.listen(10)
    # register in ida to call main_iter() every 50ms
    # allows to runs in the IDA main loop (modifying the IDB from a thread may corrupt the IDB)
    def register_ida_timer(self, delay=50):
        self.ida_timer_delay = delay
        ida_kernwin.register_timer(delay, self.main_iter)
    # check if we have a pending connection, handle it
    # returns the delay until the next timer call, or -1 to unregister
    def main_iter(self):
        if not self.sock:
            # cli called cmd_exitplugin(), unregister the timer
            return -1
        r, w, e = select.select([self.sock], [], [], 0)
        for s in r:
            client, addr = s.accept()
            # wait up to 10s for the request to arrive
            select.select([client], [], [], 10)
            self.sock_client = client
            rq = self.client_recv(4096)
            ans = self.handle_rq(rq)
            self.client_send(ans)
            client.close()
            self.sock_client = None
        return self.ida_timer_delay
    # parse one request, handle it, return the reply
    def handle_rq(self, rq):
        if self.debug:
            if len(rq) > 64:
                idaapi.msg("IdaRemote request: {}...\n".format(repr(rq[:62])))
            else:
                idaapi.msg("IdaRemote request: {}\n".format(repr(rq)))
        splt = rq.split(" ", 1)
        cmd = splt[0]
        method = getattr(self, "cmd_" + cmd, False)
        if method:
            try:
                # introspection to find the required number of args
                # avoids parsing quoted strings in the queries, allow some methods to receive args containing space characters (eg set_comment)
                method_nargs = method.__code__.co_argcount - 1
                if method_nargs == 0:
                    return method()
                elif method_nargs == 1:
                    return method(splt[1])
                else:
                    # only the last parameter may contain spaces
                    return method(*splt[1].split(" ", method_nargs-1))
            except Exception as err:
                idaapi.msg("IdaRemote exception: {}\n".format(err))
                return ""
        else:
            return "unknown command " + cmd
    # send a reply to the client; returns the number of bytes sent (0 on error)
    def client_send(self, msg):
        # python2+3 compat
        try:
            bmsg = bytes(msg, 'latin1')
        except:
            bmsg = bytes(msg)
        try:
            return self.sock_client.send(bmsg)
        except Exception as err:
            idaapi.msg("IdaRemote client_send(): {}\n".format(err))
            return 0
    # receive up to ln bytes from the client, decoded as latin1 str
    def client_recv(self, ln):
        bmsg = self.sock_client.recv(ln)
        # python2+3 compat # lol
        msg = str(bmsg.decode('latin1'))
        return msg
    # block until the client socket is readable or time_s seconds elapse
    def client_wait(self, time_s):
        return select.select([self.sock_client], [], [], time_s)
    # hexencode a buffer
    def str2hex(self, raw):
        # python2+3 compat
        try:
            # raw already bytes
            return "".join(["{:02X}".format(b) for b in raw])
        except:
            try:
                # python3, raw is string
                return "".join(["{:02X}".format(b) for b in bytes(raw, 'latin1')])
            except:
                # python2
                return "".join(["{:02X}".format(ord(b)) for b in bytes(raw)])
    # encode an address in hex, return '-1' for invalid address
    def fmt_addr(self, addr):
        if addr == ida_idaapi.BADADDR:
            return "-1"
        else:
            return "0x{:04X}".format(addr)
    # convenience alias for stopping the plugin from IDA's python console
    def quit(self):
        self.cmd_exitplugin()
        return ""
    # list of supported commands
    # address -> label name
    def cmd_get_label(self, addr):
        return idc.get_name(int(addr, 0))
    # set a label at an address
    def cmd_set_label(self, addr, label):
        if idaapi.set_name(int(addr, 0), label, idaapi.SN_NOWARN|idaapi.SN_NOCHECK):
            return "ok"
        return ""
    # label name -> address
    # return 0xffffffff or 0xffffffffffffffff (BAD_ADDR) if not existing
    def cmd_resolve_label(self, label):
        addr = idc.get_name_ea_simple(label)
        return self.fmt_addr(addr)
    # return the list of addrs for which a name exists
    def cmd_get_named_addrs(self, a_start, a_end):
        # idautils.Names() does not work in 7.3
        return " ".join([self.fmt_addr(a) for a in range(int(a_start, 0), int(a_end, 0)) if idc.get_name(a)])
    # read raw data from an address, hex-encoded
    def cmd_get_bytes(self, addr, len):
        raw = idc.get_bytes(int(addr, 0), int(len, 0))
        if raw:
            return self.str2hex(raw)
        return ""
    # read one byte
    def cmd_get_byte(self, addr):
        return str(idc.get_wide_byte(int(addr, 0)))
    # read one word
    def cmd_get_word(self, addr):
        return str(idc.get_wide_word(int(addr, 0)))
    # read one dword
    def cmd_get_dword(self, addr):
        return str(idc.get_wide_dword(int(addr, 0)))
    # read one qword
    def cmd_get_qword(self, addr):
        return str(idc.get_qword(int(addr, 0)))
    # return an array of xrefs to the specified addr
    # array is a sequence of hex addresses separate by spaces
    def cmd_get_xrefs_to(self, addr):
        ALL_XREFS = 0
        xrefs = idautils.XrefsTo(int(addr, 0), ALL_XREFS)
        return " ".join([self.fmt_addr(xr.frm) for xr in xrefs])
    # end the idaremote plugin loop, close the listening socket
    def cmd_exitplugin(self):
        idaapi.msg("IdaRemote closing\n")
        if self.sock:
            self.sock.close()
            self.sock = None
        self.ida_timer_delay = -1
        return "bye"
    # ask IDA to save IDB and exit
    def cmd_exit_ida(self, c):
        idaapi.msg("IdaRemote exiting IDA\n")
        idc.qexit(int(c, 0))
        return "bye" # not reached?
    # get the non-repeatable comment at address
    def cmd_get_comment(self, addr):
        c = idc.get_cmt(int(addr, 0), 0)
        if c:
            return c
        return ""
    # set the non-repeatable comment at address
    def cmd_set_comment(self, addr, cmt):
        if idc.set_cmt(int(addr, 0), cmt, 0):
            return "ok"
        return ""
    # return the current cursor address (ScreenEA)
    def cmd_get_cursor_pos(self):
        return self.fmt_addr(idc.get_screen_ea())
    # set the current cursor address
    def cmd_set_cursor_pos(self, a):
        if idc.jumpto(int(a, 0)):
            return "ok"
        return ""
    # return the start/end address of the current selection
    def cmd_get_selection(self):
        return " ".join(self.fmt_addr(a) for a in [idc.read_selection_start(), idc.read_selection_end()])
    # return the flags for an address
    def cmd_get_flags(self, a):
        return "0x{:08X}".format(idc.get_full_flags(int(a, 0)))
    # return the list of head addresses (instruction or data) in a range
    def cmd_get_heads(self, a_start, a_end):
        return " ".join([self.fmt_addr(a) for a in Heads(int(a_start, 0), int(a_end, 0))])
    # return the previous head before an address
    def cmd_get_prev_head(self, a):
        return self.fmt_addr(idc.prev_head(int(a, 0)))
    # return the next head after an address
    def cmd_get_next_head(self, a):
        return self.fmt_addr(idc.next_head(int(a, 0)))
    # return the size of an item (head)
    def cmd_get_item_size(self, a):
        return str(idc.get_item_size(int(a, 0)))
    # return the list of functions in a range
    def cmd_get_functions(self, a_start, a_end):
        return " ".join([self.fmt_addr(a) for a in Functions(int(a_start, 0), int(a_end, 0))])
    # return the address of a function from the address of an instruction
    def cmd_get_function_start(self, a):
        addr = idc.get_name_ea_simple(idc.get_func_name(int(a, 0)))
        return self.fmt_addr(addr)
    # return the name of a function from the address of an instruction of the body
    def cmd_get_function_name(self, a):
        return idc.get_func_name(int(a, 0))
    # return the (nonrepeatable) function comment
    def cmd_get_function_comment(self, a):
        return idc.get_func_cmt(int(a, 0), 0)
    # set the (nonrepeatable) function comment
    def cmd_set_function_comment(self, a, c):
        if idc.set_func_cmt(int(a, 0), c, 0):
            return "ok"
        return ""
    # return the function flags for an address
    def cmd_get_function_flags(self, a):
        return "0x{:08X}".format(idc.get_func_attr(int(a, 0), idc.FUNCATTR_FLAGS))
    # return the address of each basicblock of the function
    def cmd_get_function_blocks(self, a):
        fc = idaapi.FlowChart(idaapi.get_func(int(a, 0)))
        return " ".join([self.fmt_addr(b.start_ea) for b in fc])
    # return the C prototype for an address
    def cmd_get_type(self, a):
        t = idc.get_type(int(a, 0))
        if not t:
            t = ""
        return t
    # set the C prototype for an address
    def cmd_set_type(self, a, t):
        if idc.SetType(int(a, 0), t):
            return "ok"
        return ""
    # return list of all segments start address
    def cmd_get_segments(self):
        return " ".join([self.fmt_addr(a) for a in Segments()])
    # return the start address for the segment from any address within
    def cmd_get_segment_start(self, a):
        return self.fmt_addr(idc.get_segm_start(int(a, 0)))
    # return the end address for the segment starting at a
    def cmd_get_segment_end(self, a):
        return self.fmt_addr(idc.get_segm_end(int(a, 0)))
    # return the name of a segment
    def cmd_get_segment_name(self, a):
        return idc.get_segm_name(int(a, 0))
    # return the mnemonic of an opcode at addr
    def cmd_get_op_mnemonic(self, a):
        return idc.print_insn_mnem(int(a, 0))
    # tell IDA to convert an address into an alignment directive
    def cmd_make_align(self, a, count, align):
        return str(idc.create_align(int(a, 0), int(count, 0), int(align, 0)))
    # tell IDA to make an array, reuse current type
    def cmd_make_array(self, a, count):
        return str(idc.make_array(int(a, 0), int(count, 0)))
    # tell IDA to convert to a byte
    def cmd_make_byte(self, a):
        return str(idc.create_data(int(a, 0), idc.FF_BYTE, 1, ida_idaapi.BADADDR))
    # tell IDA to convert to a word
    def cmd_make_word(self, a):
        return str(idc.create_data(int(a, 0), idc.FF_WORD, 2, ida_idaapi.BADADDR))
    # tell IDA to convert to a dword
    def cmd_make_dword(self, a):
        return str(idc.create_data(int(a, 0), idc.FF_DWORD, 4, ida_idaapi.BADADDR))
    # tell IDA to convert to a qword
    def cmd_make_qword(self, a):
        return str(idc.create_data(int(a, 0), idc.FF_QWORD, 8, ida_idaapi.BADADDR))
    # tell IDA to convert to a string
    # a_end = 0 => auto size
    def cmd_make_string(self, a, len, kind):
        return str(ida_bytes.create_strlit(int(a, 0), int(len, 0), int(kind, 0)))
    # tell IDA to disassemble
    def cmd_make_code(self, a):
        return str(idc.create_insn(int(a, 0)))
    # undefine at an address
    # for code, undefine following instructions too
    def cmd_undefine(self, a):
        return str(idc.del_items(int(a, 0), 1))
    # patch a raw byte in the IDB
    def cmd_patch_byte(self, a, v):
        if idc.patch_byte(int(a, 0), int(v, 0)):
            return "ok"
        return ""
    # return the path of the analysed file
    def cmd_get_input_path(self):
        return idc.get_input_file_path()
    # return the nth entrypoint address
    def cmd_get_entry(self, idx):
        return self.fmt_addr(idc.get_entry(idc.get_entry_ordinal(int(idx, 0))))
    # return <cpu_name> <word size> <endianness>
    def cmd_get_cpuinfo(self):
        info = idaapi.get_inf_structure()
        cpu_name = info.procName
        if info.is_64bit():
            word_size = 64
        elif info.is_32bit():
            word_size = 32
        else:
            word_size = 16
        if info.is_be():
            endian = 'big'
        else:
            endian = 'little'
        return " ".join([cpu_name, str(word_size), endian])
    # identify the remote version
    # ex: "ida 7.4"
    def cmd_get_remoteid(self):
        return "ida " + ida_kernwin.get_kernel_version()
    # run many commands at once
    # batch is a list of separate commands
    # run all of them and return the array of results
    # array encoded as sequence of <str(int(len(element)))><space><element>
    # ex: "14 get_cursor_pos4 exitplugin" -> "4 0x423 bye"
    def cmd_batch(self, batch):
        ans_ary = []
        off = 0
        while off < len(batch):
            off_len = batch.find(" ", off) # way faster than split() for large strings
            ln = int(batch[off:off_len])
            off = off_len+1+ln
            rq = batch[off_len+1:off]
            ans = self.handle_rq(rq)
            if not isinstance(ans, str):
                idaapi.msg("output of {} is not a str\n".format(rq))
            ans_ary.append(ans)
        return "".join([str(len(ans)) + " " + ans for ans in ans_ary])
    # handle multiple sequential requests/responses in the client socket
    # allow large requests
    # payload = <str(int(len(request0)))><space><request0>
    # sends back <str(int(len(answer0)))><space><answer0>
    # reads another request until len(request) == 0
    # if the 1st request is incomplete from the initial recv(), fetch missing data
    def cmd_multirq(self, buf):
        while 1:
            if not " " in buf:
                idaapi.msg("IdaRemote multirq client timeout\n")
                return ""
            ln, buf = buf.split(" ", 1)
            if int(ln) == 0:
                return "0 "
            # fetch the remainder of a partially-received request
            while int(ln) > len(buf):
                buf += self.client_recv(int(ln)-len(buf))
            rq = buf[:int(ln)]
            buf = buf[int(ln):]
            ans = self.handle_rq(rq)
            self.client_send(str(len(ans)) + " " + ans)
            # no next length prefix buffered yet: wait for the client
            if " " not in buf:
                self.client_wait(4)
                buf += self.client_recv(4096)
# Module entry point: instantiate the server, bind the listening socket,
# and hook main_iter() into IDA's timer loop.
idaremote = IdaRemote()
idaremote.listen()
idaremote.register_ida_timer()
| jjyg/metasm | samples/ida/idaremote.py | Python | lgpl-2.1 | 14,647 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import QgsMapLayerRegistry
from .db_model import DBModel
from .db_plugins.plugin import DBPlugin, Schema, Table
from .in_memory import toCacheLayer
class DBTree(QTreeView):
    """Tree view listing database connections, their schemas and tables.

    Wraps a DBModel and exposes the currently selected database / schema /
    table, plus a context menu for rename/delete/add-to-canvas actions.
    """
    def __init__(self, mainWindow):
        QTreeView.__init__(self, mainWindow)
        self.mainWindow = mainWindow
        self.setModel(DBModel(self))
        self.setHeaderHidden(True)
        self.setEditTriggers(QTreeView.EditKeyPressed|QTreeView.SelectedClicked)
        self.setDragEnabled(True)
        self.setAcceptDrops(True)
        self.setDropIndicatorShown(True)
        # keep the current item and listeners in sync with selection,
        # expansion and model changes
        self.connect(self.selectionModel(), SIGNAL("currentChanged(const QModelIndex&, const QModelIndex&)"), self.currentItemChanged)
        self.connect(self, SIGNAL("expanded(const QModelIndex&)"), self.itemChanged)
        self.connect(self, SIGNAL("collapsed(const QModelIndex&)"), self.itemChanged)
        self.connect(self.model(), SIGNAL("dataChanged(const QModelIndex&, const QModelIndex&)"), self.modelDataChanged)
        self.connect(self.model(), SIGNAL("notPopulated"), self.collapse)
    def refreshItem(self, item=None):
        """Refresh *item* in the model (defaults to the current item)."""
        if item is None:
            item = self.currentItem()
            if item is None:
                return
        self.model().refreshItem(item)
    def showSystemTables(self, show):
        # Not implemented for this view.
        pass
    def currentItem(self):
        """Return the model item behind the current selection, or None."""
        indexes = self.selectedIndexes()
        if not indexes:
            return
        return self.model().getItem(indexes[0])
    def currentDatabase(self):
        """Return the database of the selected item, or None."""
        item = self.currentItem()
        if item is None:
            return
        if isinstance(item, (DBPlugin, Schema, Table)):
            return item.database()
        return None
    def currentSchema(self):
        """Return the schema of the selected item, or None."""
        item = self.currentItem()
        if item is None:
            return
        if isinstance(item, (Schema, Table)):
            return item.schema()
        return None
    def currentTable(self):
        """Return the selected item if it is a Table, else None."""
        item = self.currentItem()
        if item is None:
            return
        if isinstance(item, Table):
            return item
        return None
    def itemChanged(self, index):
        # Re-select and notify listeners that the current item changed.
        self.setCurrentIndex(index)
        self.emit(SIGNAL('selectedItemChanged'), self.currentItem())
    def modelDataChanged(self, indexFrom, indexTo):
        self.itemChanged(indexTo)
    def currentItemChanged(self, current, previous):
        self.itemChanged(current)
    def contextMenuEvent(self, ev):
        """Build and show the context menu for the item under the cursor."""
        index = self.indexAt(ev.pos())
        if not index.isValid():
            return
        if index != self.currentIndex():
            self.itemChanged(index)
        item = self.currentItem()
        menu = QMenu(self)
        if isinstance(item, (Table, Schema)):
            menu.addAction(self.tr("Rename"), self.rename)
            menu.addAction(self.tr("Delete"), self.delete)
            if isinstance(item, Table):
                menu.addSeparator()
                menu.addAction(self.tr("Add to canvas"), self.addLayer)
                menu.addAction(self.tr("Add to canvas as in-memory layer"), self.addLayerInMemory)
        elif isinstance(item, DBPlugin) and item.database() is not None:
            menu.addAction(self.tr("Re-connect"), self.reconnect)
        if not menu.isEmpty():
            menu.exec_(ev.globalPos())
        menu.deleteLater()
    def rename(self):
        """Start inline editing of the selected table/schema name."""
        index = self.currentIndex()
        item = self.model().getItem(index)
        if isinstance(item, (Table, Schema)):
            self.edit(index)
    def delete(self):
        """Delete the selected table/schema via the database's action slot."""
        item = self.currentItem()
        if isinstance(item, (Table, Schema)):
            self.mainWindow.invokeCallback(item.database().deleteActionSlot)
    def addLayer(self):
        """Add the selected table to the QGIS canvas as a map layer."""
        table = self.currentTable()
        if table is not None:
            QgsMapLayerRegistry.instance().addMapLayers([table.toMapLayer()])
    def addLayerInMemory(self):
        """Add the selected table to the canvas, cached as in-memory layer."""
        table = self.currentTable()
        if table is not None:
            layer = toCacheLayer(table.toMapLayer())
            QgsMapLayerRegistry.instance().addMapLayers([layer])
    def reconnect(self):
        """Re-open the connection of the selected database."""
        db = self.currentDatabase()
        if db is not None:
            self.mainWindow.invokeCallback(db.reconnectActionSlot)
| medspx/dbmanager-cache | db_tree.py | Python | gpl-2.0 | 4,893 |
# Copyright 2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
from portage import os
from portage.const import CACHE_PATH, PROFILE_PATH
def _get_legacy_global(name):
    """Lazily construct and return the legacy ``portage.<name>`` global.

    Each global is built at most once; constructed names are tracked in
    ``portage._legacy_globals_constructed``.  'portdb' and
    'mtimedb'/'mtimedbfile' have dedicated branches; any other name
    triggers the full tree/settings bootstrap below (which constructs
    several globals as a side effect).
    """
    constructed = portage._legacy_globals_constructed
    if name in constructed:
        # Already built on a previous call: return the cached attribute.
        return getattr(portage, name)

    if name == 'portdb':
        portage.portdb = portage.db[portage.root]["porttree"].dbapi
        constructed.add(name)
        return getattr(portage, name)

    elif name in ('mtimedb', 'mtimedbfile'):
        # Both names are built together since mtimedb depends on the path.
        portage.mtimedbfile = os.path.join(portage.settings['EROOT'],
            CACHE_PATH, "mtimedb")
        constructed.add('mtimedbfile')
        portage.mtimedb = portage.MtimeDB(portage.mtimedbfile)
        constructed.add('mtimedb')
        return getattr(portage, name)

    # Portage needs to ensure a sane umask for the files it creates.
    os.umask(0o22)

    kwargs = {}
    for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
        kwargs[k] = os.environ.get(envvar)

    # Guard flag consumed elsewhere in portage while the trees are built.
    portage._initializing_globals = True
    portage.db = portage.create_trees(**kwargs)
    constructed.add('db')
    del portage._initializing_globals

    settings = portage.db[portage.db._target_eroot]["vartree"].settings
    portage.settings = settings
    constructed.add('settings')

    # Since portage.db now uses EROOT for keys instead of ROOT, we make
    # portage.root refer to EROOT such that it continues to work as a key.
    portage.root = portage.db._target_eroot
    constructed.add('root')

    # COMPATIBILITY
    # These attributes should not be used within
    # Portage under any circumstances.
    portage.archlist = settings.archlist()
    constructed.add('archlist')
    portage.features = settings.features
    constructed.add('features')
    portage.groups = settings["ACCEPT_KEYWORDS"].split()
    constructed.add('groups')
    portage.pkglines = settings.packages
    constructed.add('pkglines')
    portage.selinux_enabled = settings.selinux_enabled()
    constructed.add('selinux_enabled')
    portage.thirdpartymirrors = settings.thirdpartymirrors()
    constructed.add('thirdpartymirrors')

    profiledir = os.path.join(settings["PORTAGE_CONFIGROOT"], PROFILE_PATH)
    if not os.path.isdir(profiledir):
        profiledir = None
    portage.profiledir = profiledir
    constructed.add('profiledir')

    return getattr(portage, name)
| funtoo/portage-funtoo | pym/portage/_legacy_globals.py | Python | gpl-2.0 | 2,254 |
# Python program to find whether a number is a palindrome or not
# Function to reverse a number
def reverse(num):
    """Return the decimal digits of *num* in reversed order.

    Non-positive inputs skip the loop entirely and yield 0.
    """
    reversed_value = 0
    while num > 0:
        # Peel the last digit off num and push it onto reversed_value.
        num, last_digit = divmod(num, 10)
        reversed_value = reversed_value * 10 + last_digit
    return reversed_value
# --- main ---
# Prompt the user; int() raises ValueError on non-numeric input.
num = int(input("Enter a number:"))
a = reverse(num)  # digits of num in reverse order
# Comparing the reversed number with original number
if(a == num):
    print("Number entered is palindrome!")
else:
    print("Number entered is not a palindrome!")
'''
TEST CASES

Test case 1 (palindrome):
Enter a number:3445443
Number entered is palindrome!

Test case 2 (not a palindrome):
Enter a number:234
Number entered is not a palindrome!
'''
| jainaman224/Algo_Ds_Notes | Palindrome/palindrome_no.py | Python | gpl-3.0 | 690 |
#!/usr/bin/env python
import sys, json, os
# Traits stuff
from traits.api import HasTraits, Instance, Array, Bool, Dict, Range, \
Color, List, Int, Property, Any, Function, DelegatesTo, Str, Enum, \
on_trait_change, Button, Set, File, Int, Bool, cached_property
from traitsui.api import View, Item, VGroup, HGroup, Group, \
RangeEditor, TableEditor, Handler, Include,HSplit, EnumEditor, HSplit, Action, \
CheckListEditor, ObjectColumn
from ..database.traited_query import Scan
from ..streamlines.track_dataset import TrackDataset
from ..streamlines.track_math import connection_ids_from_tracks
from ..volumes.mask_dataset import MaskDataset
from ..volumes import get_NTU90, graphml_from_label_source, get_builtin_atlas_parameters
from dsi2.volumes import QSDR_AFFINE, QSDR_SHAPE
import networkx as nx
import numpy as np
import nibabel as nib
import gzip
from scipy.io.matlab import loadmat
import subprocess
import cPickle as pickle
import pymongo
from bson.binary import Binary
import os.path as op
import logging
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
import re
import multiprocessing
def init_db(db):
    """
    adds collections and makes sure that indexes work for them
    """
    # (collection name, compound index key spec) pairs, created in order.
    index_specs = [
        ("streamlines", [("scan_id", pymongo.ASCENDING),
                         ("sl_id", pymongo.ASCENDING)]),
        ("coordinates", [("scan_id", pymongo.ASCENDING),
                         ("ijk", pymongo.ASCENDING)]),
        ("connections", [("con_id", pymongo.ASCENDING),
                         ("scan_id", pymongo.ASCENDING),
                         ("atlas_id", pymongo.ASCENDING)]),
        ("streamline_labels", [("scan_id", pymongo.ASCENDING),
                               ("atlas_id", pymongo.ASCENDING)]),
        ("atlases", [("name", pymongo.ASCENDING)]),
        ("scans", [("scan_id", pymongo.ASCENDING),
                   ("subject_id", pymongo.ASCENDING)]),
    ]
    # ensure_index is idempotent: it creates the index only when missing.
    for collection_name, key_spec in index_specs:
        getattr(db, collection_name).ensure_index(key_spec)
def upload_atlases(db, trackds, sc):
    """
    Reads the atlas info from a Scan and loads the npy files from disk. Then
    1) uploads the atlas info into db.atlases
    2) uploads the label array for each atlas/scan into db.streamline_labels
    3) uploads the streamline ids for each connection in each atlas/scan into connections

    Returns True on success, False if any exception occurred.
    """
    try:
        # NOTE(review): this list is populated but never used afterwards.
        atlases = []
        logging.info("processing %d atlases for %s", len(trackds.properties.track_label_items), sc.scan_id)
        for label in trackds.properties.track_label_items:
            #pdb.set_trace()
            atlas_labels = label.load_array()
            # Does this atlas already exist? If not, add it to the collection.
            atlas = None
            result = db.atlases.find( { "name": label.name, "parameters": label.parameters } )
            if result.count() != 0:
                atlas = result[0]["_id"]
            else:
                atlas = db.atlases.insert( { "name": label.name, "parameters": label.parameters } )
            atlases.append(atlas)
            # One document per scan/atlas holding the full per-streamline
            # connection-id array (ints for BSON compatibility).
            db.streamline_labels.insert([
                {
                    "scan_id": sc.scan_id,
                    "atlas_id": atlas,
                    "con_ids": list(map(int,atlas_labels))
                }
            ])
            # -------------------------------------------
            # Insert data into the connections collection
            # -------------------------------------------
            inserts = []
            con_ids = set(atlas_labels)
            print "Building connections collection for %s %d..." % (label.name, label.parameters["scale"])
            for con_id in con_ids:
                # Streamline indices belonging to this connection.
                sl_ids = list(map(int,np.where(atlas_labels == con_id)[0]))
                inserts.append(
                    {
                        "con_id":"%d" % con_id,
                        "scan_id":sc.scan_id,
                        "atlas_id":atlas,
                        "sl_ids":sl_ids
                    }
                )
            db.connections.insert(inserts)
            print "done."
    except Exception, e:
        # Best-effort: report the failure and signal it to the caller.
        print "Failed to upload atlases", e
        return False
    return True
def upload_streamlines(db, trackds, sc):
    """
    Inserts the binary streamline data into db.streamlines

    One document per streamline, keyed by (scan_id, sl_id), with the
    pickled track stored as a BSON Binary blob. Documents are flushed in
    batches of 1000 to bound memory usage.

    Returns True on success, False (with the error printed) on failure.
    """
    try:
        logging.info("Building streamline collection")
        inserts = []
        for ntrk, trk in enumerate(trackds.tracks):
            # continue appending to inserts until it gets too big
            if len(inserts) >= 1000:
                # then insert it and clear inserts
                db.streamlines.insert(inserts)
                inserts = []
            inserts.append(
                {
                    "scan_id":sc.scan_id,
                    "sl_id": ntrk,
                    # protocol=2 keeps the pickle compact and Python-2 compatible
                    "data":Binary(pickle.dumps(trk,protocol=2))
                }
            )
        # Finally, insert the leftovers
        db.streamlines.insert(inserts)
    except Exception, e:
        print "Failed to upload streamline data", e
        return False
    return True
def upload_coordinate_info(db, trackds, sc):
    """
    Uploads the voxel -> streamline-id spatial hash into db.coordinates.

    One document is created per occupied (i, j, k) voxel, listing the ids
    of every streamline that passes through that voxel.

    Returns True on success, False (with the error printed) on failure.
    """
    try:
        inserts = []
        print "Building coordinate collection..."
        for coord,indices in trackds.tracks_at_ijk.iteritems():
            inserts.append(
                {
                    # Voxel stored as a "(i, j, k)" string key
                    "ijk":"(%d, %d, %d)" % tuple(map(int,coord)),
                    "scan_id":sc.scan_id,
                    "sl_id":list(map(int,indices))
                }
            )
        db.coordinates.insert(inserts)
        print "done."
    except Exception, e:
        print "Failed to upload coordinate info", e
        return False
    return True
def upload_scan_info(db, trackds, sc):
    """
    Uploads the scan's metadata document into db.scans.

    Also ensures every atlas referenced by the scan exists in db.atlases
    (inserting any that are missing) and records the resulting atlas ids
    on the scan document.

    Returns True on success, False (with the error printed) on failure.
    """
    try:
        #db.scans.insert([sc.original_json])
        atlases = []
        for label in sc.track_label_items:
            # Does this atlas already exist? If not, add it to the collection.
            atlas = None
            result = db.atlases.find( { "name": label.name, "parameters": label.parameters } )
            if result.count() != 0:
                atlas = result[0]["_id"]
            else:
                atlas = db.atlases.insert( { "name": label.name, "parameters": label.parameters } )
            atlases.append(atlas)
        db.scans.insert([
            {
            "scan_id":sc.scan_id,
            "subject_id":sc.subject_id,
            "gender":sc.scan_gender,
            "age":sc.scan_age,
            "study":sc.study,
            "group":sc.scan_group,
            "smoothing":sc.smoothing,
            "cutoff_angle":sc.cutoff_angle,
            "qa_threshold":sc.qa_threshold,
            "gfa_threshold":sc.gfa_threshold,
            "length_min":sc.length_min,
            "length_max":sc.length_max,
            "institution":sc.institution,
            "reconstruction":sc.reconstruction,
            "scanner":sc.scanner,
            "n_directions":sc.n_directions,
            "max_b_value":sc.max_b_value,
            "bvals":sc.bvals,
            "bvecs":sc.bvecs,
            "label":sc.label,
            "trk_space":sc.trk_space,
            # De-duplicated atlas ids collected above
            "atlases":list(set(atlases)),
            "sls": len(trackds.tracks),
            # Pickled trk header, stored as a BSON Binary blob
            "header":Binary(pickle.dumps(trackds.header,protocol=2)),
            "original_json":sc.original_json
            }
        ])
    except Exception, e:
        print "Failed to upload scan info", e
        return False
    return True
def upload_local_scan(db, sc):
logging.info("uploading %s", sc.scan_id)
try:
trackds = sc.get_track_dataset()
except:
print "failed to read pkl file"
return False, "pkl file corrupt"
if not upload_atlases(db, trackds, sc):
print "failed to upload atlases"
return False, "upload_atlases"
if not upload_streamlines(db, trackds, sc):
print "failed to upload streamlines"
return False, "upload_streamlines"
if not upload_coordinate_info(db, trackds, sc):
print "failed to upload spatial mapping"
return False, "upload_coordinate_info"
if not upload_scan_info(db, trackds, sc):
print "failed to upload spatial mapping"
return False, "upload scan info"
return True, "hooray!"
def check_scan_for_files(sc):
    """Checks to make sure that all the necessary files for this scan are on disk.
    If they are, it returns True, otherwise False

    Verifies both the pickle file (``sc.pkl_path``) and every label npy
    file (``label.numpy_path``) referenced by the scan.
    """
    pkl_file = sc.pkl_path
    if not os.path.exists(pkl_file):
        print("Unable to locate pickle file %s" % pkl_file)
        logging.error("Unable to locate pickle file %s", pkl_file)
        return False
    # Check that all the npy files exist
    for label in sc.track_label_items:
        npy_path = label.numpy_path
        if not os.path.exists(npy_path):
            print("unable to load %s" % npy_path)
            # Lazy %-args (consistent with the logging call above) instead
            # of eagerly interpolating the message string.
            logging.error("unable to load %s", npy_path)
            return False
    return True
def get_region_ints_from_graphml(graphml):
    """
    Returns a sorted list of integer region ids read from the nodes of a
    graphml file.
    """
    graph = nx.read_graphml(graphml)
    return sorted(int(node) for node in graph.nodes())
def b0_to_qsdr_map(fib_file, b0_atlas, output_v):
    """
    Creates a qsdr atlas from a DSI Studio fib file and a b0 atlas.

    Parameters
    ----------
    fib_file : path to a gzipped DSI Studio fib file holding the per-voxel
        mapping arrays mx/my/mz from qsdr space back into b0 space
    b0_atlas : path to a label volume (readable by nibabel) in b0 space
    output_v : path where the qsdr-space label volume is written

    The output is written as a Nifti1Image using the module-level
    QSDR_AFFINE.
    """
    # Load the mapping from the fib file
    fibf = gzip.open(fib_file,"rb")
    m = loadmat(fibf)
    fibf.close()
    # Output (qsdr) volume shape, and the b0 voxel index to sample for
    # each qsdr voxel along each axis
    volume_dimension = m['dimension'].squeeze().astype(int)
    mx = m['mx'].squeeze().astype(int)
    my = m['my'].squeeze().astype(int)
    mz = m['mz'].squeeze().astype(int)
    # Labels in b0 space
    _old_atlas = nib.load(b0_atlas)
    old_atlas = _old_atlas.get_data()
    old_aff = _old_atlas.get_affine()
    # QSDR maps from RAS+ space. Force the input volume to conform
    # by flipping any axis whose affine diagonal has the wrong sign.
    if old_aff[0,0] > 0:
        print "\t\t+++ Flipping X"
        old_atlas = old_atlas[::-1,:,:]
    if old_aff[1,1] > 0:
        print "\t\t+++ Flipping Y"
        old_atlas = old_atlas[:,::-1,:]
    if old_aff[2,2] < 0:
        print "\t\t+++ Flipping Z"
        old_atlas = old_atlas[:,:,::-1]
    # XXX: there is an error when importing some of the HCP datasets where the
    # map-from index is out of bounds from the b0 image. This will check for
    # any indices that would cause an index error and sets them to 0.
    bx, by, bz = old_atlas.shape
    idx_err_x = np.flatnonzero( mx >= bx)
    if len(idx_err_x):
        print "\t\t+++ WARNING: %d voxels are out of original data x range" % len(idx_err_x)
        mx[idx_err_x] = 0
    idx_err_y = np.flatnonzero( my >= by)
    if len(idx_err_y):
        print "\t\t+++ WARNING: %d voxels are out of original data y range" % len(idx_err_y)
        my[idx_err_y] = 0
    idx_err_z = np.flatnonzero( mz >= bz)
    if len(idx_err_z):
        print "\t\t+++ WARNING: %d voxels are out of original data z range" % len(idx_err_z)
        mz[idx_err_z] = 0
    # Fill up the output atlas with labels from b0, collected through the fib mappings
    new_atlas = old_atlas[mx,my,mz].reshape(volume_dimension,order="F")
    onim = nib.Nifti1Image(new_atlas,QSDR_AFFINE)
    onim.to_filename(output_v)
class lazy_tds(HasTraits):
file_name = File
tds = Property(Instance(TrackDataset))
@cached_property
def _get_tds(self):
# Load the tracks
tds = TrackDataset(self.file_name)
print "\t+ [%s] hashing tracks in qsdr space"
tds.hash_voxels_to_tracks()
print "\t\t++ [%s] Done."
return tds
def create_missing_files(scan):
    """
    Creates files on disk that are needed to visualize data

    Discrete space indexing
    -----------------------
    If the file stored in ``pkl_file`` does not exist,
    The ``trk_file`` attribute is loaded and indexed in MNI152
    space.
    Looks into all the track_labels and track_scalars and ensures
    that they exist at loading time
    """
    ## Ensure that the path where pkls are to be stored exists
    sid = scan.scan_id
    abs_pkl_file = scan.pkl_path
    pkl_directory = os.path.split(abs_pkl_file)[0]
    if not os.path.exists(pkl_directory):
        print "\t+ [%s] making directory for pkl_files" % sid
        os.makedirs(pkl_directory)
    print "\t\t++ [%s] pkl_directory is" % sid, pkl_directory
    abs_trk_file = scan.trk_file
    # Check that the pkl file exists, or the trk file
    if not os.path.exists(abs_pkl_file):
        if not os.path.exists(abs_trk_file):
            raise ValueError(abs_trk_file + " does not exist")
    # prevent loading and hashing of tracks unless necessary
    # to create missing files
    tds = lazy_tds(file_name=abs_trk_file, subject_name=sid)
    # =========================================================
    # Loop over the track labels, creating .npy files as needed
    n_labels = len(scan.track_label_items)
    print "\t+ [%s] Intersecting"%sid, n_labels, "label datasets"
    for lnum, label_source in enumerate(scan.track_label_items):
        # Load the mask
        # File containing the corresponding label vector
        npy_path = label_source.numpy_path
        print "\t\t++ [%s] Ensuring %s exists" % (sid, npy_path)
        if os.path.exists(npy_path):
            print "\t\t++ [%s]"%sid, npy_path, "already exists"
            continue
        # Check to see if the qsdr volume exists. If not, create it from
        # the B0 volume
        abs_qsdr_path = label_source.qsdr_volume_path
        abs_b0_path = label_source.b0_volume_path
        abs_fib_file = scan.fib_file
        if not os.path.exists(abs_qsdr_path):
            # If neither volume exists, the data is incomplete
            if not os.path.exists(abs_b0_path):
                print "\t\t++ [%s] ERROR: must have a b0 volume and .map.fib.gz OR a qsdr_volume"%sid
                continue
            print "\t\t++ [%s] mapping b0 labels to qsdr space"%sid
            b0_to_qsdr_map(abs_fib_file, abs_b0_path,
                           abs_qsdr_path)
        print "\t\t++ [%s] Loading volume %d/%d:\n\t\t\t %s" % (
            sid, lnum + 1, n_labels, abs_qsdr_path )
        mds = MaskDataset(abs_qsdr_path)
        # Get the region labels from the parcellation
        graphml = graphml_from_label_source(label_source)
        if graphml is None:
            print "\t\t++ [%s] No graphml exists: using unique region labels"%sid
            regions = mds.roi_ids
        else:
            print "\t\t++ [%s] Recognized atlas name, using Lausanne2008 atlas"%sid, graphml
            regions = get_region_ints_from_graphml(graphml)
        # Save it.
        # connection_ids_from_tracks writes the npy file as a side effect
        # (save_npy) and returns the per-streamline connection ids.
        conn_ids = connection_ids_from_tracks(mds, tds.tds,
            save_npy=npy_path,
            scale_coords=tds.tds.header['voxel_size'],
            region_ints=regions)
        print "\t\t++ [%s] Saved %s" % (sid, npy_path)
        print "\t\t\t*** [%s] %.2f percent streamlines not accounted for by regions"%( sid, 100. * np.sum(conn_ids==0)/len(conn_ids) )
    # =========================================================
    # Loop over the track scalars, creating .npy files as needed
    print "\t Dumping trakl GFA/QA values"
    for label_source in scan.track_scalar_items:
        # File containing the corresponding label vector
        npy_path = label_source.numpy_path if \
            os.path.isabs(label_source.numpy_path) else \
            os.path.join(scan.pkl_dir,label_source.numpy_path)
        if os.path.exists(npy_path):
            print npy_path, "already exists"
            continue
        print "\t\t++ saving values to", npy_path
        fop = open(label_source.txt_path,"r")
        # One scalar per streamline: the mean of that line's per-point values
        scalars = np.array(
            [np.fromstring(line,sep=" ").mean() for line in fop] )
        fop.close()
        np.save(npy_path,scalars)
        print "\t\t++ Done."
    # NOTE(review): ``output_dir`` is not defined in this function and is
    # presumably a module-level global -- verify it exists before relying
    # on the relative pkl_trk_path branch.
    if not os.path.isabs(scan.pkl_trk_path):
        abs_pkl_trk_file = os.path.join(output_dir,scan.pkl_trk_path)
    else:
        abs_pkl_trk_file = scan.pkl_trk_path
    print "\t+ Dumping MNI152 hash table"
    tds.tds.dump_qsdr2MNI_track_lookup(abs_pkl_file,abs_pkl_trk_file)
    return True
# TableEditor used by the LocalDataImporter view to display and edit the
# list of Scan objects; double-clicking a row opens Scan's "import_view".
scan_table = TableEditor(
    columns =
    [ ObjectColumn(name="scan_id",editable=True),
      ObjectColumn(name="study",editable=True),
      ObjectColumn(name="scan_group",editable=True),
      ObjectColumn(name="software",editable=True),
      ObjectColumn(name="reconstruction",editable=True),
    ],
    deletable = True,
    auto_size = True,
    show_toolbar = True,
    edit_view="import_view",
    row_factory=Scan
    #edit_view_height=500,
    #edit_view_width=500,
    )
class MongoCreator(HasTraits):
    """
    Traits helper for launching a local ``mongod`` daemon and handing out
    pymongo connections to it.
    """
    # Directory passed to mongod --dbpath (created on demand at start)
    database_dir = File()
    # File passed to mongod --logpath
    log_path = File()
    b_start = Button("Start mongod")
    # When True, mongod binds only to 127.0.0.1
    restrict_ips = Bool(True)
    # When True, prefix the command with "numactl --interleave=all"
    numactl_interleave = Bool(False)
    port = Str("27017")
    def get_command(self):
        # Assemble the mongod command line from the current trait values.
        cmd = []
        if self.numactl_interleave:
            cmd += ["numactl", "--interleave=all" ]
        cmd += ["mongod", "--fork", "--dbpath", self.database_dir,
                "--logpath", self.log_path, "--port", self.port ]
        if self.restrict_ips:
            cmd += ["--bind_ip", "127.0.0.1"]
        return cmd
    def get_connection(self):
        # Connect to the locally running mongod on the configured port.
        conn = pymongo.MongoClient(
            port=int(self.port),host="localhost")
        return conn
    def _b_start_fired(self):
        # Launch mongod; --fork makes the daemon background itself, so
        # communicate() returns once the parent process exits.
        print "Starting mongod"
        cmd = self.get_command()
        print cmd
        if not os.path.exists(self.database_dir):
            os.makedirs(self.database_dir)
        proc = subprocess.Popen(cmd,
            stdout = subprocess.PIPE,shell=False)
        result = proc.communicate()
        print result
        return result
    def shutdown(self):
        # Ask the server to stop via the admin "shutdown" command.
        conn = self.get_connection()
        dba = conn.admin
        try:
            dba.command({"shutdown":1})
        except Exception, e:
            # NOTE(review): errors here are printed, not raised -- likely
            # because shutdown drops the connection mid-command; verify.
            print e
    traits_view = View(
        VGroup(
            Item("database_dir"),
            Item("log_path"),
            Item("restrict_ips"),
            Item("port"),
            Item("numactl_interleave"),
            Group(
                Item("b_start"), show_labels=False)
        ), title="MongoDB Connection"
    )
class LocalDataImporter(HasTraits):
    """
    Holds a list of Scan objects. These can be loaded from
    and saved to a json file.

    Also drives the processing pipeline (create_missing_files) and the
    upload of each scan into a local MongoDB instance.
    """
    json_file = File()
    datasets = List(Instance(Scan))
    save = Button()
    mongo_creator = Instance(MongoCreator)
    upload_to_mongodb = Button()
    connect_to_mongod = Button()
    process_inputs = Button()
    input_directory = File()
    output_directory = File()
    # Number of worker processes used when processing inputs
    n_processors = Int(1)
    def _connect_to_mongod_fired(self):
        # Open the MongoCreator UI so the user can configure/start mongod.
        self.mongo_creator.edit_traits()
    def _mongo_creator_default(self):
        return MongoCreator()
    def _json_file_changed(self):
        # Reload the datasets list whenever a new json file is selected.
        if not os.path.exists(self.json_file):
            print "no such file", self.json_file
            return
        fop = open(self.json_file, "r")
        jdata = json.load(fop)
        fop.close()
        self.datasets = [
            Scan(pkl_dir=self.output_directory,
                 data_dir="", **d) for d in jdata]
    def _save_fired(self):
        # Serialize all scans back to the json file.
        json_data = [scan.to_json() for scan in self.datasets]
        with open(self.json_file,"w") as outfile:
            json.dump(json_data,outfile,indent=4)
        print "Saved", self.json_file
        pass
    def _process_inputs_fired(self):
        # Create missing pkl/npy files for every scan, in parallel when
        # n_processors > 1.
        print "Processing input data"
        if self.n_processors > 1:
            print "Using %d processors" % self.n_processors
            pool = multiprocessing.Pool(processes=self.n_processors)
            result = pool.map(create_missing_files, self.datasets)
            pool.close()
            pool.join()
        else:
            for scan in self.datasets:
                create_missing_files(scan)
        print "Finished!"
    def _upload_to_mongodb_fired(self):
        # Upload every scan into the "dsi2" database; abort with a
        # ValueError on the first scan with missing files or a failed stage.
        print "Connecting to mongodb"
        try:
            connection = self.mongo_creator.get_connection()
        except Exception:
            print "unable to establish connection with mongod"
            return
        db = connection.dsi2
        print "initializing dsi2 indexes"
        init_db(db)
        print "Uploading to MongoDB"
        for scan in self.datasets:
            print "\t+ Uploading", scan.scan_id
            if not check_scan_for_files(scan):
                raise ValueError("Missing files found for " + scan.scan_id)
            upload_succeeded, because = upload_local_scan(db, scan)
            if not upload_succeeded:
                raise ValueError(because)
    # UI definition for the local db
    traits_view = View(
        Item("json_file"),
        Group(
            Item("datasets",editor = scan_table),
            orientation="horizontal",
            show_labels=False
            ),
        Group(
            Item("save"),
            Item("process_inputs"),
            Item("connect_to_mongod"),
            Item("upload_to_mongodb"),
            orientation="horizontal",
            show_labels=False
            ),
        Item("n_processors"),
        resizable=True,
        width=900,
        height=500,
        title="Import Tractography Data"
    )
| mattcieslak/DSI2 | dsi2/ui/local_data_importer.py | Python | gpl-3.0 | 21,479 |
from module.plugins.Hoster import Hoster
from module.common.json_layer import json_loads
from module.network.HTTPRequest import BadHeader
class ReloadCc(Hoster):
    """Reload.Cc multi-hoster download plugin for pyLoad."""
    __name__ = "ReloadCc"
    __version__ = "0.5"
    __type__ = "hoster"
    __description__ = """Reload.Cc hoster plugin"""
    # Since we want to allow the user to specify the list of hoster to use we let MultiHoster.coreReady
    # create the regex patterns for us using getHosters in our ReloadCc hook.
    __pattern__ = None
    __author_name__ = ("Reload Team")
    __author_mail__ = ("hello@reload.cc")
    def process(self, pyfile):
        """Download ``pyfile`` via the Reload.cc API.

        Flow: validate the account, normalize the filename, request a
        download link from http://api.reload.cc/dl, then download it.
        API/download HTTP error codes are mapped to fail() or retry().
        """
        # Check account
        if not self.account or not self.account.canUse():
            self.logError(_("Please enter your %s account or deactivate this plugin") % "reload.cc")
            self.fail("No valid reload.cc account provided")
        # In some cases hostsers do not supply us with a filename at download, so we
        # are going to set a fall back filename (e.g. for freakshare or xfileshare)
        self.pyfile.name = self.pyfile.name.split('/').pop() # Remove everthing before last slash
        # Correction for automatic assigned filename: Removing html at end if needed
        suffix_to_remove = ["html", "htm", "php", "php3", "asp", "shtm", "shtml", "cfml", "cfm"]
        temp = self.pyfile.name.split('.')
        if temp.pop() in suffix_to_remove:
            self.pyfile.name = ".".join(temp)
        # Get account data
        (user, data) = self.account.selectAccount()
        query_params = dict(
            via='pyload',
            v=1,
            user=user,
            uri=self.pyfile.url
        )
        # Prefer the cached password hash; fall back to the plain password.
        try:
            query_params.update(dict(hash=self.account.infos[user]['pwdhash']))
        except Exception:
            query_params.update(dict(pwd=data['password']))
        try:
            answer = self.load("http://api.reload.cc/dl", get=query_params)
        except BadHeader, e:
            # Map API error codes to user-facing failures or retries.
            if e.code == 400:
                self.fail("The URI is not supported by Reload.cc.")
            elif e.code == 401:
                self.fail("Wrong username or password")
            elif e.code == 402:
                self.fail("Your account is inactive. A payment is required for downloading!")
            elif e.code == 403:
                self.fail("Your account is disabled. Please contact the Reload.cc support!")
            elif e.code == 409:
                self.logWarning("The hoster seems to be a limited hoster and you've used your daily traffic for this hoster: %s" % self.pyfile.url)
                # Wait for 6 hours and retry up to 4 times => one day
                self.retry(max_retries=4, wait_time=(3600 * 6), reason="Limited hoster traffic limit exceeded")
            elif e.code == 429:
                # Too many connections, wait 2 minutes and try again
                self.retry(max_retries=5, wait_time=120, reason="Too many concurrent connections")
            elif e.code == 503:
                # Retry in 10 minutes
                self.retry(wait_time=600,
                           reason="Reload.cc is currently in maintenance mode! Please check again later.")
            else:
                self.fail(
                    "Internal error within Reload.cc. Please contact the Reload.cc support for further information.")
            return
        data = json_loads(answer)
        # Check status and decide what to do
        status = data.get('status', None)
        if status == "ok":
            conn_limit = data.get('msg', 0)
            # API says these connections are limited
            # Make sure this limit is used - the download will fail if not
            if conn_limit > 0:
                try:
                    self.limitDL = int(conn_limit)
                except ValueError:
                    self.limitDL = 1
            else:
                self.limitDL = 0
            try:
                self.download(data['link'], disposition=True)
            except BadHeader, e:
                # Map download error codes to user-facing failures or retries.
                if e.code == 404:
                    self.fail("File Not Found")
                elif e.code == 412:
                    self.fail("File access password is wrong")
                elif e.code == 417:
                    self.fail("Password required for file access")
                elif e.code == 429:
                    # Too many connections, wait 2 minutes and try again
                    self.retry(max_retries=5, wait_time=120, reason="Too many concurrent connections")
                else:
                    self.fail(
                        "Internal error within Reload.cc. Please contact the Reload.cc support for further information."
                    )
                return
        else:
            self.fail("Internal error within Reload.cc. Please contact the Reload.cc support for further information.")
| Rusk85/pyload | module/plugins/hoster/ReloadCc.py | Python | gpl-3.0 | 4,866 |
from setuptools import setup
# Read the long description up front so the file handle is closed
# deterministically (a bare open(...).read() inside the call relies on GC
# to close the file and raises ResourceWarning on CPython 3).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup (
    name='ujs-jsonvalidate',
    version='0.1.0',
    description='JSON validation utility',
    long_description=long_description,
    url='http://github.com/usingjsonschema/ujs-jsonvalidate-python',
    author='Joe McIntyre',
    author_email='j_h_mcintyre@yahoo.com',
    # NOTE(review): "bookujs" looks like a typo (perhaps "book ujs") --
    # confirm with the package owner before changing the keyword string.
    keywords='bookujs json json-schema',
    license='MIT',
    packages=['jsonvalidate'],
    install_requires=['jsonschema', 'ujs-safefile'],
    entry_points={
        'console_scripts':[
            'validate=jsonvalidate.main:main',
            'validatep=jsonvalidate.main:main'
        ]
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: Software Development'
    ])
| usingjsonschema/ujs-jsonvalidate-python | setup.py | Python | mit | 1,154 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualHubIpConfigurationOperations(object):
"""VirtualHubIpConfigurationOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        # msrest serializer/deserializer pair for URLs, headers and bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription_id, polling_interval, ...).
        self._config = config
def get(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
ip_config_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.HubIpConfiguration"
"""Retrieves the details of a Virtual Hub Ip configuration.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param ip_config_name: The name of the ipconfig.
:type ip_config_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HubIpConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.HubIpConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HubIpConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'ipConfigName': self._serialize.url("ip_config_name", ip_config_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('HubIpConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/ipConfigurations/{ipConfigName}'} # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        virtual_hub_name,  # type: str
        ip_config_name,  # type: str
        parameters,  # type: "_models.HubIpConfiguration"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.HubIpConfiguration"
        """Issue the initial PUT of the create-or-update long-running
        operation and deserialize the immediate (200/201) response."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.HubIpConfiguration"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
            'ipConfigName': self._serialize.url("ip_config_name", ip_config_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the HubIpConfiguration model into the request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'HubIpConfiguration')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 (updated) and 201 (created) carry a HubIpConfiguration body.
        if response.status_code == 200:
            deserialized = self._deserialize('HubIpConfiguration', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('HubIpConfiguration', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/ipConfigurations/{ipConfigName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        virtual_hub_name,  # type: str
        ip_config_name,  # type: str
        parameters,  # type: "_models.HubIpConfiguration"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.HubIpConfiguration"]
        """Creates a VirtualHubIpConfiguration resource if it doesn't exist else updates the existing
        VirtualHubIpConfiguration.

        :param resource_group_name: The resource group name of the VirtualHub.
        :type resource_group_name: str
        :param virtual_hub_name: The name of the VirtualHub.
        :type virtual_hub_name: str
        :param ip_config_name: The name of the ipconfig.
        :type ip_config_name: str
        :param parameters: Hub Ip Configuration parameters.
        :type parameters: ~azure.mgmt.network.v2020_08_01.models.HubIpConfiguration
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either HubIpConfiguration or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_08_01.models.HubIpConfiguration]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.HubIpConfiguration"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                virtual_hub_name=virtual_hub_name,
                ip_config_name=ip_config_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response (and apply the custom cls hook).
            deserialized = self._deserialize('HubIpConfiguration', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
            'ipConfigName': self._serialize.url("ip_config_name", ip_config_name, 'str'),
        }

        # Select the polling strategy: ARM polling by default, no polling, or a
        # caller-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/ipConfigurations/{ipConfigName}'}  # type: ignore
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        virtual_hub_name,  # type: str
        ip_config_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Issue the initial DELETE of the delete long-running operation;
        accepts 200/202/204 and returns nothing."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
            'ipConfigName': self._serialize.url("ip_config_name", ip_config_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/ipConfigurations/{ipConfigName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        virtual_hub_name,  # type: str
        ip_config_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a VirtualHubIpConfiguration.

        :param resource_group_name: The resource group name of the VirtualHubBgpConnection.
        :type resource_group_name: str
        :param virtual_hub_name: The name of the VirtualHub.
        :type virtual_hub_name: str
        :param ip_config_name: The name of the ipconfig.
        :type ip_config_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: issue the initial DELETE. `cls` is
            # overridden so the raw pipeline response is returned for the
            # poller to consume.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                virtual_hub_name=virtual_hub_name,
                ip_config_name=ip_config_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs only applied to the initial request; drop them so they
        # are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # The operation has no response body; apply the optional custom
            # deserialization hook (`cls`), otherwise return None.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
            'ipConfigName': self._serialize.url("ip_config_name", ip_config_name, 'str'),
        }

        # Completion is signalled via the Location header
        # ('final-state-via': 'location').
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume polling from a previously saved continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/ipConfigurations/{ipConfigName}'}  # type: ignore
    def list(
        self,
        resource_group_name,  # type: str
        virtual_hub_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ListVirtualHubIpConfigurationResults"]
        """Retrieves the details of all VirtualHubIpConfigurations.

        :param resource_group_name: The resource group name of the VirtualHub.
        :type resource_group_name: str
        :param virtual_hub_name: The name of the VirtualHub.
        :type virtual_hub_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListVirtualHubIpConfigurationResults or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.ListVirtualHubIpConfigurationResults]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVirtualHubIpConfigurationResults"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request: the templated URL for the first page, or
            # the service-provided (already fully qualified) next_link after.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its query string, so none is added.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page; returns (link to next page or None,
            # iterator over this page's elements).
            deserialized = self._deserialize('ListVirtualHubIpConfigurationResults', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch a single page, mapping auth/404/409 onto typed exceptions.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/ipConfigurations'}  # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/operations/_virtual_hub_ip_configuration_operations.py | Python | mit | 22,118 |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for state domain objects and methods defined on them."""
import functools
import logging
import os
import re
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import html_validation_service
from core.domain import state_domain
from core.tests import test_utils
import feconf
import utils
def mock_get_filename_with_dimensions(filename, unused_exp_id):
    """Test double for html_validation_service.get_filename_with_dimensions
    that always bakes fixed 490x120 dimensions into the filename.
    """
    height, width = 490, 120
    regenerated_filename = (
        html_validation_service.regenerate_image_filename_using_dimensions(
            filename, height, width))
    return regenerated_filename
class StateDomainUnitTests(test_utils.GenericTestBase):
"""Test methods operating on states."""
    def test_export_state_to_dict(self):
        """Test exporting a state to a dict."""
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id')
        exploration.add_states(['New state'])

        state_dict = exploration.states['New state'].to_dict()
        # A freshly added state: empty content, no interaction chosen yet
        # (id None), and a default outcome that points back at the state.
        expected_dict = {
            'classifier_model_id': None,
            'content': {
                'content_id': 'content',
                'html': ''
            },
            'interaction': {
                'answer_groups': [],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': 'New state',
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': ''
                    },
                    'labelled_as_correct': False,
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'id': None,
                'solution': None,
            },
            'param_changes': [],
            'recorded_voiceovers': {
                'voiceovers_mapping': {
                    'content': {},
                    'default_outcome': {}
                }
            },
            'solicit_answer_details': False,
            'written_translations': {
                'translations_mapping': {
                    'content': {},
                    'default_outcome': {}
                }
            }
        }
        self.assertEqual(expected_dict, state_dict)
    def test_can_undergo_classification(self):
        """Test the can_undergo_classification() function."""
        exploration_id = 'eid'
        # Fixture exploration whose 'Home' state ships with training data.
        test_exp_filepath = os.path.join(
            feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
        yaml_content = utils.get_file_contents(test_exp_filepath)
        assets_list = []
        exp_services.save_new_exploration_from_yaml_and_assets(
            feconf.SYSTEM_COMMITTER_ID, yaml_content, exploration_id,
            assets_list)

        exploration = exp_fetchers.get_exploration_by_id(exploration_id)
        state_with_training_data = exploration.states['Home']
        state_without_training_data = exploration.states['End']

        # A state with 786 training examples.
        self.assertTrue(
            state_with_training_data.can_undergo_classification())

        # A state with no training examples.
        self.assertFalse(
            state_without_training_data.can_undergo_classification())
    def test_get_training_data(self):
        """Test retrieval of training data."""
        exploration_id = 'eid'
        # Demo exploration whose 'text' state has a trained answer group.
        test_exp_filepath = os.path.join(
            feconf.SAMPLE_EXPLORATIONS_DIR, 'classifier_demo_exploration.yaml')
        yaml_content = utils.get_file_contents(test_exp_filepath)
        assets_list = []
        exp_services.save_new_exploration_from_yaml_and_assets(
            feconf.SYSTEM_COMMITTER_ID, yaml_content, exploration_id,
            assets_list)

        exploration = exp_fetchers.get_exploration_by_id(exploration_id)
        state = exploration.states['text']

        # Training data pairs each answer group index with its raw answers.
        expected_training_data = [{
            'answer_group_index': 1,
            'answers': [u'cheerful', u'merry', u'ecstatic', u'glad',
                        u'overjoyed', u'pleased', u'thrilled', u'smile']}]

        observed_training_data = state.get_training_data()

        self.assertEqual(observed_training_data, expected_training_data)
    def test_state_operations(self):
        """Test adding, updating and checking existence of states."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        self.assertNotIn('invalid_state_name', exploration.states)

        # A default exploration starts with exactly one (initial) state.
        self.assertEqual(len(exploration.states), 1)

        default_state_name = exploration.init_state_name
        exploration.rename_state(default_state_name, 'Renamed state')
        self.assertEqual(len(exploration.states), 1)
        self.assertEqual(exploration.init_state_name, 'Renamed state')

        # Add a new state.
        exploration.add_states(['State 2'])
        self.assertEqual(len(exploration.states), 2)

        # It is OK to rename a state to the same name.
        exploration.rename_state('State 2', 'State 2')

        # But it is not OK to add or rename a state using a name that already
        # exists.
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.add_states(['State 2'])
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.rename_state('State 2', 'Renamed state')

        # And it is OK to rename a state to 'END' (old terminal pseudostate). It
        # is tested throughout this test because a lot of old behavior used to
        # be specific to states named 'END'. These tests validate that is no
        # longer the situation.
        exploration.rename_state('State 2', 'END')

        # Should successfully be able to name it back.
        exploration.rename_state('END', 'State 2')

        # The exploration now has exactly two states.
        self.assertNotIn(default_state_name, exploration.states)
        self.assertIn('Renamed state', exploration.states)
        self.assertIn('State 2', exploration.states)

        # Can successfully add 'END' state.
        exploration.add_states(['END'])

        # Should fail to rename like any other state.
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.rename_state('State 2', 'END')

        # Ensure the other states are connected to END.
        exploration.states[
            'Renamed state'].interaction.default_outcome.dest = 'State 2'
        exploration.states['State 2'].interaction.default_outcome.dest = 'END'

        # Ensure the other states have interactions.
        exploration.states['Renamed state'].update_interaction_id('TextInput')
        exploration.states['State 2'].update_interaction_id('TextInput')

        # Other miscellaneous requirements for validation.
        exploration.title = 'Title'
        exploration.category = 'Category'
        exploration.objective = 'Objective'

        # The exploration should NOT be terminable even though it has a state
        # called 'END' and everything else is connected to it.
        with self.assertRaises(Exception):
            exploration.validate(strict=True)

        # Renaming the node to something other than 'END' and giving it an
        # EndExploration is enough to validate it, though it cannot have a
        # default outcome or answer groups.
        exploration.rename_state('END', 'AnotherEnd')
        another_end_state = exploration.states['AnotherEnd']
        another_end_state.update_interaction_id('EndExploration')
        another_end_state.update_interaction_default_outcome(None)
        exploration.validate(strict=True)

        # Name it back for final tests.
        exploration.rename_state('AnotherEnd', 'END')

        # Should be able to successfully delete it.
        exploration.delete_state('END')
        self.assertNotIn('END', exploration.states)
def test_update_solicit_answer_details(self):
"""Test updating solicit_answer_details."""
state = state_domain.State.create_default_state('state_1')
self.assertEqual(state.solicit_answer_details, False)
state.update_solicit_answer_details(True)
self.assertEqual(state.solicit_answer_details, True)
def test_update_solicit_answer_details_with_non_bool_fails(self):
"""Test updating solicit_answer_details with non bool value."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
init_state = exploration.states[exploration.init_state_name]
self.assertEqual(init_state.solicit_answer_details, False)
with self.assertRaisesRegexp(Exception, (
'Expected solicit_answer_details to be a boolean, received')):
init_state.update_solicit_answer_details('abc')
init_state = exploration.states[exploration.init_state_name]
self.assertEqual(init_state.solicit_answer_details, False)
    def test_convert_html_fields_in_state(self):
        """Test conversion of html strings in state."""
        # Input fixture: content html not wrapped in <p>, and an image tag
        # with no caption attribute and no dimensions in the filename.
        state_dict = {
            'content': {
                'content_id': 'content', 'html': 'Hello!'
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'classifier_model_id': None,
            'interaction': {
                'solution': None,
                'answer_groups': [],
                'default_outcome': {
                    'param_changes': [], 'feedback': {
                        'content_id': 'default_outcome', 'html': (
                            '<p><oppia-noninteractive-image filepath'
                            '-with-value="&quot;random.png&'
                            'quot;"></oppia-noninteractive-image>'
                            'Hello this is test case to check '
                            'image tag inside p tag</p>'
                        )
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {},
                'confirmed_unclassified_answers': [],
                'id': None,
                'hints': []
            }
        }

        # Expected result of convert_to_textangular: the bare content html
        # gains <p> wrapping; the feedback html is already valid and is kept.
        state_dict_in_textangular = {
            'content': {
                'content_id': 'content', 'html': '<p>Hello!</p>'
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'classifier_model_id': None,
            'interaction': {
                'solution': None,
                'answer_groups': [],
                'default_outcome': {
                    'param_changes': [], 'feedback': {
                        'content_id': 'default_outcome', 'html': (
                            '<p><oppia-noninteractive-image filepath'
                            '-with-value="&quot;random.png&'
                            'quot;"></oppia-noninteractive-image>'
                            'Hello this is test case to check '
                            'image tag inside p tag</p>'
                        )
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {},
                'confirmed_unclassified_answers': [],
                'id': None,
                'hints': []
            }
        }

        # Expected result of add_caption_attr_to_image: the image tag gains
        # an empty caption-with-value attribute.
        state_dict_with_image_caption = {
            'content': {
                'content_id': 'content', 'html': '<p>Hello!</p>'
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'classifier_model_id': None,
            'interaction': {
                'solution': None,
                'answer_groups': [],
                'default_outcome': {
                    'param_changes': [], 'feedback': {
                        'content_id': 'default_outcome', 'html': (
                            '<p><oppia-noninteractive-image caption-'
                            'with-value="&quot;&quot;" filepath'
                            '-with-value="&quot;random.png&'
                            'quot;"></oppia-noninteractive-image>'
                            'Hello this is test case to check '
                            'image tag inside p tag</p>'
                        )
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {},
                'confirmed_unclassified_answers': [],
                'id': None,
                'hints': []
            }
        }

        # Expected result of add_dimensions_to_image_tags with the mocked
        # filename helper: 490x120 is baked into the image filename.
        state_dict_with_image_dimensions = {
            'content': {
                'content_id': 'content', 'html': '<p>Hello!</p>'
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'classifier_model_id': None,
            'interaction': {
                'solution': None,
                'answer_groups': [],
                'default_outcome': {
                    'param_changes': [], 'feedback': {
                        'content_id': 'default_outcome', 'html': (
                            u'<p><oppia-noninteractive-image '
                            'caption-with-value="&quot;&quot;" '
                            'filepath-with-value="&quot;'
                            'random_height_490_width_120.png&'
                            'quot;"></oppia-noninteractive-image>Hello this '
                            'is test case to check image tag inside p tag</p>'
                        )
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {},
                'confirmed_unclassified_answers': [],
                'id': None,
                'hints': []
            }
        }

        self.assertEqual(
            state_domain.State.convert_html_fields_in_state(
                state_dict,
                html_validation_service.convert_to_textangular),
            state_dict_in_textangular)

        self.assertEqual(
            state_domain.State.convert_html_fields_in_state(
                state_dict,
                html_validation_service.add_caption_attr_to_image),
            state_dict_with_image_caption)

        add_dimensions_to_image_tags = functools.partial(
            html_validation_service.add_dimensions_to_image_tags,
            'eid')

        # The real filename helper hits storage, so it is swapped out for the
        # module-level mock that returns fixed dimensions.
        with self.swap(
            html_validation_service, 'get_filename_with_dimensions',
            mock_get_filename_with_dimensions):

            self.assertEqual(
                state_domain.State.convert_html_fields_in_state(
                    state_dict, add_dimensions_to_image_tags),
                state_dict_with_image_dimensions)
def test_subtitled_html_validation_with_invalid_html_type(self):
"""Test validation of subtitled HTML with invalid html type."""
subtitled_html = state_domain.SubtitledHtml(
'content_id', '<p>some html</p>')
subtitled_html.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid content HTML'
):
with self.swap(subtitled_html, 'html', 20):
subtitled_html.validate()
    def test_subtitled_html_validation_with_invalid_content(self):
        """Test validation of subtitled HTML with invalid content."""
        subtitled_html = state_domain.SubtitledHtml(
            'content_id', '<p>some html</p>')
        subtitled_html.validate()
        # A non-string content id must be rejected.
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected content id to be a string, ' +
            'received 20'):
            with self.swap(subtitled_html, 'content_id', 20):
                subtitled_html.validate()
def test_voiceover_validation(self):
"""Test validation of voiceover."""
audio_voiceover = state_domain.Voiceover('a.mp3', 20, True)
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected audio filename to be a string'
):
with self.swap(audio_voiceover, 'filename', 20):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid audio filename'
):
with self.swap(audio_voiceover, 'filename', '.invalidext'):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid audio filename'
):
with self.swap(audio_voiceover, 'filename', 'justanextension'):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid audio filename'
):
with self.swap(audio_voiceover, 'filename', 'a.invalidext'):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected file size to be an int'
):
with self.swap(audio_voiceover, 'file_size_bytes', 'abc'):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid file size'
):
with self.swap(audio_voiceover, 'file_size_bytes', -3):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected needs_update to be a bool'
):
with self.swap(audio_voiceover, 'needs_update', 'hello'):
audio_voiceover.validate()
    def test_written_translation_validation(self):
        """Test validation of translation script."""
        written_translation = state_domain.WrittenTranslation('Test.', True)
        written_translation.validate()

        # The html field must be a string.
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid content HTML'):
            with self.swap(written_translation, 'html', 30):
                written_translation.validate()

        # The needs_update flag must be a bool.
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected needs_update to be a bool'
        ):
            with self.swap(written_translation, 'needs_update', 20):
                written_translation.validate()
    def test_hints_validation(self):
        """Test validation of state hints."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        exploration.objective = 'Objective'
        init_state = exploration.states[exploration.init_state_name]
        init_state.update_interaction_id('TextInput')
        exploration.validate()

        hints_list = []
        hints_list.append({
            'hint_content': {
                'content_id': 'hint_1',
                'html': '<p>hint one</p>'
            },
        })
        init_state.update_interaction_hints(hints_list)

        solution = {
            'answer_is_exclusive': False,
            'correct_answer': 'helloworld!',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            },
        }

        init_state.update_interaction_solution(solution)
        exploration.validate()

        # Appending a second hint and re-applying the list updates the state.
        hints_list.append({
            'hint_content': {
                'content_id': 'hint_2',
                'html': '<p>new hint</p>'
            }
        })
        init_state.update_interaction_hints(hints_list)

        self.assertEqual(
            init_state.interaction.hints[1].hint_content.html,
            '<p>new hint</p>')

        hints_list.append({
            'hint_content': {
                'content_id': 'hint_3',
                'html': '<p>hint three</p>'
            }
        })
        init_state.update_interaction_hints(hints_list)

        # Removing the middle hint and re-applying leaves exactly two hints.
        del hints_list[1]
        init_state.update_interaction_hints(hints_list)

        self.assertEqual(len(init_state.interaction.hints), 2)
        exploration.validate()
    def test_solution_validation(self):
        """Test validation of state solution."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        exploration.objective = 'Objective'
        init_state = exploration.states[exploration.init_state_name]
        init_state.update_interaction_id('TextInput')
        exploration.validate()

        # Solution should be set to None as default.
        self.assertEqual(init_state.interaction.solution, None)

        hints_list = []
        hints_list.append({
            'hint_content': {
                'content_id': 'hint_1',
                'html': ''
            },
        })
        init_state.update_interaction_hints(hints_list)
        # A list is the wrong answer type for a TextInput interaction.
        solution = {
            'answer_is_exclusive': False,
            'correct_answer': [0, 0],
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            }
        }

        # Object type of answer must match that of correct_answer.
        with self.assertRaises(AssertionError):
            init_state.interaction.solution = (
                state_domain.Solution.from_dict(
                    init_state.interaction.id, solution))

        # A string answer is accepted for TextInput.
        solution = {
            'answer_is_exclusive': False,
            'correct_answer': 'hello_world!',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            }
        }
        init_state.update_interaction_solution(solution)
        exploration.validate()
    def test_validate_state_solicit_answer_details(self):
        """Test validation of solicit_answer_details."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        init_state = exploration.states[exploration.init_state_name]
        self.assertEqual(init_state.solicit_answer_details, False)
        # The flag must be a bool.
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected solicit_answer_details to be ' +
            'a boolean, received'):
            with self.swap(init_state, 'solicit_answer_details', 'abc'):
                exploration.validate()
        self.assertEqual(init_state.solicit_answer_details, False)
        init_state.update_interaction_id('Continue')
        self.assertEqual(init_state.interaction.id, 'Continue')
        exploration.validate()
        # The Continue interaction may not solicit answer details.
        with self.assertRaisesRegexp(
            utils.ValidationError, 'The Continue interaction does not ' +
            'support soliciting answer details from learners.'):
            with self.swap(init_state, 'solicit_answer_details', True):
                exploration.validate()
        init_state.update_interaction_id('TextInput')
        self.assertEqual(init_state.interaction.id, 'TextInput')
        self.assertEqual(init_state.solicit_answer_details, False)
        exploration.validate()
        # TextInput does support soliciting answer details.
        init_state.solicit_answer_details = True
        self.assertEqual(init_state.solicit_answer_details, True)
        exploration.validate()
        init_state = exploration.states[exploration.init_state_name]
        self.assertEqual(init_state.solicit_answer_details, True)
    def test_validate_solution_answer_is_exclusive(self):
        """Test that answer_is_exclusive must be a bool for a solution to
        pass validation.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')

        # Solution should be set to None as default.
        self.assertEqual(exploration.init_state.interaction.solution, None)

        hints_list = []
        solution = {
            'answer_is_exclusive': False,
            'correct_answer': 'hello_world!',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            }
        }
        hints_list.append({
            'hint_content': {
                'content_id': 'hint_1',
                'html': ''
            },
        })
        exploration.init_state.update_interaction_hints(hints_list)
        exploration.init_state.update_interaction_solution(solution)
        exploration.validate()

        # A non-bool answer_is_exclusive (here the int 1) must fail.
        solution = {
            'answer_is_exclusive': 1,
            'correct_answer': 'hello_world!',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            }
        }
        exploration.init_state.update_interaction_solution(solution)
        with self.assertRaisesRegexp(
            Exception, 'Expected answer_is_exclusive to be bool, received 1'):
            exploration.validate()
def test_validate_non_list_param_changes(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
exploration.init_state.param_changes = 0
with self.assertRaisesRegexp(
Exception, 'Expected state param_changes to be a list, received 0'):
exploration.init_state.validate(None, True)
    def test_validate_duplicate_content_id_with_answer_groups(self):
        """Validation rejects reuse of an answer group's feedback content
        id elsewhere in the state.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        answer_group_dict = {
            'outcome': {
                'dest': exploration.init_state_name,
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': 'Test'
                },
                'rule_type': 'Contains'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }

        exploration.init_state.update_interaction_answer_groups(
            [answer_group_dict])

        # Reusing 'feedback_1' as the state content's content id conflicts
        # with the answer group's feedback above.
        exploration.init_state.update_content({
            'content_id': 'feedback_1',
            'html': '<p>Feedback</p>'
        })

        with self.assertRaisesRegexp(
            Exception, 'Found a duplicate content id feedback_1'):
            exploration.init_state.validate(None, True)
    def test_validate_duplicate_content_id_with_default_outcome(self):
        """Validation rejects reuse of the default outcome's content id."""
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        default_outcome_dict = {
            'dest': 'Introduction',
            'feedback': {
                'content_id': 'default_outcome',
                'html': ''},
            'labelled_as_correct': False,
            'missing_prerequisite_skill_id': None,
            'param_changes': [],
            'refresher_exploration_id': None
        }

        exploration.init_state.update_interaction_default_outcome(
            default_outcome_dict)
        # Reusing 'default_outcome' as the state content's content id
        # conflicts with the default outcome's feedback above.
        exploration.init_state.update_content({
            'content_id': 'default_outcome',
            'html': ''
        })

        with self.assertRaisesRegexp(
            Exception, 'Found a duplicate content id default_outcome'):
            exploration.init_state.validate(None, True)
    def test_validate_duplicate_content_id_with_hints(self):
        """Validation rejects reuse of a hint's content id."""
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        hints_list = [{
            'hint_content': {
                'content_id': 'hint_1',
                'html': '<p>some html</p>'
            }
        }]

        exploration.init_state.update_interaction_hints(hints_list)
        # Reusing 'hint_1' as the state content's content id conflicts with
        # the hint above.
        exploration.init_state.update_content({
            'content_id': 'hint_1',
            'html': ''
        })

        with self.assertRaisesRegexp(
            Exception, 'Found a duplicate content id hint_1'):
            exploration.init_state.validate(None, True)
    def test_validate_duplicate_content_id_with_solution(self):
        """Validation rejects reuse of the solution's content id."""
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        subtitled_html = state_domain.SubtitledHtml('content_id', 'some html')

        hints_list = [state_domain.Hint(subtitled_html)]

        exploration.init_state.interaction.hints = hints_list
        solution = {
            'answer_is_exclusive': True,
            'correct_answer': 'hello_world!',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            }
        }

        exploration.init_state.update_interaction_solution(solution)
        # Reusing 'solution' as the state content's content id conflicts
        # with the solution's explanation above.
        exploration.init_state.update_content({
            'content_id': 'solution',
            'html': ''
        })

        with self.assertRaisesRegexp(
            Exception, 'Found a duplicate content id solution'):
            exploration.init_state.validate(None, True)
    def test_cannot_convert_state_dict_to_yaml_with_invalid_state_dict(self):
        """convert_state_dict_to_yaml logs the bad dict and raises."""
        observed_log_messages = []

        def _mock_logging_function(msg, *args):
            """Mocks logging.info()."""
            observed_log_messages.append(msg % args)

        logging_swap = self.swap(logging, 'info', _mock_logging_function)
        invalid_state_dict_assert_raises = self.assertRaisesRegexp(
            Exception, 'Could not convert state dict to YAML')

        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')

        # A plain string is not a valid state dict.
        with logging_swap, invalid_state_dict_assert_raises:
            exploration.init_state.convert_state_dict_to_yaml(
                'invalid_state_dict', 10)

        self.assertEqual(
            observed_log_messages, ['Bad state dict: invalid_state_dict'])
    def test_cannot_update_hints_with_content_id_not_in_written_translations(
            self):
        """Replacing a hint whose old content id is missing from
        written_translations raises an error.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        old_hints_list = [{
            'hint_content': {
                'content_id': 'hint_1',
                'html': '<p>Hello, this is html1 for state2</p>'
            }
        }]
        new_hints_list = [{
            'hint_content': {
                'content_id': 'hint_2',
                'html': '<p>Hello, this is html2 for state2</p>'
            }
        }]

        exploration.init_state.update_interaction_hints(old_hints_list)

        # Note: there is no 'hint_1' key in this translations mapping.
        written_translations_dict = {
            'translations_mapping': {
                'content': {
                    'hi': {
                        'html': '<p>Test!</p>',
                        'needs_update': True
                    }
                },
                'default_outcome': {}
            }
        }
        written_translations = (
            state_domain.WrittenTranslations.from_dict(
                written_translations_dict))

        exploration.init_state.update_written_translations(written_translations)

        with self.assertRaisesRegexp(
            Exception,
            'The content_id hint_1 does not exist in written_translations'):
            exploration.init_state.update_interaction_hints(new_hints_list)
    def test_cannot_update_hints_with_content_id_not_in_recorded_voiceovers(
            self):
        """Replacing a hint whose old content id is missing from
        recorded_voiceovers raises an error.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        old_hints_list = [{
            'hint_content': {
                'content_id': 'hint_1',
                'html': '<p>Hello, this is html1 for state2</p>'
            }
        }]
        new_hints_list = [{
            'hint_content': {
                'content_id': 'hint_2',
                'html': '<p>Hello, this is html2 for state2</p>'
            }
        }]

        exploration.init_state.update_interaction_hints(old_hints_list)

        # Note: there is no 'hint_1' key in this voiceovers mapping.
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content': {
                    'en': {
                        'filename': 'filename3.mp3',
                        'file_size_bytes': 3000,
                        'needs_update': False
                    }
                },
                'default_outcome': {}
            }
        }
        recorded_voiceovers = (
            state_domain.RecordedVoiceovers.from_dict(recorded_voiceovers_dict))

        exploration.init_state.update_recorded_voiceovers(recorded_voiceovers)

        with self.assertRaisesRegexp(
            Exception,
            'The content_id hint_1 does not exist in recorded_voiceovers'):
            exploration.init_state.update_interaction_hints(new_hints_list)
    def test_cannot_update_hints_with_new_content_id_in_written_translations(
            self):
        """Introducing a hint whose content id is already present in
        written_translations raises an error.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        old_hints_list = [{
            'hint_content': {
                'content_id': 'hint_1',
                'html': '<p>Hello, this is html1 for state2</p>'
            }
        }]
        new_hints_list = [{
            'hint_content': {
                'content_id': 'hint_2',
                'html': '<p>Hello, this is html2 for state2</p>'
            }
        }]

        exploration.init_state.update_interaction_hints(old_hints_list)

        # Note: 'hint_2' already has an entry here, so adding a hint with
        # that content id below must conflict.
        written_translations_dict = {
            'translations_mapping': {
                'hint_2': {
                    'hi': {
                        'html': '<p>Test!</p>',
                        'needs_update': True
                    }
                },
                'hint_1': {
                    'hi': {
                        'html': '<p>Test1!</p>',
                        'needs_update': True
                    }
                },
                'default_outcome': {}
            }
        }
        written_translations = (
            state_domain.WrittenTranslations.from_dict(
                written_translations_dict))

        exploration.init_state.update_written_translations(written_translations)

        with self.assertRaisesRegexp(
            Exception,
            'The content_id hint_2 already exists in written_translations'):
            exploration.init_state.update_interaction_hints(new_hints_list)
    def test_cannot_update_hints_with_new_content_id_in_recorded_voiceovers(
            self):
        """Replacing a hint with a new content_id that is already registered
        in the state's recorded_voiceovers should raise an exception.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        old_hints_list = [{
            'hint_content': {
                'content_id': 'hint_1',
                'html': '<p>Hello, this is html1 for state2</p>'
            }
        }]
        new_hints_list = [{
            'hint_content': {
                'content_id': 'hint_2',
                'html': '<p>Hello, this is html2 for state2</p>'
            }
        }]
        exploration.init_state.update_interaction_hints(old_hints_list)
        # 'hint_2' is deliberately pre-registered here so the hint update
        # below collides with it.
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'hint_1': {
                    'en': {
                        'filename': 'filename3.mp3',
                        'file_size_bytes': 3000,
                        'needs_update': False
                    }
                },
                'hint_2': {
                    'en': {
                        'filename': 'filename4.mp3',
                        'file_size_bytes': 3000,
                        'needs_update': False
                    }
                },
                'default_outcome': {}
            }
        }
        recorded_voiceovers = (
            state_domain.RecordedVoiceovers.from_dict(recorded_voiceovers_dict))
        exploration.init_state.update_recorded_voiceovers(recorded_voiceovers)
        with self.assertRaisesRegexp(
            Exception,
            'The content_id hint_2 already exists in recorded_voiceovers'):
            exploration.init_state.update_interaction_hints(new_hints_list)
def test_cannot_update_interaction_solution_with_non_dict_solution(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
hints_list = [{
'hint_content': {
'content_id': 'hint_1',
'html': '<p>Hello, this is html1 for state2</p>'
}
}]
solution = {
'answer_is_exclusive': True,
'correct_answer': u'hello_world!',
'explanation': {
'content_id': 'solution',
'html': u'<p>hello_world is a string</p>'
}
}
exploration.init_state.update_interaction_hints(hints_list)
exploration.init_state.update_interaction_solution(solution)
self.assertEqual(
exploration.init_state.interaction.solution.to_dict(), solution)
with self.assertRaisesRegexp(
Exception, 'Expected solution to be a dict'):
exploration.init_state.update_interaction_solution([])
def test_update_interaction_solution_with_no_solution(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
hints_list = [{
'hint_content': {
'content_id': 'hint_1',
'html': '<p>Hello, this is html1 for state2</p>'
}
}]
exploration.init_state.update_interaction_hints(hints_list)
exploration.init_state.update_interaction_solution(None)
self.assertIsNone(exploration.init_state.interaction.solution)
def test_cannot_update_interaction_hints_with_non_list_hints(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
with self.assertRaisesRegexp(
Exception, 'Expected hints_list to be a list'):
exploration.init_state.update_interaction_hints({})
def test_cannot_update_non_list_interaction_confirmed_unclassified_answers(
self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
with self.assertRaisesRegexp(
Exception, 'Expected confirmed_unclassified_answers to be a list'):
(
exploration.init_state
.update_interaction_confirmed_unclassified_answers({}))
    def test_update_interaction_confirmed_unclassified_answers(self):
        """Updating confirmed_unclassified_answers replaces the stored list."""
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        answer_groups_list = [{
            'outcome': {
                'dest': exploration.init_state_name,
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': 'Test'
                },
                'rule_type': 'Contains'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }]
        # The list starts out empty for a freshly created exploration.
        self.assertEqual(
            exploration.init_state.interaction.confirmed_unclassified_answers,
            [])
        (
            exploration.init_state
            .update_interaction_confirmed_unclassified_answers(
                answer_groups_list)
        )
        self.assertEqual(
            exploration.init_state.interaction.confirmed_unclassified_answers,
            answer_groups_list)
def test_cannot_update_non_dict_interaction_default_outcome(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
with self.assertRaisesRegexp(
Exception, 'Expected default_outcome_dict to be a dict'):
exploration.init_state.update_interaction_default_outcome(
'invalid_default_outcome')
def test_cannot_update_non_list_interaction_answer_groups(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
with self.assertRaisesRegexp(
Exception, 'Expected interaction_answer_groups to be a list'):
exploration.init_state.update_interaction_answer_groups(
'invalid_answer_groups')
    def test_cannot_update_answer_groups_with_non_dict_rule_inputs(self):
        """A rule spec's 'inputs' must be a dict; a list raises."""
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        answer_groups_list = [{
            'outcome': {
                'dest': exploration.init_state_name,
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': [],
                'rule_type': 'Contains'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }]
        with self.assertRaisesRegexp(
            Exception, 'Expected rule_inputs to be a dict'):
            exploration.init_state.update_interaction_answer_groups(
                answer_groups_list)
    def test_cannot_update_answer_groups_with_non_list_rule_specs(self):
        """An answer group's 'rule_specs' must be a list; a dict raises."""
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        answer_groups_list = [{
            'outcome': {
                'dest': exploration.init_state_name,
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': {},
            'training_data': [],
            'tagged_skill_misconception_id': None
        }]
        with self.assertRaisesRegexp(
            Exception, 'Expected answer group rule specs to be a list'):
            exploration.init_state.update_interaction_answer_groups(
                answer_groups_list)
    def test_cannot_update_answer_groups_with_invalid_rule_input_value(self):
        """A rule input whose value has the wrong type fails normalization."""
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        answer_groups_list = [{
            'outcome': {
                'dest': exploration.init_state_name,
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': []
                },
                'rule_type': 'Contains'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }]
        # re.escape is needed because the expected message contains '[]',
        # which would otherwise be treated as a regex character class.
        with self.assertRaisesRegexp(
            Exception,
            re.escape(
                '[] has the wrong type. It should be a NormalizedString.')):
            exploration.init_state.update_interaction_answer_groups(
                answer_groups_list)
    def test_validate_rule_spec(self):
        """Validating a rule spec against an empty parameter spec logs a
        warning about the unrecognized input name and then raises KeyError.
        """
        observed_log_messages = []
        def _mock_logging_function(msg, *args):
            """Mocks logging.warning()."""
            observed_log_messages.append(msg % args)
        logging_swap = self.swap(logging, 'warning', _mock_logging_function)
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        answer_groups = [{
            'outcome': {
                'dest': exploration.init_state_name,
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': 'Test'
                },
                'rule_type': 'Contains'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }]
        exploration.init_state.update_interaction_answer_groups(answer_groups)
        with logging_swap, self.assertRaises(KeyError):
            (
                exploration.init_state.interaction.answer_groups[0]
                .rule_specs[0].validate([], {})
            )
        # The expected message relies on the Python 2 repr of a set.
        self.assertEqual(
            observed_log_messages,
            [
                'RuleSpec \'Contains\' has inputs which are not recognized '
                'parameter names: set([\'x\'])'
            ]
        )
class WrittenTranslationsDomainUnitTests(test_utils.GenericTestBase):
    """Test methods operating on written transcripts."""

    def test_from_and_to_dict_works_correctly(self):
        """from_dict(d).to_dict() should reproduce d exactly.

        (Method name fixed: previously misspelled 'wroks'.)
        """
        written_translations_dict = {
            'translations_mapping': {
                'content1': {
                    'en': {
                        'html': 'hello',
                        'needs_update': True
                    },
                    'hi': {
                        'html': 'Hey!',
                        'needs_update': False
                    }
                },
                'feedback_1': {
                    'hi': {
                        'html': 'Testing!',
                        'needs_update': False
                    },
                    'en': {
                        'html': 'hello!',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        self.assertEqual(
            written_translations.to_dict(), written_translations_dict)

    def test_get_content_ids_for_text_translation_return_correct_list_of_content_id(self): # pylint: disable=line-too-long
        """Every registered content id is listed."""
        written_translations = state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {}
        })
        self.assertEqual(
            written_translations.get_content_ids_for_text_translation(), [])
        written_translations.add_content_id_for_translation('feedback_1')
        written_translations.add_content_id_for_translation('feedback_2')
        self.assertEqual(
            written_translations.get_content_ids_for_text_translation(), [
                'feedback_2', 'feedback_1'])

    def test_add_content_id_for_translations_adds_content_id(self):
        """add_content_id_for_translation registers a new content id."""
        written_translations = state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {}
        })
        self.assertEqual(
            len(written_translations.get_content_ids_for_text_translation()), 0)
        new_content_id = 'content_id'
        written_translations.add_content_id_for_translation(new_content_id)
        self.assertEqual(
            len(written_translations.get_content_ids_for_text_translation()), 1)
        self.assertEqual(
            written_translations.get_content_ids_for_text_translation(),
            ['content_id'])

    def test_add_content_id_for_translation_with_invalid_content_id_raise_error(
            self):
        """Non-string content ids are rejected."""
        written_translations = state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {}
        })
        invalid_content_id = 123
        with self.assertRaisesRegexp(
            Exception, 'Expected content_id to be a string, received 123'):
            written_translations.add_content_id_for_translation(
                invalid_content_id)

    def test_add_content_id_for_translation_with_existing_content_id_raise_error( # pylint: disable=line-too-long
            self):
        """Registering a duplicate content id is rejected."""
        written_translations_dict = {
            'translations_mapping': {
                'feedback_1': {
                    'en': {
                        'html': 'hello!',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        existing_content_id = 'feedback_1'
        with self.assertRaisesRegexp(
            Exception, 'The content_id feedback_1 already exist.'):
            written_translations.add_content_id_for_translation(
                existing_content_id)

    def test_delete_content_id_for_translations_deletes_content_id(self):
        """delete_content_id_for_translation removes the content id."""
        old_written_translations_dict = {
            'translations_mapping': {
                'content': {
                    'en': {
                        'html': 'hello!',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            old_written_translations_dict)
        self.assertEqual(
            len(written_translations.get_content_ids_for_text_translation()), 1)
        written_translations.delete_content_id_for_translation('content')
        self.assertEqual(
            len(written_translations.get_content_ids_for_text_translation()), 0)

    def test_delete_content_id_for_translation_with_nonexisting_content_id_raise_error(self): # pylint: disable=line-too-long
        """Deleting an unregistered content id is rejected."""
        written_translations_dict = {
            'translations_mapping': {
                'content': {}
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        nonexisting_content_id_to_delete = 'feedback_1'
        with self.assertRaisesRegexp(
            Exception, 'The content_id feedback_1 does not exist.'):
            written_translations.delete_content_id_for_translation(
                nonexisting_content_id_to_delete)

    def test_delete_content_id_for_translation_with_invalid_content_id_raise_error(self): # pylint: disable=line-too-long
        """Deleting with a non-string content id is rejected."""
        written_translations = state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {}
        })
        invalid_content_id_to_delete = 123
        with self.assertRaisesRegexp(
            Exception, 'Expected content_id to be a string, '):
            written_translations.delete_content_id_for_translation(
                invalid_content_id_to_delete)

    def test_validation_with_invalid_content_id_raise_error(self):
        """validate() rejects non-string content ids in the mapping."""
        written_translations_dict = {
            'translations_mapping': {
                123: {}
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        with self.assertRaisesRegexp(
            Exception, 'Expected content_id to be a string, '):
            written_translations.validate([123])

    def test_validate_non_dict_language_code_to_written_translation(self):
        """validate() rejects non-dict values for a content id."""
        written_translations = state_domain.WrittenTranslations({
            'en': []
        })
        with self.assertRaisesRegexp(
            Exception,
            re.escape('Expected content_id value to be a dict, received []')):
            written_translations.validate(None)

    def test_validation_with_invalid_type_language_code_raise_error(self):
        """validate() rejects non-string language codes.

        (Method name fixed: previously misspelled 'langauge'.)
        """
        written_translations_dict = {
            'translations_mapping': {
                'content': {
                    123: {
                        'html': 'hello!',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        with self.assertRaisesRegexp(
            Exception, 'Expected language_code to be a string, '):
            written_translations.validate(['content'])

    def test_validation_with_unknown_language_code_raise_error(self):
        """validate() rejects language codes outside the supported set."""
        written_translations_dict = {
            'translations_mapping': {
                'content': {
                    'ed': {
                        'html': 'hello!',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        with self.assertRaisesRegexp(Exception, 'Invalid language_code: ed'):
            written_translations.validate(['content'])

    def test_validation_with_invalid_content_id_list(self):
        """validate() rejects a content id list that does not match."""
        written_translations_dict = {
            'translations_mapping': {
                'content': {
                    'en': {
                        'html': '<p>hello!</p>',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        with self.assertRaisesRegexp(
            Exception,
            re.escape(
                'Expected state written_translations to match the listed '
                'content ids [\'invalid_content\']')):
            written_translations.validate(['invalid_content'])
class RecordedVoiceoversDomainUnitTests(test_utils.GenericTestBase):
    """Test methods operating on recorded voiceovers."""

    def test_from_and_to_dict_works_correctly(self):
        """from_dict(d).to_dict() should reproduce d exactly.

        (Method name fixed: previously misspelled 'wroks'.)
        """
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content1': {
                    'en': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': True
                    },
                    'hi': {
                        'filename': 'abc.mp3',
                        'file_size_bytes': 1234,
                        'needs_update': False
                    }
                },
                'feedback_1': {
                    'hi': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False
                    },
                    'en': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False
                    }
                }
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        self.assertEqual(
            recorded_voiceovers.to_dict(), recorded_voiceovers_dict)

    def test_get_content_ids_for_voiceovers_return_correct_list_of_content_id(self): # pylint: disable=line-too-long
        """Every registered content id is listed."""
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict({
            'voiceovers_mapping': {}
        })
        self.assertEqual(
            recorded_voiceovers.get_content_ids_for_voiceovers(), [])
        recorded_voiceovers.add_content_id_for_voiceover('feedback_1')
        recorded_voiceovers.add_content_id_for_voiceover('feedback_2')
        self.assertEqual(recorded_voiceovers.get_content_ids_for_voiceovers(), [
            'feedback_2', 'feedback_1'])

    def test_add_content_id_for_voiceovers_adds_content_id(self):
        """add_content_id_for_voiceover registers a new content id."""
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict({
            'voiceovers_mapping': {}
        })
        self.assertEqual(
            len(recorded_voiceovers.get_content_ids_for_voiceovers()), 0)
        new_content_id = 'content_id'
        recorded_voiceovers.add_content_id_for_voiceover(new_content_id)
        self.assertEqual(
            len(recorded_voiceovers.get_content_ids_for_voiceovers()), 1)
        self.assertEqual(
            recorded_voiceovers.get_content_ids_for_voiceovers(),
            ['content_id'])

    def test_add_content_id_for_voiceover_with_invalid_content_id_raise_error(
            self):
        """Non-string content ids are rejected."""
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict({
            'voiceovers_mapping': {}
        })
        invalid_content_id = 123
        with self.assertRaisesRegexp(
            Exception, 'Expected content_id to be a string, received 123'):
            recorded_voiceovers.add_content_id_for_voiceover(
                invalid_content_id)

    def test_add_content_id_for_voiceover_with_existing_content_id_raise_error( # pylint: disable=line-too-long
            self):
        """Registering a duplicate content id is rejected."""
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'feedback_1': {
                    'en': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False
                    }
                }
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        existing_content_id = 'feedback_1'
        with self.assertRaisesRegexp(
            Exception, 'The content_id feedback_1 already exist.'):
            recorded_voiceovers.add_content_id_for_voiceover(
                existing_content_id)

    def test_delete_content_id_for_voiceovers_deletes_content_id(self):
        """delete_content_id_for_voiceover removes the content id."""
        old_recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content': {
                    'en': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False
                    }
                }
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            old_recorded_voiceovers_dict)
        self.assertEqual(
            len(recorded_voiceovers.get_content_ids_for_voiceovers()), 1)
        recorded_voiceovers.delete_content_id_for_voiceover('content')
        self.assertEqual(
            len(recorded_voiceovers.get_content_ids_for_voiceovers()), 0)

    def test_delete_content_id_for_voiceover_with_nonexisting_content_id_raise_error(self): # pylint: disable=line-too-long
        """Deleting an unregistered content id is rejected."""
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content': {}
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        nonexisting_content_id_to_delete = 'feedback_1'
        with self.assertRaisesRegexp(
            Exception, 'The content_id feedback_1 does not exist.'):
            recorded_voiceovers.delete_content_id_for_voiceover(
                nonexisting_content_id_to_delete)

    def test_delete_content_id_for_voiceover_with_invalid_content_id_raise_error(self): # pylint: disable=line-too-long
        """Deleting with a non-string content id is rejected."""
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict({
            'voiceovers_mapping': {}
        })
        invalid_content_id_to_delete = 123
        with self.assertRaisesRegexp(
            Exception, 'Expected content_id to be a string, '):
            recorded_voiceovers.delete_content_id_for_voiceover(
                invalid_content_id_to_delete)

    def test_validation_with_invalid_content_id_raise_error(self):
        """validate() rejects non-string content ids in the mapping."""
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                123: {}
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        with self.assertRaisesRegexp(
            Exception, 'Expected content_id to be a string, '):
            recorded_voiceovers.validate([123])

    def test_validate_non_dict_language_code_to_voiceover(self):
        """validate() rejects non-dict values for a content id."""
        recorded_voiceovers = state_domain.RecordedVoiceovers({
            'en': []
        })
        with self.assertRaisesRegexp(
            Exception,
            re.escape('Expected content_id value to be a dict, received []')):
            recorded_voiceovers.validate(None)

    def test_validation_with_invalid_type_language_code_raise_error(self):
        """validate() rejects non-string language codes.

        (Method name fixed: previously misspelled 'langauge'.)
        """
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content': {
                    123: {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False
                    }
                }
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        with self.assertRaisesRegexp(
            Exception, 'Expected language_code to be a string, '):
            recorded_voiceovers.validate(['content'])

    def test_validation_with_unknown_language_code_raise_error(self):
        """validate() rejects language codes outside the supported set."""
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content': {
                    'ed': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False
                    }
                }
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        with self.assertRaisesRegexp(Exception, 'Invalid language_code: ed'):
            recorded_voiceovers.validate(['content'])

    def test_validation_with_invalid_content_id_list(self):
        """validate() rejects a content id list that does not match."""
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content': {
                    'en': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False
                    }
                }
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        with self.assertRaisesRegexp(
            Exception,
            re.escape(
                'Expected state recorded_voiceovers to match the listed '
                'content ids [\'invalid_content\']')):
            recorded_voiceovers.validate(['invalid_content'])
class VoiceoverDomainTests(test_utils.GenericTestBase):
    """Tests for validation of the state_domain.Voiceover value object."""
    def setUp(self):
        super(VoiceoverDomainTests, self).setUp()
        # A valid voiceover; each test corrupts a single field and expects
        # validate() to raise.
        self.voiceover = state_domain.Voiceover('filename.mp3', 10, False)
    def test_validate_non_str_filename(self):
        """A non-string filename fails validation."""
        self.voiceover.validate()
        self.voiceover.filename = 0
        with self.assertRaisesRegexp(
            Exception, 'Expected audio filename to be a string'):
            self.voiceover.validate()
    def test_validate_filename(self):
        """validate() raises 'Invalid audio filename' for a malformed name."""
        self.voiceover.validate()
        self.voiceover.filename = 'invalid_filename'
        with self.assertRaisesRegexp(Exception, 'Invalid audio filename'):
            self.voiceover.validate()
    def test_validate_audio_extension(self):
        """A non-audio file extension fails validation."""
        self.voiceover.validate()
        self.voiceover.filename = 'filename.png'
        with self.assertRaisesRegexp(
            Exception,
            re.escape(
                'Invalid audio filename: it should have one of the following '
                'extensions: %s' % feconf.ACCEPTED_AUDIO_EXTENSIONS.keys())):
            self.voiceover.validate()
    def test_validate_non_int_file_size_bytes(self):
        """A non-integer file size fails validation."""
        self.voiceover.validate()
        self.voiceover.file_size_bytes = 'file_size_bytes'
        with self.assertRaisesRegexp(
            Exception, 'Expected file size to be an int'):
            self.voiceover.validate()
    def test_validate_negative_file_size_bytes(self):
        """A negative file size fails validation."""
        self.voiceover.validate()
        self.voiceover.file_size_bytes = -1
        with self.assertRaisesRegexp(Exception, 'Invalid file size'):
            self.voiceover.validate()
    def test_validate_non_bool_needs_update(self):
        """A non-boolean needs_update flag fails validation."""
        self.voiceover.validate()
        self.voiceover.needs_update = 'needs_update'
        with self.assertRaisesRegexp(
            Exception, 'Expected needs_update to be a bool'):
            self.voiceover.validate()
| souravbadami/oppia | core/domain/state_domain_test.py | Python | apache-2.0 | 64,983 |
#!/usr/bin/env python
"""
Generic python script.
"""
__author__ = "Alex Drlica-Wagner"
import numpy as np
import ugali.analysis.results
def test_surface_brightness():
    """Check both surface-brightness estimators against Draco.

    Reference values are taken from McConnachie et al. 2012; both the
    physical-size and the angular-size formulations should reproduce the
    published central surface brightness to within 0.02 mag/arcsec^2.
    """
    # Draco values from McConnachie et al. 2012.
    abs_mag = -8.8
    app_mag = 10.6
    distance = 76  # kpc (distance modulus 19.40; unused local removed)
    a_half_arcmin = 10.0   # semi-major half-light radius, arcmin
    a_physical_kpc = 0.221 # semi-major half-light radius, kpc
    ellip = 0.31
    mu0 = 26.1  # expected surface brightness, mag/arcsec^2
    # Convert semi-major axis to azimuthally averaged radius.
    r_half_arcmin = a_half_arcmin * np.sqrt(1 - ellip)
    r_physical_kpc = a_physical_kpc * np.sqrt(1 - ellip)
    mu1 = ugali.analysis.results.surfaceBrightness(abs_mag, r_physical_kpc, distance)
    mu2 = ugali.analysis.results.surfaceBrightness2(app_mag, r_half_arcmin)
    np.testing.assert_allclose(mu1, mu0, atol=2e-2)
    np.testing.assert_allclose(mu2, mu0, atol=2e-2)
| kadrlica/ugali | tests/test_results.py | Python | mit | 836 |
from django.conf.urls.defaults import url, patterns
from projects.constants import LANGUAGES_REGEX
from urls import urlpatterns as main_patterns
# URLconf used when docs are served from a project subdomain; routes map
# directly onto the serve/redirect views in core.views.
urlpatterns = patterns(
    '', # base view, flake8 complains if it is on the previous line.
    # Subproject docs with explicit language and version.
    url((r'^projects/(?P<project_slug>[\w.-]+)/(?P<lang_slug>\w{2})/'
        r'(?P<version_slug>[\w.-]+)/(?P<filename>.*)$'),
        'core.views.subproject_serve_docs',
        name='subproject_docs_detail'),
    # Subproject landing page (no language/version given).
    url(r'^projects/(?P<project_slug>[\w.-]+)',
        'core.views.subproject_serve_docs',
        name='subproject_docs_detail'),
    # List of all subprojects.
    url(r'^projects/$',
        'core.views.subproject_list',
        name='subproject_docs_list'),
    # Fully-qualified page inside a language/version.
    url(r'^(?P<lang_slug>\w{2})/(?P<version_slug>[\w.-]+)/(?P<filename>.*)$',
        'core.views.serve_docs',
        name='docs_detail'),
    # Version index page; defaults filename to index.html.
    url(r'^(?P<lang_slug>\w{2})/(?P<version_slug>.*)/$',
        'core.views.serve_docs',
        {'filename': 'index.html'},
        name='docs_detail'),
    # Redirect helpers for partially-specified URLs.
    url(r'^page/(?P<filename>.*)$',
        'core.views.redirect_page_with_filename',
        name='docs_detail'),
    url(r'^(?P<lang_slug>%s)/$' % LANGUAGES_REGEX,
        'core.views.redirect_lang_slug',
        name='lang_subdomain_handler'),
    url(r'^(?P<version_slug>.*)/$',
        'core.views.redirect_version_slug',
        name='version_subdomain_handler'),
    url(r'^$', 'core.views.redirect_project_slug'),
)
# Fall back to the main site's URL patterns for anything not matched above.
urlpatterns += main_patterns
| nyergler/pythonslides | readthedocs/core/subdomain_urls.py | Python | mit | 1,443 |
import asyncio as aio
import datetime
from collections import OrderedDict as ODict
from typing import Text
def test(s, cond, td):
    # type: (Text, aio.Condition, datetime.timedelta) -> ODict
    """Fixture for a move-refactoring test; always returns an empty ODict."""
    return ODict()
| smmribeiro/intellij-community | python/testData/refactoring/move/withImportedFunctionTypeComments/after/src/dst.py | Python | apache-2.0 | 214 |
from general_imports import *
from generic_functions import *
import views
def get_movies_by_age(request):
    """AJAX endpoint: return up to 20 movies from the selected decade as JSON.

    The client sends the decade as e.g. '1990s' in request.GET['value'];
    the trailing letter is stripped and 9 years are added so the filter
    spans the whole decade (e.g. 1990-1999).
    """
    decade_start = int(request.GET['value'][:-1])
    decade_end = str(decade_start + 9)
    movie_filter = Title.objects.filter(
        year__range=[decade_start, decade_end])
    movies_from_age = views.get_movies(movie_filter, 20)
    # Build a plain list of dicts for JSON serialization (the unused
    # movies_dict accumulator and manual enumerate/append loop are gone).
    movies_list = [{
        "director": str(movie['director']),
        "movie_name": str(movie['name']),
        "slug": str(movie['slug']),
        "genre": movie['genre'],
        "year": str(movie['year']),
        "rating": movie['rating'],
        "opinion": movie['opinion'],
        "poster": str(movie['poster']),
    } for movie in movies_from_age]
    # This is AJAX
    return HttpResponse(json.dumps(movies_list))
@login_required
def delete_title(request):
    """AJAX endpoint: delete the title named in request.POST['value'] from
    the current user's collection and acknowledge with 'OK'.

    Leftover debug ``print`` statements and commented-out code from the
    original implementation have been removed; the logic is unchanged.
    """
    title_name = request.POST['value']
    # Resolve the profile row matching the logged-in auth user.
    current_user = UserProfile.objects.get(username=str(request.user))
    # QuerySet.delete() removes every matching title for this user.
    Title.objects.filter(users=current_user, name=title_name).delete()
    return HttpResponse("OK")
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from datetime import datetime
from copy import copy
class BudgetDistributionError(osv.except_osv):
    """Raised when budget distributions cannot be built automatically for a
    reconcile; carries the id of the offending account move so the caller
    can post a message on it.
    """
    # Class-level defaults, overwritten per instance in __init__.
    name = ""
    value = ""
    move_id = 0

    def __init__(self, name, value, move_id):
        # Bug fix: the original assigned ``self.value`` twice and never set
        # ``self.name``; it also left a stray no-op ``osv.except_osv``
        # expression at class level, which has been removed.
        self.name = name
        self.value = value
        self.move_id = move_id
        osv.except_osv.__init__(self, name, value)
class AccountMoveReconcile(orm.Model):
_inherit = 'account.move.reconcile'
def unlink(self, cr, uid, ids, context={}):
dist_obj = self.pool.get('account.move.line.distribution')
bud_mov_obj = self.pool.get('budget.move')
dist_ids = dist_obj.search(cr, uid, [('reconcile_ids.id','in',ids)], context=context)
dists = dist_obj.browse(cr, uid, dist_ids, context=context)
budget_move_ids = []
for dist in dists:
if dist.target_budget_move_line_id and \
dist.target_budget_move_line_id.budget_move_id and \
dist.target_budget_move_line_id.budget_move_id.id not in budget_move_ids:
budget_move_ids.append(dist.target_budget_move_line_id.budget_move_id.id)
dist_obj.unlink(cr, uid, dist_ids, context=context)
if budget_move_ids:
bud_mov_obj.recalculate_values(cr, uid, budget_move_ids, context=context)
for mov_id in budget_move_ids:
bud_mov_obj.signal_workflow(cr, uid, [mov_id], 'button_check_execution', context=context)
return super(AccountMoveReconcile, self).unlink(cr, uid, ids, context=context)
    def create(self, cr, uid, vals, context=None):
        """Create the reconcile and try to build the matching budget
        distributions.

        If automatic distribution fails (BudgetDistributionError), the
        reconcile is still created; only an explanatory message is posted
        on the account move referenced by the error.
        """
        account_move_obj= self.pool.get('account.move')
        is_incremental = self.check_incremental_reconcile(cr, uid, vals, context=context)
        reconcile_id = super(AccountMoveReconcile, self).create(cr, uid, vals, context=context)
        try:
            self.reconcile_budget_check(cr, uid, [reconcile_id], context=context, is_incremental=is_incremental)
        except BudgetDistributionError, error:
            # Best-effort: log on the move instead of aborting the reconcile.
            msg= _('Budget distributions cannot be created automatically for this reconcile')
            account_move_obj.message_post(cr, uid, [error.move_id], body=msg, context=context)
        return reconcile_id
def move_in_voucher(self,cr, uid, move_ids, context=None):
#Checks if a move is in a voucher, returns the id of the voucher or -1 in case that is not in any
acc_move_obj = self.pool.get('account.move')
acc_vouch_obj = self.pool.get('account.voucher')
for move_id in move_ids:
voucher_search = acc_vouch_obj.search(cr, uid, [('move_id','=',move_id)], context=context)
if len(voucher_search) > 0:
return voucher_search[0]
else:
return -1
def move_in_invoice(self,cr, uid, move_ids, context=None):
#checks if a move is in an invoice, returns the id of the invoice or -1 in case that is not in any
acc_move_obj = self.pool.get('account.move')
acc_inv_obj = self.pool.get('account.invoice')
for move_id in move_ids:
invoice_search = acc_inv_obj.search(cr, uid, [('move_id','=',move_id)], context=context)
if len(invoice_search) > 0:
return invoice_search[0]
else:
return -1
def line_in_move(self,cr, uid, line_ids, context=None):
#checks if a move is in an invoice, returns the id of the invoice or -1 in case that is not in any
mov_line_obj = self.pool.get('account.move.line')
for line in mov_line_obj.browse(cr, uid, line_ids, context=context):
return line.move_id.id
return -1
    def create_budget_account_reconcile(self, cr, uid, invoice_id, payment_move_line_id, payment_reconcile_id, reconcile_ids, context=None):
        """Distribute the amount of a payment move line over the budget move
        lines of the invoice's budget move, pro-rata to each line's share of
        the move's fixed_amount; the last line absorbs the rounding
        remainder.

        NOTE(review): the ``reconcile_ids`` parameter and ``acc_vouch_obj``
        lookup are unused here — presumably kept for API symmetry; confirm
        before removing.
        """
        acc_inv_obj = self.pool.get('account.invoice')
        acc_vouch_obj = self.pool.get('account.voucher')
        move_line_obj = self.pool.get('account.move.line')
        amld = self.pool.get('account.move.line.distribution')
        bud_move_obj = self.pool.get('budget.move')
        bud_move_line_obj = self.pool.get('budget.move.line')
        currency_id = None
        amount = 0
        # Amount paid: credit side, else debit side, else (both zero) the
        # foreign-currency amount.
        for line in move_line_obj.browse(cr, uid, [payment_move_line_id],context=context):
            if line.credit > 0:
                amount = line.credit
            elif line.debit > 0:
                amount = line.debit
            elif line.debit == 0 and line.credit == 0:
                amount = line.amount_currency
                currency_id = line.currency_id
        if currency_id:
            pass
            #TODO
            #Distributions work only with system currency, if it is necessary to work with other currency, logic MUST be implemented from recursive distribution algorithms first
        invoice = acc_inv_obj.browse(cr, uid,[invoice_id],context = context)[0]
        bud_move = invoice.budget_move_id
        i=0
        sum = 0
        # All lines except the last get amount * (their share of
        # fixed_amount); the last gets whatever is left so the pieces add up
        # exactly to ``amount``.
        # NOTE(review): ``or 0`` does NOT guard against
        # bud_move.fixed_amount == 0 (that still raises ZeroDivisionError);
        # it only maps a falsy quotient to 0 — confirm fixed_amount is
        # always non-zero here.
        for line in bud_move.move_lines:
            if i < len(bud_move.move_lines)-1:
                perc = line.fixed_amount/bud_move.fixed_amount or 0
                amld.create(cr, uid, {'budget_move_id': bud_move.id,
                                      'budget_move_line_id': line.id,
                                      'account_move_line_id': payment_move_line_id,
                                      'account_move_reconcile_id': payment_reconcile_id,
                                      'amount' : amount * perc
                                      })
                sum += amount * perc
                i+=1
        amld.create(cr, uid, {'budget_move_id': bud_move.id,
                              'budget_move_line_id': bud_move.move_lines[i].id,
                              'account_move_line_id': payment_move_line_id,
                              'account_move_reconcile_id': payment_reconcile_id,
                              'amount' : amount - sum
                              })
        # NOTE(review): ``line`` here is the leaked loop variable (the last
        # budget line); rewriting its own date and the move's own code looks
        # like a trick to trigger recomputation of stored fields — confirm.
        bud_move_line_obj.write(cr, uid, [line.id],{'date': line.date}, context=context)
        bud_move_obj.write(cr, uid , [bud_move.id], {'code': bud_move.code}, context=context)
    def _get_move_counterparts(self, cr, uid, line, context={}):
        """Return the move lines on the opposite side (debit vs. credit) of
        ``line`` within the same account move.

        Raises BudgetDistributionError when both a debit line and a credit
        line of the move carry a budget program line, since the automatic
        distribution would then be ambiguous.
        """
        # NOTE(review): mutable default ``context={}`` — never mutated here,
        # but worth normalizing to None for consistency with siblings.
        is_debit = True if line.debit else False
        res = []
        debit_bud= False
        credit_bud=False
        # First pass: detect budget program lines on each side of the move.
        for move_line in line.move_id.line_id:
            if move_line.credit and move_line.budget_program_line:
                credit_bud = True
            if move_line.debit and move_line.budget_program_line:
                debit_bud = True
        if credit_bud and debit_bud:
            raise BudgetDistributionError(_('Error'), _('Budget distributions cannot be created automatically for this reconcile'), line.move_id.id)
        # Second pass: collect the lines on the opposite side of ``line``.
        for move_line in line.move_id.line_id:
            if (is_debit and move_line.credit) or (not is_debit and move_line.debit):
                res.append(move_line)
        return res
def _recursive_liquid_get_auto_distribution(self, cr, uid, original_line, actual_line = None, checked_lines = [], amount_to_dist = 0.0, original_amount_to_dist = 0.0, reconcile_ids = [], continue_reconcile = False, context={},is_incremental=False):
"""
Receives an account.move.line that moves liquid and was found creating a reconcile.
This method starts at this line and "travels" through the moves and reconciles line counterparts
to try to find a budget move to match with.
Returns the list of account.move.line.distribution created, or an empty list.
"""
dist_obj = self.pool.get('account.move.line.distribution')
budget_move_line_obj = self.pool.get('budget.move.line')
# Check if not first call and void type line. This kind of lines only can be navigated when called first by the main method.
if actual_line and actual_line.move_id.budget_type == 'void':
return []
# Check if first call and not liquid or void line
if not actual_line and not original_line.account_id.moves_cash:
return []
# Check for first call
if not actual_line:
dist_search = dist_obj.search(cr, uid, [('account_move_line_id','=',original_line.id)],context=context)
dist_obj.unlink(cr,uid,dist_search,context=context,is_incremental=is_incremental)
actual_line = original_line
amount_to_dist = original_line.debit + original_line.credit
original_amount_to_dist = amount_to_dist
checked_lines = [actual_line.id]
if not amount_to_dist:
return []
budget_lines = {}
liquid_lines = {}
none_lines = {}
budget_amounts = {}
liquid_amounts = {}
budget_total = 0.0
liquid_total = 0.0
none_total = 0.0
amount_total = 0.0
new_reconcile_ids = copy(reconcile_ids)
# Get line counterparts, if the reconcile flag is on, the counterparts are looked in the reconcile, if not move is used
if continue_reconcile:
line_reconcile_ids, counterparts = self._get_reconcile_counterparts(cr, uid, actual_line, context=context)
new_reconcile_ids += line_reconcile_ids
else:
counterparts = self._get_move_counterparts(cr, uid, actual_line, context=context)
for counterpart in counterparts:
if counterpart.id not in checked_lines:
# Check if there are any budget move lines associated with this counterpart
budget_move_lines_found = budget_move_line_obj.search(cr, uid, [('move_line_id','=',counterpart.id)], context=context)
if budget_move_lines_found:
budget_lines[counterpart.id] = budget_move_lines_found
budget_amounts[counterpart.id] = counterpart.debit + counterpart.credit
budget_total += counterpart.debit + counterpart.credit
amount_total += counterpart.debit + counterpart.credit
elif counterpart.account_id.moves_cash:
liquid_lines[counterpart.id] = counterpart
liquid_amounts[counterpart.id] = counterpart.debit + counterpart.credit
liquid_total += counterpart.debit + counterpart.credit
amount_total += counterpart.debit + counterpart.credit
else:
none_lines[counterpart.id] = counterpart
none_total += counterpart.debit + counterpart.credit
amount_total += counterpart.debit + counterpart.credit
checked_lines.append(counterpart.id)
if not (budget_lines or liquid_lines or none_lines):
return []
if amount_total and amount_total > amount_to_dist:
budget_amount_to_dist = budget_total * amount_to_dist / amount_total
liquid_amount_to_dist = liquid_total * amount_to_dist / amount_total
none_amount_to_dist = amount_to_dist - budget_amount_to_dist - liquid_amount_to_dist
elif amount_total:
budget_amount_to_dist = budget_total
liquid_amount_to_dist = liquid_total
none_amount_to_dist = none_total
else:
# Nothing to distribute
return []
none_res = []
if none_total:
for line in none_lines.values():
line_amount_to_dist = (none_amount_to_dist if line.debit + line.credit >= none_amount_to_dist else line.debit + line.credit)
# Use none_amount_to_dist with all lines as we don't know which ones will find something
none_res += self._recursive_liquid_get_auto_distribution(cr, uid, original_line,
actual_line = line,
checked_lines = checked_lines,
amount_to_dist = line_amount_to_dist,
original_amount_to_dist = original_amount_to_dist,
reconcile_ids = new_reconcile_ids,
continue_reconcile = (not continue_reconcile),
context = context)
# Check if there is budget, void or liquid lines, if not return none_res, even if its empty.
budget_res = []
liquid_res = []
budget_distributed = 0.0
liquid_distributed = 0.0
bud_move_obj = self.pool.get('budget.move')
if budget_lines or liquid_lines:
# Write dists and build lists
dist_obj = self.pool.get('account.move.line.distribution')
# Budget list
budget_total = 0.0
budget_budget_move_line_ids = []
budget_budget_move_lines_ids = []
budget_budget_move_lines = []
#lines is an int (id)
for lines in budget_lines.values():
budget_budget_move_lines_ids += lines
#Browse record: lines is an int not an object!
budget_budget_move_lines = self.pool.get('budget.move.line').browse(cr,uid, budget_budget_move_lines_ids,context=context)
for line in budget_budget_move_lines:
budget_budget_move_line_ids.append(line.id)
budget_total += abs(line.fixed_amount)
for line in budget_budget_move_lines:
distribution_amount = abs(line.fixed_amount)
# If the resulting total of budget plus liquid lines is more than available, the amount has to be fractioned.
if abs(budget_total) + liquid_amount_to_dist > amount_to_dist:
distribution_amount = distribution_amount * amount_to_dist / budget_total + liquid_amount_to_dist
if line.fixed_amount < 0:
signed_dist_amount = distribution_amount * -1
else:
signed_dist_amount = distribution_amount
budget_distributed += distribution_amount
vals = {
'account_move_line_id': original_line.id,
'distribution_amount': signed_dist_amount,
'distribution_percentage': 100 * abs(distribution_amount) / abs(original_amount_to_dist),
'target_budget_move_line_id': line.id,
'reconcile_ids': [(6, 0, new_reconcile_ids)],
'type': 'auto',
'account_move_line_type': 'liquid',
}
budget_res.append(dist_obj.create(cr, uid, vals, context = context))
bud_move_obj.signal_workflow(cr, uid, [line.budget_move_id.id], 'button_check_execution', context=context)
# Liquid list
for line in liquid_lines.values():
distribution_amount = liquid_amounts[line.id]
if line.fixed_amount < 0:
signed_dist_amount = distribution_amount * -1
else:
signed_dist_amount = distribution_amount
liquid_distributed += distribution_amount
vals = {
'account_move_line_id': original_line.id,
'distribution_amount': signed_dist_amount,
'distribution_percentage': 100 * abs(distribution_amount) / abs(original_amount_to_dist),
'target_account_move_line_id': line.id,
'reconcile_ids': [(6, 0, new_reconcile_ids)],
'type': 'auto',
}
liquid_res.append(dist_obj.create(cr, uid, vals, context = context))
bud_move_obj.signal_workflow(cr, uid, [line.budget_move_id.id], 'button_check_execution', context=context)
distributed_amount = budget_distributed + liquid_distributed
# Check if some dists are returned to adjust their values
if none_res:
self._adjust_distributed_values(cr, uid, none_res, amount_to_dist - distributed_amount, context = context, object="budget")
return budget_res + liquid_res + none_res
def _recursive_void_get_auto_distribution(self, cr, uid, original_line, actual_line = None, checked_lines = [], amount_to_dist = 0.0, original_amount_to_dist = 0.0, reconcile_ids = [], continue_reconcile = False, context={}):
"""
Receives an account.move.line that is marked as void for budget moves and was found creating a reconcile.
This method starts at this line and "travels" through the moves and reconciles line counterparts
to try to find a budget move to match with.
Returns the list of account.move.line.distribution created, or an empty list.
"""
budget_move_line_obj = self.pool.get('budget.move.line')
# Check if not first call and void type line. This kind of lines only can be navigated when called first by the main method.
if actual_line and actual_line.move_id.budget_type == 'void':
return []
# Check if first call and not void line
if not actual_line and not actual_line.move_id.budget_type == 'void':
return []
# Check for first call
if not actual_line:
actual_line = original_line
amount_to_dist = original_line.debit + original_line.credit
original_amount_to_dist = amount_to_dist
checked_lines.append(actual_line.id)
if not amount_to_dist:
return []
budget_lines = {}
none_lines = {}
budget_amounts = {}
budget_total = 0.0
none_total = 0.0
amount_total = 0.0
new_reconcile_ids = copy(reconcile_ids)
# Get line counterparts, if the reconcile flag is on, the counterparts are looked in the reconcile, if not move is used
if continue_reconcile:
line_reconcile_ids, counterparts = self._get_reconcile_counterparts(cr, uid, actual_line, context=context)
new_reconcile_ids += line_reconcile_ids
else:
counterparts = self._get_move_counterparts(cr, uid, actual_line, context=context)
for counterpart in counterparts:
if counterpart.id not in checked_lines:
# Check if there are any budget move lines associated with this counterpart
budget_move_lines_found = budget_move_line_obj.search(cr, uid, [('move_line_id','=',counterpart.id)], context=context)
if budget_move_lines_found:
budget_lines[counterpart.id] = budget_move_lines_found
budget_amounts[counterpart.id] = counterpart.debit + counterpart.credit
budget_total += counterpart.debit + counterpart.credit
amount_total += counterpart.debit + counterpart.credit
elif not counterpart.account_id.moves_cash and counterpart.move_id.budget_type != 'void':
none_lines[counterpart.id] = counterpart
none_total += counterpart.debit + counterpart.credit
amount_total += counterpart.debit + counterpart.credit
checked_lines.append(counterpart.id)
if not (budget_lines or none_lines):
return []
if amount_total and amount_total > amount_to_dist:
budget_amount_to_dist = budget_total * amount_to_dist / amount_total
none_amount_to_dist = amount_to_dist - budget_amount_to_dist
elif amount_total:
budget_amount_to_dist = budget_total
none_amount_to_dist = none_total
else:
# Nothing to distribute
return []
none_res = []
if none_total:
for line in none_lines.values():
line_amount_to_dist = (none_amount_to_dist if line.debit + line.credit >= none_amount_to_dist else line.debit + line.credit)
# Use none_amount_to_dist with all lines as we don't know which ones will find something
none_res += self._recursive_void_get_auto_distribution(cr, uid, original_line,
actual_line = line,
checked_lines = checked_lines,
amount_to_dist = line_amount_to_dist,
reconcile_ids = new_reconcile_ids,
continue_reconcile = (not continue_reconcile),
context = context)
budget_res = []
budget_distributed = 0.0
# Check if there is budget, void or liquid lines, if not return none_res, even if its empty.
if budget_lines:
# Write dists and build lists
dist_obj = self.pool.get('account.move.line.distribution')
# Budget list
budget_total = 0.0
budget_budget_move_line_ids = []
budget_budget_move_lines = []
bud_move_obj = self.pool.get('budget.move')
for lines in budget_lines.values():
budget_budget_move_lines += lines
for line in budget_budget_move_lines:
budget_budget_move_line_ids.append(line.id)
budget_total += abs(line.compromised)
for line in budget_budget_move_lines:
distribution_amount = abs(line.compromised)
# If the resulting total of budget plus liquid lines is more than available, the amount has to be fractioned.
if budget_total > amount_to_dist:
distribution_amount = distribution_amount * amount_to_dist / budget_total
if line.fixed_amount < 0:
signed_dist_amount = distribution_amount * -1
else:
signed_dist_amount = distribution_amount
budget_distributed += distribution_amount
vals = {
'account_move_line_id': original_line.id,
'distribution_amount': signed_dist_amount,
'distribution_percentage': 100 * abs(distribution_amount) / abs(original_amount_to_dist),
'target_budget_move_line_id': line.id,
'reconcile_ids': [(6, 0, new_reconcile_ids)],
'type': 'auto',
'account_move_line_type': 'liquid',
}
budget_res.append(dist_obj.create(cr, uid, vals, context = context))
bud_move_obj.signal_workflow(cr, uid, [line.budget_move_id.id], 'button_check_execution', context=context)
distributed_amount = budget_distributed
# Check if some dists are returned to adjust their values
if none_res:
self._adjust_distributed_values(cr, uid, none_res, amount_to_dist - distributed_amount, context = context, object="budget")
return budget_res + none_res
    def reconcile_budget_check(self, cr, uid, ids, context={}, is_incremental=False):
        """(Re)build automatic budget distributions for the reconciles in ``ids``.

        For every move line reached through a reconcile that either belongs to
        an account flagged ``moves_cash`` or to a move with budget type
        ``void``, the matching recursive auto-distribution is run and its
        result validated with ``_check_auto_distributions``.  Finally the
        budget moves targeted by the created distributions are recalculated.

        :returns: dict mapping each processed move line id to the list of
            distribution ids that passed the check.
        """
        # NOTE(review): mutable default ``context={}`` — it is only read here,
        # but ``context=None`` would be the safer convention; confirm callers.
        done_lines = []
        res = {}
        for reconcile in self.browse(cr, uid, ids, context=context):
            # Check if reconcile "touches" a move that touches a liquid account on any of its move lines
            # First get the moves of the reconciled lines (full or partial reconcile)
            if reconcile.line_id:
                moves = [line.move_id for line in reconcile.line_id]
            else:
                moves = [line.move_id for line in reconcile.line_partial_ids]
            # Then get all the lines of those moves, reconciled and counterparts
            move_lines = [line for move in moves for line in move.line_id]
            # Check if the account is marked as moves_cash
            for line in move_lines:
                if (line.id not in done_lines) and line.account_id and line.account_id.moves_cash:
                    dist_ids = self._recursive_liquid_get_auto_distribution(cr, uid, line, context=context, is_incremental=is_incremental)
                    checked_dist_ids = self._check_auto_distributions(cr, uid, line, dist_ids, context=context,object="budget")
                    if checked_dist_ids:
                        res[line.id] = checked_dist_ids
                elif (line.id not in done_lines) and line.move_id.budget_type == 'void':
                    # NOTE(review): ``_recursive_void_get_auto_distribution`` as
                    # defined in this file does not accept ``is_incremental``,
                    # so this call raises TypeError — confirm/fix the callee.
                    dist_ids = self._recursive_void_get_auto_distribution(cr, uid, line, context=context, is_incremental=is_incremental)
                    checked_dist_ids = self._check_auto_distributions(cr, uid, line, dist_ids, context=context, object="budget")
                    if checked_dist_ids:
                        res[line.id] = checked_dist_ids
                # Mark the line as processed whether or not it produced distributions.
                done_lines.append(line.id)
        # Recalculate budget move values
        if res:
            budget_move_ids = []
            dist_obj = self.pool.get('account.move.line.distribution')
            # Flatten the per-line lists of distribution ids into one list.
            dist_ids = [dist_id for dist_ids in res.values() for dist_id in dist_ids]
            dists = dist_obj.browse(cr, uid, dist_ids)
            for dist in dists:
                if dist.target_budget_move_line_id and dist.target_budget_move_line_id.budget_move_id:
                    budget_move_ids.append(dist.target_budget_move_line_id.budget_move_id.id)
            if budget_move_ids:
                budget_move_obj = self.pool.get('budget.move')
                budget_move_obj.recalculate_values(cr, uid, budget_move_ids, context = context)
        return res
class Account(osv.Model):
    """account.account extension: adds a default budget program line column."""
    _inherit = 'account.account'
    _columns = {
        # Budget program line proposed by default for moves on this account.
        'default_budget_program_line' : fields.many2one('budget.program.line','Default budget program line'),
    }
# -*- encoding: utf-8 -*-
# back ported from CPython 3
# A. HISTORY OF THE SOFTWARE
# ==========================
#
# Python was created in the early 1990s by Guido van Rossum at Stichting
# Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
# as a successor of a language called ABC. Guido remains Python's
# principal author, although it includes many contributions from others.
#
# In 1995, Guido continued his work on Python at the Corporation for
# National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
# in Reston, Virginia where he released several versions of the
# software.
#
# In May 2000, Guido and the Python core development team moved to
# BeOpen.com to form the BeOpen PythonLabs team. In October of the same
# year, the PythonLabs team moved to Digital Creations (now Zope
# Corporation, see http://www.zope.com). In 2001, the Python Software
# Foundation (PSF, see http://www.python.org/psf/) was formed, a
# non-profit organization created specifically to own Python-related
# Intellectual Property. Zope Corporation is a sponsoring member of
# the PSF.
#
# All Python releases are Open Source (see http://www.opensource.org for
# the Open Source Definition). Historically, most, but not all, Python
# releases have also been GPL-compatible; the table below summarizes
# the various releases.
#
# Release Derived Year Owner GPL-
# from compatible? (1)
#
# 0.9.0 thru 1.2 1991-1995 CWI yes
# 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
# 1.6 1.5.2 2000 CNRI no
# 2.0 1.6 2000 BeOpen.com no
# 1.6.1 1.6 2001 CNRI yes (2)
# 2.1 2.0+1.6.1 2001 PSF no
# 2.0.1 2.0+1.6.1 2001 PSF yes
# 2.1.1 2.1+2.0.1 2001 PSF yes
# 2.2 2.1.1 2001 PSF yes
# 2.1.2 2.1.1 2002 PSF yes
# 2.1.3 2.1.2 2002 PSF yes
# 2.2.1 2.2 2002 PSF yes
# 2.2.2 2.2.1 2002 PSF yes
# 2.2.3 2.2.2 2003 PSF yes
# 2.3 2.2.2 2002-2003 PSF yes
# 2.3.1 2.3 2002-2003 PSF yes
# 2.3.2 2.3.1 2002-2003 PSF yes
# 2.3.3 2.3.2 2002-2003 PSF yes
# 2.3.4 2.3.3 2004 PSF yes
# 2.3.5 2.3.4 2005 PSF yes
# 2.4 2.3 2004 PSF yes
# 2.4.1 2.4 2005 PSF yes
# 2.4.2 2.4.1 2005 PSF yes
# 2.4.3 2.4.2 2006 PSF yes
# 2.4.4 2.4.3 2006 PSF yes
# 2.5 2.4 2006 PSF yes
# 2.5.1 2.5 2007 PSF yes
# 2.5.2 2.5.1 2008 PSF yes
# 2.5.3 2.5.2 2008 PSF yes
# 2.6 2.5 2008 PSF yes
# 2.6.1 2.6 2008 PSF yes
# 2.6.2 2.6.1 2009 PSF yes
# 2.6.3 2.6.2 2009 PSF yes
# 2.6.4 2.6.3 2009 PSF yes
# 2.6.5 2.6.4 2010 PSF yes
# 2.7 2.6 2010 PSF yes
#
# Footnotes:
#
# (1) GPL-compatible doesn't mean that we're distributing Python under
# the GPL. All Python licenses, unlike the GPL, let you distribute
# a modified version without making your changes open source. The
# GPL-compatible licenses make it possible to combine Python with
# other software that is released under the GPL; the others don't.
#
# (2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
# because its license has a choice of law clause. According to
# CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
# is "not incompatible" with the GPL.
#
# Thanks to the many outside volunteers who have worked under Guido's
# direction to make these releases possible.
#
#
# B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
# ===============================================================
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013 Python Software Foundation; All Rights Reserved" are retained
# in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
#
#
# BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
# -------------------------------------------
#
# BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
#
# 1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
# office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
# Individual or Organization ("Licensee") accessing and otherwise using
# this software in source or binary form and its associated
# documentation ("the Software").
#
# 2. Subject to the terms and conditions of this BeOpen Python License
# Agreement, BeOpen hereby grants Licensee a non-exclusive,
# royalty-free, world-wide license to reproduce, analyze, test, perform
# and/or display publicly, prepare derivative works, distribute, and
# otherwise use the Software alone or in any derivative version,
# provided, however, that the BeOpen Python License is retained in the
# Software, alone or in any derivative version prepared by Licensee.
#
# 3. BeOpen is making the Software available to Licensee on an "AS IS"
# basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
# SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
# AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
# DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 5. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 6. This License Agreement shall be governed by and interpreted in all
# respects by the law of the State of California, excluding conflict of
# law provisions. Nothing in this License Agreement shall be deemed to
# create any relationship of agency, partnership, or joint venture
# between BeOpen and Licensee. This License Agreement does not grant
# permission to use BeOpen trademarks or trade names in a trademark
# sense to endorse or promote products or services of Licensee, or any
# third party. As an exception, the "BeOpen Python" logos available at
# http://www.pythonlabs.com/logos.html may be used according to the
# permissions granted on that web page.
#
# 7. By copying, installing or otherwise using the software, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
#
#
# CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
# ---------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Corporation for National
# Research Initiatives, having an office at 1895 Preston White Drive,
# Reston, VA 20191 ("CNRI"), and the Individual or Organization
# ("Licensee") accessing and otherwise using Python 1.6.1 software in
# source or binary form and its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, CNRI
# hereby grants Licensee a nonexclusive, royalty-free, world-wide
# license to reproduce, analyze, test, perform and/or display publicly,
# prepare derivative works, distribute, and otherwise use Python 1.6.1
# alone or in any derivative version, provided, however, that CNRI's
# License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
# 1995-2001 Corporation for National Research Initiatives; All Rights
# Reserved" are retained in Python 1.6.1 alone or in any derivative
# version prepared by Licensee. Alternately, in lieu of CNRI's License
# Agreement, Licensee may substitute the following text (omitting the
# quotes): "Python 1.6.1 is made available subject to the terms and
# conditions in CNRI's License Agreement. This Agreement together with
# Python 1.6.1 may be located on the Internet using the following
# unique, persistent identifier (known as a handle): 1895.22/1013. This
# Agreement may also be obtained from a proxy server on the Internet
# using the following URL: http://hdl.handle.net/1895.22/1013".
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python 1.6.1 or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python 1.6.1.
#
# 4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
# basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# 1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. This License Agreement shall be governed by the federal
# intellectual property law of the United States, including without
# limitation the federal copyright law, and, to the extent such
# U.S. federal law does not apply, by the law of the Commonwealth of
# Virginia, excluding Virginia's conflict of law provisions.
# Notwithstanding the foregoing, with regard to derivative works based
# on Python 1.6.1 that incorporate non-separable material that was
# previously distributed under the GNU General Public License (GPL), the
# law of the Commonwealth of Virginia shall govern this License
# Agreement only as to issues arising under or with respect to
# Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
# License Agreement shall be deemed to create any relationship of
# agency, partnership, or joint venture between CNRI and Licensee. This
# License Agreement does not grant permission to use CNRI trademarks or
# trade name in a trademark sense to endorse or promote products or
# services of Licensee, or any third party.
#
# 8. By clicking on the "ACCEPT" button where indicated, or by copying,
# installing or otherwise using Python 1.6.1, Licensee agrees to be
# bound by the terms and conditions of this License Agreement.
#
# ACCEPT
#
#
# CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
# --------------------------------------------------
#
# Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
# The Netherlands. All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Stichting Mathematisch
# Centrum or CWI not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
an usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedule other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, which size is usually related to the amount of CPU memory),
followed by a merging passes for these runs, which merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
# Public API of this heapq backport; the _max variants below are private helpers.
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
           'nlargest', 'nsmallest', 'heappushpop']
def heappush(heap, item):
    """Add *item* onto *heap*, keeping the heap invariant intact."""
    tail = len(heap)          # index the new element will occupy
    heap.append(item)
    # Sift the new leaf toward the root until it fits.
    _siftdown(heap, 0, tail)
def heappop(heap):
    """Remove and return the smallest item, keeping the heap invariant."""
    tail = heap.pop()    # IndexError on an empty heap, as documented
    if not heap:
        return tail
    # Move the last leaf to the root, then sift it down into place.
    smallest = heap[0]
    heap[0] = tail
    _siftup(heap, 0)
    return smallest
def heapreplace(heap, item):
    """Pop the smallest value and push *item* in one pass.

    More efficient than heappop() followed by heappush(), and keeps the
    heap size fixed.  Note that the returned value may be larger than
    *item*; when that matters, guard with a conditional replacement:

        if item > heap[0]:
            item = heapreplace(heap, item)
    """
    smallest = heap[0]    # IndexError on an empty heap, as documented
    heap[0] = item
    _siftup(heap, 0)
    return smallest
def heappushpop(heap, item):
    """Equivalent to heappush() followed by heappop(), but faster."""
    if not heap or not heap[0] < item:
        # item would be popped straight back out (it is no larger than
        # the root, or the heap is empty), so skip the heap work.
        return item
    item, heap[0] = heap[0], item
    _siftup(heap, 0)
    return item
def heapify(x):
    """Rearrange list *x* into heap order, in-place, in O(len(x)) time."""
    # Only indices with at least one child (i < n//2) can violate the
    # invariant; repair them bottom-up so each node's children are
    # already valid heaps when it is sifted.
    for idx in range(len(x) // 2 - 1, -1, -1):
        _siftup(x, idx)
def _heappop_max(heap):
"""Maxheap version of a heappop."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup_max(heap, 0)
return returnitem
return lastelt
def _heapreplace_max(heap, item):
    """Max-heap counterpart of heapreplace()."""
    largest = heap[0]    # IndexError on an empty heap, as documented
    heap[0] = item
    _siftup_max(heap, 0)
    return largest
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
    # The subtrees under pos are valid heaps; place the out-of-order
    # value at pos by walking the smaller-child chain all the way down
    # to a leaf and then sifting the value back up.  Per the analysis
    # above (Knuth, Vol. 3), this costs fewer comparisons on average
    # than bouncing out early, because the sifted value is usually big.
    endpos = len(heap)
    startpos = pos
    newitem = heap[pos]
    childpos = 2 * pos + 1        # left child of pos
    while childpos < endpos:
        rightpos = childpos + 1
        # Pick the right child whenever it is not larger than the left.
        if rightpos < endpos and not heap[childpos] < heap[rightpos]:
            childpos = rightpos
        heap[pos] = heap[childpos]
        pos = childpos
        childpos = 2 * pos + 1
    # pos is now an empty leaf: park newitem there and bubble it up.
    heap[pos] = newitem
    _siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
    'Maxheap variant of _siftup'
    endpos = len(heap)
    startpos = pos
    newitem = heap[pos]
    childpos = 2 * pos + 1        # left child of pos
    # Promote the larger child repeatedly until reaching a leaf.
    while childpos < endpos:
        rightpos = childpos + 1
        if rightpos < endpos and not heap[rightpos] < heap[childpos]:
            childpos = rightpos
        heap[pos] = heap[childpos]
        pos = childpos
        childpos = 2 * pos + 1
    # Drop newitem at the vacated leaf, then restore order upward.
    heap[pos] = newitem
    _siftdown_max(heap, startpos, pos)
def merge(iterables, key=None, reverse=False):
    '''Merge multiple sorted inputs into a single sorted output.

    Similar to sorted(itertools.chain(*iterables)) but returns a generator,
    does not pull the data into memory all at once, and assumes that each of
    the input streams is already sorted (smallest to largest).

    >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
    [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]

    If *key* is not None, applies a key function to each element to determine
    its sort order.

    >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len))
    ['dog', 'cat', 'fish', 'horse', 'kangaroo']
    '''
    h = []
    h_append = h.append
    # For a reversed merge the max-heap variants are used and the
    # tiebreaker is negated, so ties between equal heads still resolve
    # to the earlier input stream (stable merge) in both directions.
    if reverse:
        _heapify = _heapify_max
        _heappop = _heappop_max
        _heapreplace = _heapreplace_max
        direction = -1
    else:
        _heapify = heapify
        _heappop = heappop
        _heapreplace = heapreplace
        direction = 1
    if key is None:
        # Fast path: each heap entry is a mutable [value, order, iterator]
        # list that is updated in place and re-sifted, avoiding a new
        # allocation per yielded element.
        for order, it in enumerate(map(iter, iterables)):
            try:
                h_append([next(it), order * direction, it])
            except StopIteration:
                pass
        _heapify(h)
        while len(h) > 1:
            try:
                while True:
                    value, order, it = s = h[0]
                    yield value
                    s[0] = next(it)           # raises StopIteration when exhausted
                    _heapreplace(h, s)        # restore heap condition
            except StopIteration:
                _heappop(h)                   # remove empty iterator
        if h:
            # fast case when only a single iterator remains
            value, order, it = h[0]
            yield value
            for value in it:
                yield value
        return
    # Keyed variant: entries carry [key(value), order, value, iterator] so
    # comparisons use the precomputed key while the raw value is yielded.
    for order, it in enumerate(map(iter, iterables)):
        try:
            value = next(it)
            h_append([key(value), order * direction, value, it])
        except StopIteration:
            pass
    _heapify(h)
    while len(h) > 1:
        try:
            while True:
                key_value, order, value, it = s = h[0]
                yield value
                value = next(it)
                s[0] = key(value)
                s[2] = value
                _heapreplace(h, s)
        except StopIteration:
            _heappop(h)
    if h:
        # Single stream left: drain it directly, no more comparisons.
        key_value, order, value, it = h[0]
        yield value
        for value in it:
            yield value
# Algorithm notes for nlargest() and nsmallest()
# ==============================================
#
# Make a single pass over the data while keeping the k most extreme values
# in a heap. Memory consumption is limited to keeping k values in a list.
#
# Measured performance for random inputs:
#
# number of comparisons
# n inputs k-extreme values (average of 5 trials) % more than min()
# ------------- ---------------- --------------------- -----------------
# 1,000 100 3,317 231.7%
# 10,000 100 14,046 40.5%
# 100,000 100 105,749 5.7%
# 1,000,000 100 1,007,751 0.8%
# 10,000,000 100 10,009,401 0.1%
#
# Theoretical number of comparisons for k smallest of n random inputs:
#
# Step Comparisons Action
# ---- -------------------------- ---------------------------
# 1 1.66 * k heapify the first k-inputs
# 2 n - k compare remaining elements to top of heap
# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap
# 4 k * lg2(k) - (k/2) final sort of the k most extreme values
#
# Combining and simplifying for a rough estimate gives:
#
# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k))
#
# Computing the number of comparisons for step 3:
# -----------------------------------------------
# * For the i-th new value from the iterable, the probability of being in the
# k most extreme values is k/i. For example, the probability of the 101st
# value seen being in the 100 most extreme values is 100/101.
# * If the value is a new extreme value, the cost of inserting it into the
# heap is 1 + log(k, 2).
# * The probability times the cost gives:
# (k/i) * (1 + log(k, 2))
# * Summing across the remaining n-k elements gives:
# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1))
# * This reduces to:
# (H(n) - H(k)) * k * (1 + log(k, 2))
# * Where H(n) is the n-th harmonic number estimated by:
# gamma = 0.5772156649
# H(n) = log(n, e) + gamma + 1 / (2 * n)
# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence
# * Substituting the H(n) formula:
# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2)
#
# Worst-case for step 3:
# ----------------------
# In the worst case, the input data is reverse-sorted so that every new element
# must be inserted in the heap:
#
# comparisons = 1.66 * k + log(k, 2) * (n - k)
#
# Alternative Algorithms
# ----------------------
# Other algorithms were not used because they:
# 1) Took much more auxiliary memory,
# 2) Made multiple passes over the data.
# 3) Made more comparisons in common cases (small k, large n, semi-random input).
# See the more detailed comparison of approach at:
# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest
def nsmallest(n, iterable, key=None):
    """Find the n smallest elements in a dataset.

    Equivalent to: sorted(iterable, key=key)[:n]
    """
    # Short-cut for n==1 is to use min()
    if n == 1:
        it = iter(iterable)
        sentinel = object()    # distinguishes "empty input" from any real value
        if key is None:
            result = min(it, default=sentinel)
        else:
            result = min(it, default=sentinel, key=key)
        return [] if result is sentinel else [result]
    # When n>=size, it's faster to use sorted()
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        pass                   # not sized (e.g. a generator); fall through
    else:
        if n >= size:
            return sorted(iterable, key=key)[:n]
    # When key is none, use simpler decoration
    if key is None:
        it = iter(iterable)
        # put the range(n) first so that zip() doesn't
        # consume one too many elements from the iterator
        result = [(elem, i) for i, elem in zip(range(n), it)]
        if not result:
            return result
        # Keep the k smallest seen so far in a max-heap: the root is the
        # current worst candidate and is evicted whenever a smaller
        # element arrives.  `order` is a rising serial number that breaks
        # ties without comparing beyond the element, keeping results stable.
        _heapify_max(result)
        top = result[0][0]
        order = n
        _heapreplace = _heapreplace_max
        for elem in it:
            if elem < top:
                _heapreplace(result, (elem, order))
                top = result[0][0]
                order += 1
        result.sort()
        return [r[0] for r in result]
    # General case, slowest method: decorate as (key, order, value) so
    # comparisons use the precomputed key and never touch the value.
    it = iter(iterable)
    result = [(key(elem), i, elem) for i, elem in zip(range(n), it)]
    if not result:
        return result
    _heapify_max(result)
    top = result[0][0]
    order = n
    _heapreplace = _heapreplace_max
    for elem in it:
        k = key(elem)
        if k < top:
            _heapreplace(result, (k, order, elem))
            top = result[0][0]
            order += 1
    result.sort()
    return [r[2] for r in result]
def nlargest(n, iterable, key=None):
    """Find the n largest elements in a dataset.

    Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
    """
    # Short-cut for n==1 is to use max()
    if n == 1:
        it = iter(iterable)
        sentinel = object()    # distinguishes "empty input" from any real value
        if key is None:
            result = max(it, default=sentinel)
        else:
            result = max(it, default=sentinel, key=key)
        return [] if result is sentinel else [result]
    # When n>=size, it's faster to use sorted()
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        pass                   # not sized (e.g. a generator); fall through
    else:
        if n >= size:
            return sorted(iterable, key=key, reverse=True)[:n]
    # When key is none, use simpler decoration
    if key is None:
        it = iter(iterable)
        # Decorate with 0, -1, -2, ... so ties between equal elements
        # resolve to the earlier one (stable result) without comparing
        # anything beyond the element itself.
        result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)]
        if not result:
            return result
        # Min-heap of the k largest so far: the root is the weakest
        # candidate and is replaced whenever a larger element arrives.
        heapify(result)
        top = result[0][0]
        order = -n
        _heapreplace = heapreplace
        for elem in it:
            if top < elem:
                _heapreplace(result, (elem, order))
                top = result[0][0]
                order -= 1
        result.sort(reverse=True)
        return [r[0] for r in result]
    # General case, slowest method: decorate as (key, order, value) so
    # comparisons use the precomputed key and never touch the value.
    it = iter(iterable)
    result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)]
    if not result:
        return result
    heapify(result)
    top = result[0][0]
    order = -n
    _heapreplace = heapreplace
    for elem in it:
        k = key(elem)
        if top < k:
            _heapreplace(result, (k, order, elem))
            top = result[0][0]
            order -= 1
    result.sort(reverse=True)
    return [r[2] for r in result]
# If available, use C implementation
try:
    from _heapq import *
except ImportError:
    pass
# The star-import above does not cover the private max-heap helpers, so
# pull each one in individually when the C module provides it; the
# pure-Python definitions above remain as fallbacks otherwise.
try:
    from _heapq import _heapreplace_max
except ImportError:
    pass
try:
    from _heapq import _heapify_max
except ImportError:
    pass
try:
    from _heapq import _heappop_max
except ImportError:
    pass
if __name__ == "__main__":
    # Self-test: run the examples embedded in the docstrings.
    import doctest
    print(doctest.testmod())
| andrewor14/iolap | python/pyspark/heapq3.py | Python | apache-2.0 | 37,518 |
import sys
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from kaleo.models import InvitationStat
class Command(BaseCommand):
    """Top up every user's invite allocation to a given minimum."""

    help = "Makes sure all users have a certain number of invites."

    def handle(self, *args, **kwargs):
        if not args:
            sys.exit("You must supply the number of invites as an argument.")
        try:
            target = int(args[0])
        except ValueError:
            sys.exit("The argument for number of invites must be an integer.")
        for user in User.objects.all():
            stat, _ = InvitationStat.objects.get_or_create(user=user)
            # Only grow allocations; users already above the target keep
            # their surplus untouched.
            shortfall = target - stat.invites_remaining()
            if shortfall > 0:
                stat.invites_allocated += shortfall
                stat.save()
| justhamade/kaleo | kaleo/management/commands/topoff_invites.py | Python | bsd-3-clause | 877 |
from django.core.management.base import BaseCommand, CommandError
from shipping.modules.tieredweight.models import Carrier as TWCarrier, Zone as TWZone, ZoneTranslation as TWZoneTranslation, WeightTier as TWWeightTier
from nogroth.models import *
class Command(BaseCommand):
    """Copy Tiered Weight shipping data into the NoGroTH tables."""

    help = 'Converts existing Tiered Weight shipping rules to NoGroTH'

    def handle(self, *args, **options):
        counts = {}
        # (report key, source model, destination model) — copied in this
        # order so zones exist before their translations and tiers.
        migrations = (
            ('car', TWCarrier, Carrier),
            ('zo', TWZone, Zone),
            ('zotr', TWZoneTranslation, ZoneTranslation),
            ('wt', TWWeightTier, WeightTier),
        )
        for label, src_model, dest_model in migrations:
            rows = src_model.objects.all()
            counts[label] = rows.count()
            # .values() dicts include the pk, so ids are preserved and
            # cross-table references stay valid after the copy.
            for row in rows.values():
                dest_model.objects.create(**row)
        self.stdout.write('Converted %(car)s carriers, %(zo)s zones, %(zotr)s zone translations, and %(wt)s weight tiers to NoGroTH\n' % counts)
| kevinharvey/satchmo-nogroth | nogroth/management/commands/satchmo_nogroth_copy_tiers.py | Python | mit | 1,282 |
# for localized messages
from . import _
from Screens.Screen import Screen
from enigma import eTimer
from boxbranding import getMachineBrand, getMachineName
from Screens.MessageBox import MessageBox
from Screens.Standby import TryQuitMainloop
from Components.ActionMap import ActionMap
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.ConfigList import ConfigListScreen
from Components.config import getConfigListEntry, config, ConfigSelection, NoSave, configfile
from Components.Console import Console
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Tools.LoadPixmap import LoadPixmap
from os import system, rename, path, mkdir, remove
from time import sleep
from re import search
class HddMount(Screen):
    # Mount Manager screen (Python 2 / Enigma2): lists every sd[a-z][1-9]
    # partition found in /proc/partitions with size, mount point and
    # filesystem type.  Colour keys: red = adopt as /media/hdd,
    # green = open mount setup, yellow = unmount, blue = mount.
    # NOTE(review): parts of this file look machine-decompiled (dead
    # `continue` after `break`, repetitive try blocks) — structure kept
    # verbatim.
    skin = """
	<screen position="center,center" size="640,460" title="Mount Manager">
		<ePixmap pixmap="skin_default/buttons/red.png" position="25,0" size="140,40" alphatest="on" />
		<ePixmap pixmap="skin_default/buttons/green.png" position="175,0" size="140,40" alphatest="on" />
		<ePixmap pixmap="skin_default/buttons/yellow.png" position="325,0" size="140,40" alphatest="on" />
		<ePixmap pixmap="skin_default/buttons/blue.png" position="475,0" size="140,40" alphatest="on" />
		<widget name="key_red" position="25,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
		<widget name="key_green" position="175,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
		<widget name="key_yellow" position="325,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
		<widget name="key_blue" position="475,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
		<widget source="list" render="Listbox" position="10,50" size="620,450" scrollbarMode="showOnDemand" >
			<convert type="TemplatedMultiContent">
				{"template": [
					MultiContentEntryText(pos = (90, 0), size = (600, 30), font=0, text = 0),
					MultiContentEntryText(pos = (110, 30), size = (600, 50), font=1, flags = RT_VALIGN_TOP, text = 1),
					MultiContentEntryPixmapAlphaBlend(pos = (0, 0), size = (80, 80), png = 2),
					],
				"fonts": [gFont("Regular", 24),gFont("Regular", 20)],
				"itemHeight": 85
				}
			</convert>
		</widget>
		<widget name="lab1" zPosition="2" position="50,90" size="600,40" font="Regular;22" halign="center" transparent="1"/>
	</screen>"""

    def __init__(self, session):
        # Build the widget tree and kick off the first device scan.
        Screen.__init__(self, session)
        Screen.setTitle(self, _("Mount Manager"))
        self['key_red'] = Label(" ")
        self['key_green'] = Label(_("Setup Mounts"))
        self['key_yellow'] = Label("Unmount")
        self['key_blue'] = Label("Mount")
        self['lab1'] = Label()
        self.onChangedEntry = [ ]
        self.list = []
        self['list'] = List(self.list)
        self["list"].onSelectionChanged.append(self.selectionChanged)
        self['actions'] = ActionMap(['WizardActions', 'ColorActions', "MenuActions"], {'back': self.close, 'green': self.SetupMounts, 'red': self.saveMypoints, 'yellow': self.Unmount, 'blue': self.Mount, "menu": self.close})
        # Scanning is deferred via a timer so the "scanning" notice can be
        # painted before the blocking scan in updateList2 starts.
        self.activityTimer = eTimer()
        self.activityTimer.timeout.get().append(self.updateList2)
        self.updateList()

    def createSummary(self):
        # Summary (LCD) screen class; defined elsewhere in this plugin.
        return DevicesPanelSummary

    def selectionChanged(self):
        # Update the red key label (offer "Use as HDD" only when the
        # selection is not already mounted at /media/hdd) and push
        # name/description to any registered summary callbacks.
        if len(self.list) == 0:
            return
        self.sel = self['list'].getCurrent()
        mountp = self.sel[3]
        if mountp.find('/media/hdd') < 0:
            self["key_red"].setText(_("Use as HDD"))
        else:
            self["key_red"].setText(" ")
        if self.sel:
            try:
                name = str(self.sel[0])
                desc = str(self.sel[1].replace('\t',' '))
            except:
                name = ""
                desc = ""
        else:
            name = ""
            desc = ""
        for cb in self.onChangedEntry:
            cb(name, desc)

    def updateList(self, result = None, retval = None, extra_args = None):
        # Show the scanning notice, then let the timer fire updateList2.
        # The unused parameters match the Console.ePopen callback signature.
        scanning = _("Wait please while scanning for devices...")
        self['lab1'].setText(scanning)
        self.activityTimer.start(10)

    def updateList2(self):
        # Rebuild self.list from /proc/partitions: one entry per distinct
        # sd[a-z][1-9] partition, built by buildMy_rec.
        self.activityTimer.stop()
        self.list = []
        list2 = []
        f = open('/proc/partitions', 'r')
        for line in f.readlines():
            parts = line.strip().split()
            if not parts:
                continue
            device = parts[3]
            if not search('sd[a-z][1-9]',device):
                continue
            if device in list2:
                continue
            self.buildMy_rec(device)
            list2.append(device)
        f.close()
        self['list'].list = self.list
        self['lab1'].hide()

    def buildMy_rec(self, device):
        # Build one (name, description, icon, mountpoint, /dev path) entry
        # for `device` and append it to self.list.
        device2 = ''
        # Strip the partition digit to get the parent block device name
        # (e.g. 'sda1' -> 'sda'); probed one digit at a time.
        try:
            if device.find('1') > 1:
                device2 = device.replace('1', '')
        except:
            device2 = ''
        try:
            if device.find('2') > 1:
                device2 = device.replace('2', '')
        except:
            device2 = ''
        try:
            if device.find('3') > 1:
                device2 = device.replace('3', '')
        except:
            device2 = ''
        try:
            if device.find('4') > 1:
                device2 = device.replace('4', '')
        except:
            device2 = ''
        try:
            if device.find('5') > 1:
                device2 = device.replace('5', '')
        except:
            device2 = ''
        try:
            if device.find('6') > 1:
                device2 = device.replace('6', '')
        except:
            device2 = ''
        try:
            if device.find('7') > 1:
                device2 = device.replace('7', '')
        except:
            device2 = ''
        try:
            if device.find('8') > 1:
                device2 = device.replace('8', '')
        except:
            device2 = ''
        # PCI-attached devices are shown as hard disks; everything else
        # is assumed to be a USB stick.
        devicetype = path.realpath('/sys/block/' + device2 + '/device')
        d2 = device
        name = 'USB: '
        mypixmap = '/usr/lib/enigma2/python/Plugins/Extensions/ExtrasPanel/icons/dev_usbstick.png'
        model = file('/sys/block/' + device2 + '/device/model').read()
        model = str(model).replace('\n', '')
        des = ''
        if devicetype.find('/devices/pci') != -1:
            name = _("HARD DISK: ")
            mypixmap = '/usr/lib/enigma2/python/Plugins/Extensions/ExtrasPanel/icons/dev_hdd.png'
        name = name + model
        # Shell out to sfdisk to collect swap partitions, so they can be
        # labelled as swap below.  NOTE(review): blocking sleep on the UI
        # path; self.Console is recreated here and reused by saveMypoints.
        self.Console = Console()
        self.Console.ePopen("sfdisk -l /dev/sd? | grep swap | awk '{print $(NF-9)}' >/tmp/devices.tmp")
        sleep(0.5)
        f = open('/tmp/devices.tmp', 'r')
        swapdevices = f.read()
        f.close()
        if path.exists('/tmp/devices.tmp'):
            remove('/tmp/devices.tmp')
        swapdevices = swapdevices.replace('\n','')
        swapdevices = swapdevices.split('/')
        # Look up the current mount point / fs type / rw flags from
        # /proc/mounts.  The dead `continue` after `break` is preserved
        # from the original (decompiler artifact).
        f = open('/proc/mounts', 'r')
        for line in f.readlines():
            if line.find(device) != -1:
                parts = line.strip().split()
                d1 = parts[1]
                dtype = parts[2]
                rw = parts[3]
                break
                continue
            else:
                if device in swapdevices:
                    parts = line.strip().split()
                    d1 = _("None")
                    dtype = 'swap'
                    rw = _("None")
                    break
                    continue
                else:
                    d1 = _("None")
                    dtype = _("unavailable")
                    rw = _("None")
        f.close()
        # Compute a human-readable size: /proc/partitions reports 1 KiB
        # blocks; the /sys fallback reports 512-byte sectors (hence /2).
        f = open('/proc/partitions', 'r')
        for line in f.readlines():
            if line.find(device) != -1:
                parts = line.strip().split()
                size = int(parts[2])
                if (((float(size) / 1024) / 1024) / 1024) > 1:
                    des = _("Size: ") + str(round((((float(size) / 1024) / 1024) / 1024),2)) + _("TB")
                elif ((size / 1024) / 1024) > 1:
                    des = _("Size: ") + str((size / 1024) / 1024) + _("GB")
                else:
                    des = _("Size: ") + str(size / 1024) + _("MB")
            else:
                try:
                    size = file('/sys/block/' + device2 + '/' + device + '/size').read()
                    size = str(size).replace('\n', '')
                    size = int(size)
                except:
                    size = 0
                if ((((float(size) / 2) / 1024) / 1024) / 1024) > 1:
                    des = _("Size: ") + str(round(((((float(size) / 2) / 1024) / 1024) / 1024),2)) + _("TB")
                elif (((size / 2) / 1024) / 1024) > 1:
                    des = _("Size: ") + str(((size / 2) / 1024) / 1024) + _("GB")
                else:
                    des = _("Size: ") + str((size / 2) / 1024) + _("MB")
        f.close()
        if des != '':
            if rw.startswith('rw'):
                rw = ' R/W'
            elif rw.startswith('ro'):
                rw = ' R/O'
            else:
                rw = ""
            des += '\t' + _("Mount: ") + d1 + '\n' + _("Device: ") + '/dev/' + device + '\t' + _("Type: ") + dtype + rw
            png = LoadPixmap(mypixmap)
            mountP = d1
            deviceP = '/dev/' + device
            res = (name, des, png, mountP, deviceP)
            self.list.append(res)

    def SetupMounts(self):
        # Green key: open the mount-point configuration screen; rescan
        # devices when it closes.
        self.session.openWithCallback(self.updateList, DevicePanelConf)

    def Mount(self):
        # Blue key: mount the selected device and verify via /proc/mounts.
        sel = self['list'].getCurrent()
        if sel:
            mountp = sel[3]
            device = sel[4]
            system ('mount ' + device)
            mountok = False
            f = open('/proc/mounts', 'r')
            for line in f.readlines():
                if line.find(device) != -1:
                    mountok = True
            if not mountok:
                self.session.open(MessageBox, _("Mount failed"), MessageBox.TYPE_INFO, timeout=5)
            self.updateList()

    def Unmount(self):
        # Yellow key: unmount the selected mount point; warn if the device
        # is still listed in /proc/mounts afterwards (umount failed, e.g.
        # in use for swap or recording paths).
        sel = self['list'].getCurrent()
        if sel:
            mountp = sel[3]
            device = sel[4]
            system ('umount ' + mountp)
            try:
                mounts = open("/proc/mounts")
            except IOError:
                return -1
            mountcheck = mounts.readlines()
            mounts.close()
            for line in mountcheck:
                parts = line.strip().split(" ")
                if path.realpath(parts[0]).startswith(device):
                    self.session.open(MessageBox, _("Can't unmount partiton, make sure it is not being used for swap or record/timeshift paths"), MessageBox.TYPE_INFO)
            self.updateList()

    def saveMypoints(self):
        # Red key: remount the selected device at /media/hdd and persist
        # the choice to /etc/fstab via add_fstab (using its blkid UUID).
        sel = self['list'].getCurrent()
        if sel:
            self.mountp = sel[3]
            self.device = sel[4]
            if self.mountp.find('/media/hdd') < 0:
                self.Console.ePopen('umount ' + self.device)
                if not path.exists('/media/hdd'):
                    mkdir('/media/hdd', 0755)
                else:
                    self.Console.ePopen('umount /media/hdd')
                self.Console.ePopen('mount ' + self.device + ' /media/hdd')
                self.Console.ePopen("/sbin/blkid | grep " + self.device, self.add_fstab, [self.device, self.mountp])
            else:
                self.session.open(MessageBox, _("This Device is already mounted as HDD."), MessageBox.TYPE_INFO, timeout = 10, close_on_any_key = True)

    def add_fstab(self, result = None, retval = None, extra_args = None):
        # Console callback: `result` is the blkid output for the device.
        # Extract its UUID, scrub any stale /media/hdd and per-device
        # lines from /etc/fstab, then append a UUID-based /media/hdd entry
        # and mount it.
        self.device = extra_args[0]
        self.mountp = extra_args[1]
        self.device_uuid_tmp = result.split('UUID=')
        self.device_uuid_tmp = self.device_uuid_tmp[1].replace('"',"")
        self.device_uuid_tmp = self.device_uuid_tmp.replace('\n',"")
        self.device_uuid_tmp = self.device_uuid_tmp.split()[0]
        self.device_uuid = 'UUID=' + self.device_uuid_tmp
        if not path.exists(self.mountp):
            mkdir(self.mountp, 0755)
        file('/etc/fstab.tmp', 'w').writelines([l for l in file('/etc/fstab').readlines() if '/media/hdd' not in l])
        rename('/etc/fstab.tmp','/etc/fstab')
        file('/etc/fstab.tmp', 'w').writelines([l for l in file('/etc/fstab').readlines() if self.device not in l])
        rename('/etc/fstab.tmp','/etc/fstab')
        file('/etc/fstab.tmp', 'w').writelines([l for l in file('/etc/fstab').readlines() if self.device_uuid not in l])
        rename('/etc/fstab.tmp','/etc/fstab')
        out = open('/etc/fstab', 'a')
        line = self.device_uuid + '\t/media/hdd\tauto\tdefaults\t0 0\n'
        out.write(line)
        out.close()
        self.Console.ePopen('mount /media/hdd', self.updateList)

    def restBo(self, answer):
        # MessageBox callback: restart Enigma2 if confirmed, otherwise
        # just refresh the device list.
        if answer is True:
            self.session.open(TryQuitMainloop, 2)
        else:
            self.updateList()
        self.selectionChanged()
class DevicePanelConf(Screen, ConfigListScreen):
skin = """
<screen position="center,center" size="640,460" title="Choose where to mount your devices to:">
<ePixmap pixmap="skin_default/buttons/red.png" position="25,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="175,0" size="140,40" alphatest="on" />
<widget name="key_red" position="25,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget name="key_green" position="175,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="config" position="30,60" size="580,275" scrollbarMode="showOnDemand"/>
<widget name="Linconn" position="30,375" size="580,20" font="Regular;18" halign="center" valign="center" backgroundColor="#9f1313"/>
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
self.list = []
ConfigListScreen.__init__(self, self.list)
Screen.setTitle(self, _("Choose where to mount your devices to:"))
self['key_green'] = Label(_("Save"))
self['key_red'] = Label(_("Cancel"))
self['Linconn'] = Label(_("Wait please while scanning your %s %s devices...") % (getMachineBrand(), getMachineName()))
self['actions'] = ActionMap(['WizardActions', 'ColorActions'], {'green': self.saveMypoints, 'red': self.close, 'back': self.close})
self.updateList()
def updateList(self):
self.list = []
list2 = []
self.Console = Console()
self.Console.ePopen("sfdisk -l /dev/sd? | grep swap | awk '{print $(NF-9)}' >/tmp/devices.tmp")
sleep(0.5)
f = open('/tmp/devices.tmp', 'r')
swapdevices = f.read()
f.close()
if path.exists('/tmp/devices.tmp'):
remove('/tmp/devices.tmp')
swapdevices = swapdevices.replace('\n','')
swapdevices = swapdevices.split('/')
f = open('/proc/partitions', 'r')
for line in f.readlines():
parts = line.strip().split()
if not parts:
continue
device = parts[3]
if not search('sd[a-z][1-9]',device):
continue
if device in list2:
continue
if device in swapdevices:
continue
self.buildMy_rec(device)
list2.append(device)
f.close()
self['config'].list = self.list
self['config'].l.setList(self.list)
self['Linconn'].hide()
def buildMy_rec(self, device):
try:
if device.find('1') > 0:
device2 = device.replace('1', '')
except:
device2 = ''
try:
if device.find('2') > 0:
device2 = device.replace('2', '')
except:
device2 = ''
try:
if device.find('3') > 0:
device2 = device.replace('3', '')
except:
device2 = ''
try:
if device.find('4') > 0:
device2 = device.replace('4', '')
except:
device2 = ''
devicetype = path.realpath('/sys/block/' + device2 + '/device')
d2 = device
name = 'USB: '
mypixmap = '/usr/lib/enigma2/python/Plugins/Extensions/ExtrasPanel/icons/dev_usbstick.png'
model = file('/sys/block/' + device2 + '/device/model').read()
model = str(model).replace('\n', '')
des = ''
if devicetype.find('/devices/pci') != -1:
name = _("HARD DISK: ")
mypixmap = '/usr/lib/enigma2/python/Plugins/Extensions/ExtrasPanel/icons/dev_hdd.png'
name = name + model
f = open('/proc/mounts', 'r')
for line in f.readlines():
if line.find(device) != -1:
parts = line.strip().split()
d1 = parts[1]
dtype = parts[2]
break
continue
else:
d1 = _("None")
dtype = _("unavailable")
f.close()
f = open('/proc/partitions', 'r')
for line in f.readlines():
if line.find(device) != -1:
parts = line.strip().split()
size = int(parts[2])
if (((float(size) / 1024) / 1024) / 1024) > 1:
des = _("Size: ") + str(round((((float(size) / 1024) / 1024) / 1024),2)) + _("TB")
elif ((size / 1024) / 1024) > 1:
des = _("Size: ") + str((size / 1024) / 1024) + _("GB")
else:
des = _("Size: ") + str(size / 1024) + _("MB")
else:
try:
size = file('/sys/block/' + device2 + '/' + device + '/size').read()
size = str(size).replace('\n', '')
size = int(size)
except:
size = 0
if ((((float(size) / 2) / 1024) / 1024) / 1024) > 1:
des = _("Size: ") + str(round(((((float(size) / 2) / 1024) / 1024) / 1024),2)) + _("TB")
elif (((size / 2) / 1024) / 1024) > 1:
des = _("Size: ") + str(((size / 2) / 1024) / 1024) + _("GB")
else:
des = _("Size: ") + str((size / 2) / 1024) + _("MB")
f.close()
item = NoSave(ConfigSelection(default='/media/' + device, choices=[('/media/' + device, '/media/' + device),
('/media/hdd', '/media/hdd'),
('/media/hdd2', '/media/hdd2'),
('/media/hdd3', '/media/hdd3'),
('/media/usb', '/media/usb'),
('/media/usb2', '/media/usb2'),
('/media/usb3', '/media/usb3'),
('/usr', '/usr')]))
if dtype == 'Linux':
dtype = 'ext3'
else:
dtype = 'auto'
item.value = d1.strip()
text = name + ' ' + des + ' /dev/' + device
res = getConfigListEntry(text, item, device, dtype)
if des != '' and self.list.append(res):
pass
def saveMypoints(self):
self.Console = Console()
mycheck = False
for x in self['config'].list:
self.device = x[2]
self.mountp = x[1].value
self.type = x[3]
self.Console.ePopen('umount ' + self.device)
self.Console.ePopen("/sbin/blkid | grep " + self.device, self.add_fstab, [self.device, self.mountp] )
message = _("Updating mount locations.")
ybox = self.session.openWithCallback(self.delay, MessageBox, message, type=MessageBox.TYPE_INFO, timeout=5, enable_input = False)
ybox.setTitle(_("Please wait."))
def delay(self, val):
message = _("Changes need a system restart to take effect.\nRestart your %s %s now?") % (getMachineBrand(), getMachineName())
ybox = self.session.openWithCallback(self.restartBox, MessageBox, message, MessageBox.TYPE_YESNO)
ybox.setTitle(_("Restart %s %s.") % (getMachineBrand(), getMachineName()))
def add_fstab(self, result = None, retval = None, extra_args = None):
    """Console callback for "/sbin/blkid | grep <device>": extract the device's
    UUID and filesystem type from the blkid output, then rewrite /etc/fstab so
    the device is mounted at the chosen mount point.

    result     -- raw blkid output for the device
    retval     -- console exit status (unused)
    extra_args -- [device, mountpoint] captured when the command was issued
    """
    self.device = extra_args[0]
    self.mountp = extra_args[1]
    self.device_tmp = result.split(' ')
    # Scan the first tokens for UUID=.  The original chained index tests
    # (device_tmp[0] .. device_tmp[3]) raised IndexError on short blkid output
    # and left device_uuid stale/unset when no token matched; fall back to the
    # raw device node so the fstab entry is still valid.
    self.device_uuid = self.device
    for token in self.device_tmp[:4]:
        if token.startswith('UUID='):
            self.device_uuid = token.replace('"', "").replace('\n', "")
            break
    # Same scan for TYPE=; default to 'auto' when absent (the original left
    # device_type unset in that case, crashing on the startswith() below).
    self.device_type = 'auto'
    for token in self.device_tmp[:5]:
        if token.startswith('TYPE='):
            self.device_type = token.replace('TYPE=', "").replace('"', "").replace('\n', "")
            break
    if self.device_type.startswith('ext'):
        # Let the kernel probe the exact ext2/3/4 variant at mount time.
        self.device_type = 'auto'
    if not path.exists(self.mountp):
        mkdir(self.mountp, 0o755)  # 0o755 is valid on both Python 2.6+ and 3 (was py2-only 0755)
    # Drop any stale fstab lines referring to this device (by node or UUID),
    # then append the fresh entry.  open() replaces the py2-only file() builtin.
    kept = [l for l in open('/etc/fstab').readlines()
            if self.device not in l and self.device_uuid not in l]
    open('/etc/fstab.tmp', 'w').writelines(kept)
    rename('/etc/fstab.tmp', '/etc/fstab')
    out = open('/etc/fstab', 'a')
    line = self.device_uuid + '\t' + self.mountp + '\t' + self.device_type + '\tdefaults\t0 0\n'
    out.write(line)
    out.close()
def restartBox(self, answer):
    """MessageBox callback: restart the GUI on an explicit 'yes', close otherwise."""
    if answer is not True:
        self.close()
        return
    self.session.open(TryQuitMainloop, 2)
class DevicesPanelSummary(Screen):
    """LCD/summary screen that mirrors the parent panel's current selection."""

    def __init__(self, session, parent):
        Screen.__init__(self, session, parent = parent)
        self["entry"] = StaticText("")
        self["desc"] = StaticText("")
        # Only track the parent's selection while this summary is actually visible.
        self.onShow.append(self.addWatcher)
        self.onHide.append(self.removeWatcher)

    def addWatcher(self):
        self.parent.onChangedEntry.append(self.selectionChanged)
        self.parent.selectionChanged()  # push the current selection immediately

    def removeWatcher(self):
        self.parent.onChangedEntry.remove(self.selectionChanged)

    def selectionChanged(self, name, desc):
        # Parent notifies with the entry title and its description text.
        self["entry"].text = name
        self["desc"].text = desc
| popazerty/test-1 | lib/python/Plugins/Extensions/ExtrasPanel/MountManager.py | Python | gpl-2.0 | 20,177 |
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Andrew Bettison <andrewb@zip.com.au> #
# Copyright 2012 Dima Kukushkin <dima@kukushkin.me> #
# Copyright 2012 Michael Woodworth <mwoodworth@upverter.com> #
# Copyright 2012 Petteri Muilu <pmuilu@xena.(none)> #
# Copyright 2012 Steve English <steve.english@navetas.com> #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Cameron White <cawhite@pdx.edu> #
# Copyright 2013 Ed Jackson <ed.jackson@gmail.com> #
# Copyright 2013 Jonathan J Hunt <hunt@braincorporation.com> #
# Copyright 2013 Mark Roddy <markroddy@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Jimmy Zelinskie <jimmyzelinskie@gmail.com> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2015 Brian Eugley <Brian.Eugley@capitalone.com> #
# Copyright 2015 Daniel Pocock <daniel@pocock.pro> #
# Copyright 2015 Jimmy Zelinskie <jimmyzelinskie@gmail.com> #
# Copyright 2016 Denis K <f1nal@cgaming.org> #
# Copyright 2016 Jared K. Smith <jaredsmith@jaredsmith.net> #
# Copyright 2016 Jimmy Zelinskie <jimmy.zelinskie+git@gmail.com> #
# Copyright 2016 Mathieu Mitchell <mmitchell@iweb.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Chris McBride <thehighlander@users.noreply.github.com> #
# Copyright 2017 Hugo <hugovk@users.noreply.github.com> #
# Copyright 2017 Simon <spam@esemi.ru> #
# Copyright 2018 Dylan <djstein@ncsu.edu> #
# Copyright 2018 Maarten Fonville <mfonville@users.noreply.github.com> #
# Copyright 2018 Mike Miller <github@mikeage.net> #
# Copyright 2018 R1kk3r <R1kk3r@users.noreply.github.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import base64
import json
import logging
import mimetypes
import os
import re
import requests
import sys
import time
import urllib
import urlparse
from io import IOBase
import Consts
import GithubException
# True when running under Python 3 (hexversion >= 0x03000000); gates the
# py2/py3 branches throughout this module.
atLeastPython3 = sys.hexversion >= 0x03000000
class RequestsResponse:
    """Adapter exposing a ``requests`` response through the httplib response API."""

    def __init__(self, r):
        # Copy the three attributes the rest of this module relies on.
        self.status = r.status_code
        self.headers = r.headers
        self.text = r.text

    def getheaders(self):
        # Python 2 dict-likes expose iteritems(); Python 3 ones expose items().
        if not atLeastPython3:
            return self.headers.iteritems()
        return self.headers.items()

    def read(self):
        return self.text
class HTTPSRequestsConnectionClass(object):
    """httplib-compatible connection wrapper driving a requests.Session over HTTPS."""

    def __init__(self, host, port=None, strict=False, timeout=None, retry=None, **kwargs):
        self.protocol = "https"
        self.host = host
        self.port = port if port else 443
        self.timeout = timeout
        self.verify = kwargs.get("verify", True)
        self.session = requests.Session()
        if retry:
            # Mount a retrying transport adapter so transient failures are retried.
            self.retry = retry
            self.adapter = requests.adapters.HTTPAdapter(max_retries=self.retry)
            self.session.mount('https://', self.adapter)

    def request(self, verb, url, input, headers):
        # httplib semantics: request() only records the call; getresponse() sends it.
        self.verb = verb
        self.url = url
        self.input = input
        self.headers = headers

    def getresponse(self):
        target = "%s://%s:%s%s" % (self.protocol, self.host, self.port, self.url)
        send = getattr(self.session, self.verb.lower())
        raw = send(
            target,
            headers=self.headers,
            data=self.input,
            timeout=self.timeout,
            verify=self.verify,
            allow_redirects=False,
        )
        return RequestsResponse(raw)

    def close(self):
        # Present only to honour the httplib connection interface.
        return
class HTTPRequestsConnectionClass(object):
    """httplib-compatible connection wrapper driving a requests.Session over plain HTTP."""

    def __init__(self, host, port=None, strict=False, timeout=None, retry=None, **kwargs):
        self.protocol = "http"
        self.host = host
        self.port = port if port else 80
        self.timeout = timeout
        self.verify = kwargs.get("verify", True)
        self.session = requests.Session()
        if retry:
            # Mount a retrying transport adapter so transient failures are retried.
            self.retry = retry
            self.adapter = requests.adapters.HTTPAdapter(max_retries=self.retry)
            self.session.mount('http://', self.adapter)

    def request(self, verb, url, input, headers):
        # httplib semantics: request() only records the call; getresponse() sends it.
        self.verb = verb
        self.url = url
        self.input = input
        self.headers = headers

    def getresponse(self):
        target = "%s://%s:%s%s" % (self.protocol, self.host, self.port, self.url)
        send = getattr(self.session, self.verb.lower())
        raw = send(
            target,
            headers=self.headers,
            data=self.input,
            timeout=self.timeout,
            verify=self.verify,
            allow_redirects=False,
        )
        return RequestsResponse(raw)

    def close(self):
        # Present only to honour the httplib connection interface.
        return
class Requester:
    """Low-level HTTP layer for PyGithub: builds authenticated requests, encodes
    payloads (JSON / multipart / blob), tracks GitHub rate-limit headers, and
    can capture request/response frames for debugging."""

    __httpConnectionClass = HTTPRequestsConnectionClass
    __httpsConnectionClass = HTTPSRequestsConnectionClass
    __connection = None  # cached connection, reused while __persist is True (see __createConnection)
    __persist = True

    @classmethod
    def injectConnectionClasses(cls, httpConnectionClass, httpsConnectionClass):
        """Test hook: substitute fake connection classes and disable connection reuse."""
        cls.__persist = False
        cls.__httpConnectionClass = httpConnectionClass
        cls.__httpsConnectionClass = httpsConnectionClass

    @classmethod
    def resetConnectionClasses(cls):
        """Restore the real connection classes after injectConnectionClasses()."""
        cls.__persist = True
        cls.__httpConnectionClass = HTTPRequestsConnectionClass
        cls.__httpsConnectionClass = HTTPSRequestsConnectionClass

    #############################################################
    # For Debug
    @classmethod
    def setDebugFlag(cls, flag):
        cls.DEBUG_FLAG = flag

    @classmethod
    def setOnCheckMe(cls, onCheckMe):
        cls.ON_CHECK_ME = onCheckMe

    DEBUG_FLAG = False
    DEBUG_FRAME_BUFFER_SIZE = 1024  # capacity of the captured-frame buffer
    DEBUG_HEADER_KEY = "DEBUG_FRAME"
    ON_CHECK_ME = None

    def NEW_DEBUG_FRAME(self, requestHeader):
        """
        Initialize a debug frame with requestHeader
        Frame count is updated and will be attached to respond header
        The structure of a frame: [requestHeader, statusCode, responseHeader, raw_data]
        Some of them may be None
        """
        if self.DEBUG_FLAG:  # pragma no branch (Flag always set in tests)
            new_frame = [requestHeader, None, None, None]
            if self._frameCount < self.DEBUG_FRAME_BUFFER_SIZE - 1:  # pragma no branch (Should be covered)
                self._frameBuffer.append(new_frame)
            else:
                self._frameBuffer[0] = new_frame  # pragma no cover (Should be covered)
            self._frameCount = len(self._frameBuffer) - 1

    def DEBUG_ON_RESPONSE(self, statusCode, responseHeader, data):
        '''
        Update current frame with response
        Current frame index will be attached to responseHeader
        '''
        if self.DEBUG_FLAG:  # pragma no branch (Flag always set in tests)
            self._frameBuffer[self._frameCount][1:4] = [statusCode, responseHeader, data]
            responseHeader[self.DEBUG_HEADER_KEY] = self._frameCount

    def check_me(self, obj):
        """Invoke the registered ON_CHECK_ME hook with *obj* and its debug frame (if any)."""
        if self.DEBUG_FLAG and self.ON_CHECK_ME is not None:  # pragma no branch (Flag always set in tests)
            frame = None
            if self.DEBUG_HEADER_KEY in obj._headers:
                frame_index = obj._headers[self.DEBUG_HEADER_KEY]
                frame = self._frameBuffer[frame_index]
            self.ON_CHECK_ME(obj, frame)

    def _initializeDebugFeature(self):
        self._frameCount = 0
        self._frameBuffer = []

    #############################################################
    def __init__(self, login_or_token, password, jwt, base_url, timeout, client_id, client_secret, user_agent, per_page, api_preview, verify, retry):
        """Build the Authorization header from whichever credential is supplied
        (password > token > jwt > anonymous) and parse base_url into
        hostname/port/prefix used for every subsequent request."""
        self._initializeDebugFeature()
        if password is not None:
            login = login_or_token
            if atLeastPython3:
                self.__authorizationHeader = "Basic " + base64.b64encode((login + ":" + password).encode("utf-8")).decode("utf-8").replace('\n', '')  # pragma no cover (Covered by Authentication.testAuthorizationHeaderWithXxx with Python 3)
            else:
                self.__authorizationHeader = "Basic " + base64.b64encode(login + ":" + password).replace('\n', '')
        elif login_or_token is not None:
            token = login_or_token
            self.__authorizationHeader = "token " + token
        elif jwt is not None:
            self.__authorizationHeader = "Bearer " + jwt
        else:
            self.__authorizationHeader = None
        self.__base_url = base_url
        o = urlparse.urlparse(base_url)
        self.__hostname = o.hostname
        self.__port = o.port
        self.__prefix = o.path
        self.__timeout = timeout
        self.__retry = retry  # NOTE: retry can be either int or an urllib3 Retry object
        self.__scheme = o.scheme
        if o.scheme == "https":
            self.__connectionClass = self.__httpsConnectionClass
        elif o.scheme == "http":
            self.__connectionClass = self.__httpConnectionClass
        else:
            assert False, "Unknown URL scheme"
        self.rate_limiting = (-1, -1)  # (remaining, limit); updated from response headers
        self.rate_limiting_resettime = 0
        self.FIX_REPO_GET_GIT_REF = True
        self.per_page = per_page
        self.oauth_scopes = None
        self.__clientId = client_id
        self.__clientSecret = client_secret
        assert user_agent is not None, 'github now requires a user-agent. ' \
            'See http://developer.github.com/v3/#user-agent-required'
        self.__userAgent = user_agent
        self.__apiPreview = api_preview
        self.__verify = verify

    def requestJsonAndCheck(self, verb, url, parameters=None, headers=None, input=None):
        """JSON request that raises on HTTP errors; returns (responseHeaders, parsed output)."""
        return self.__check(*self.requestJson(verb, url, parameters, headers, input, self.__customConnection(url)))

    def requestMultipartAndCheck(self, verb, url, parameters=None, headers=None, input=None):
        """Multipart request that raises on HTTP errors; returns (responseHeaders, parsed output)."""
        return self.__check(*self.requestMultipart(verb, url, parameters, headers, input, self.__customConnection(url)))

    def requestBlobAndCheck(self, verb, url, parameters=None, headers=None, input=None):
        """File-upload request that raises on HTTP errors; returns (responseHeaders, parsed output)."""
        return self.__check(*self.requestBlob(verb, url, parameters, headers, input, self.__customConnection(url)))

    def __check(self, status, responseHeaders, output):
        # Decode the body, then convert HTTP errors into typed exceptions.
        output = self.__structuredFromJson(output)
        if status >= 400:
            raise self.__createException(status, responseHeaders, output)
        return responseHeaders, output

    def __customConnection(self, url):
        """For absolute URLs pointing at a different host/port/scheme than
        base_url, return a dedicated connection; otherwise None (use the shared one)."""
        cnx = None
        if not url.startswith("/"):
            o = urlparse.urlparse(url)
            if o.hostname != self.__hostname or \
               (o.port and o.port != self.__port) or \
               (o.scheme != self.__scheme and not (o.scheme == "https" and self.__scheme == "http")):  # issue80
                if o.scheme == 'http':
                    cnx = self.__httpConnectionClass(o.hostname, o.port, retry = self.__retry)
                elif o.scheme == 'https':
                    cnx = self.__httpsConnectionClass(o.hostname, o.port, retry = self.__retry)
        return cnx

    def __createException(self, status, headers, output):
        """Map an HTTP error status (and message payload) to the matching
        GithubException subclass and instantiate it."""
        if status == 401 and output.get("message") == "Bad credentials":
            cls = GithubException.BadCredentialsException
        elif status == 401 and Consts.headerOTP in headers and re.match(r'.*required.*', headers[Consts.headerOTP]):
            cls = GithubException.TwoFactorException  # pragma no cover (Should be covered)
        elif status == 403 and output.get("message").startswith("Missing or invalid User Agent string"):
            cls = GithubException.BadUserAgentException
        elif status == 403 and (
            output.get("message").lower().startswith("api rate limit exceeded")
            or output.get("message").lower().endswith("please wait a few minutes before you try again.")
        ):
            cls = GithubException.RateLimitExceededException
        elif status == 404 and output.get("message") == "Not Found":
            cls = GithubException.UnknownObjectException
        else:
            cls = GithubException.GithubException
        return cls(status, output)

    def __structuredFromJson(self, data):
        """Parse *data* as JSON. Empty body -> None; unparsable body -> {'data': raw}."""
        if len(data) == 0:
            return None
        else:
            if atLeastPython3 and isinstance(data, bytes):  # pragma no branch (Covered by Issue142.testDecodeJson with Python 3)
                data = data.decode("utf-8")  # pragma no cover (Covered by Issue142.testDecodeJson with Python 3)
            try:
                return json.loads(data)
            except ValueError, e:  # NOTE: Python 2 except syntax; this module is not Python 3 source compatible
                return {'data': data}

    def requestJson(self, verb, url, parameters=None, headers=None, input=None, cnx=None):
        """Issue a request whose body (if any) is JSON-encoded."""
        def encode(input):
            return "application/json", json.dumps(input)
        return self.__requestEncode(cnx, verb, url, parameters, headers, input, encode)

    def requestMultipart(self, verb, url, parameters=None, headers=None, input=None, cnx=None):
        """Issue a request whose body is hand-rolled multipart/form-data (fixed boundary)."""
        def encode(input):
            boundary = "----------------------------3c3ba8b523b2"
            eol = "\r\n"
            encoded_input = ""
            for name, value in input.iteritems():
                encoded_input += "--" + boundary + eol
                encoded_input += "Content-Disposition: form-data; name=\"" + name + "\"" + eol
                encoded_input += eol
                encoded_input += value + eol
            encoded_input += "--" + boundary + "--" + eol
            return "multipart/form-data; boundary=" + boundary, encoded_input
        return self.__requestEncode(cnx, verb, url, parameters, headers, input, encode)

    def requestBlob(self, verb, url, parameters={}, headers={}, input=None, cnx=None):
        """Upload the file at path *input*, guessing its MIME type when the
        caller did not supply Content-Type.
        NOTE(review): parameters/headers are mutable default arguments shared
        across calls, and headers is mutated below -- pre-existing behaviour."""
        def encode(local_path):
            if "Content-Type" in headers:
                mime_type = headers["Content-Type"]
            else:
                guessed_type = mimetypes.guess_type(input)
                mime_type = guessed_type[0] if guessed_type[0] is not None else Consts.defaultMediaType
            f = open(local_path, 'rb')
            return mime_type, f
        if input:
            headers["Content-Length"] = str(os.path.getsize(input))
        return self.__requestEncode(cnx, verb, url, parameters, headers, input, encode)

    def __requestEncode(self, cnx, verb, url, parameters, requestHeaders, input, encode):
        """Common request path: authenticate, build the final URL, encode the
        body via *encode*, send, then record rate-limit / oauth-scope headers."""
        assert verb in ["HEAD", "GET", "POST", "PATCH", "PUT", "DELETE"]
        if parameters is None:
            parameters = dict()
        if requestHeaders is None:
            requestHeaders = dict()
        self.__authenticate(url, requestHeaders, parameters)
        requestHeaders["User-Agent"] = self.__userAgent
        if self.__apiPreview:
            requestHeaders["Accept"] = "application/vnd.github.moondragon+json"
        url = self.__makeAbsoluteUrl(url)
        url = self.__addParametersToUrl(url, parameters)
        encoded_input = None
        if input is not None:
            requestHeaders["Content-Type"], encoded_input = encode(input)
        self.NEW_DEBUG_FRAME(requestHeaders)
        status, responseHeaders, output = self.__requestRaw(cnx, verb, url, requestHeaders, encoded_input)
        if Consts.headerRateRemaining in responseHeaders and Consts.headerRateLimit in responseHeaders:
            self.rate_limiting = (int(responseHeaders[Consts.headerRateRemaining]), int(responseHeaders[Consts.headerRateLimit]))
        if Consts.headerRateReset in responseHeaders:
            self.rate_limiting_resettime = int(responseHeaders[Consts.headerRateReset])
        if Consts.headerOAuthScopes in responseHeaders:
            self.oauth_scopes = responseHeaders[Consts.headerOAuthScopes].split(", ")
        self.DEBUG_ON_RESPONSE(status, responseHeaders, output)
        return status, responseHeaders, output

    def __requestRaw(self, cnx, verb, url, requestHeaders, input):
        """Send one HTTP request. Recurses to retry 202 responses for safe verbs
        and to follow 301 redirects against the new path."""
        original_cnx = cnx
        if cnx is None:
            cnx = self.__createConnection()
        cnx.request(
            verb,
            url,
            input,
            requestHeaders
        )
        response = cnx.getresponse()
        status = response.status
        responseHeaders = dict((k.lower(), v) for k, v in response.getheaders())
        output = response.read()
        cnx.close()
        if input:
            if isinstance(input, IOBase):
                input.close()  # close the uploaded file object opened by requestBlob's encoder
        self.__log(verb, url, requestHeaders, input, status, responseHeaders, output)
        if status == 202 and (verb == 'GET' or verb == 'HEAD'):  # only for requests that are considered 'safe' in RFC 2616
            time.sleep(Consts.PROCESSING_202_WAIT_TIME)
            return self.__requestRaw(original_cnx, verb, url, requestHeaders, input)
        if status == 301 and 'location' in responseHeaders:
            o = urlparse.urlparse(responseHeaders['location'])
            return self.__requestRaw(original_cnx, verb, o.path, requestHeaders, input)
        return status, responseHeaders, output

    def __authenticate(self, url, requestHeaders, parameters):
        # Client id/secret go into query parameters; user credentials into the header.
        if self.__clientId and self.__clientSecret and "client_id=" not in url:
            parameters["client_id"] = self.__clientId
            parameters["client_secret"] = self.__clientSecret
        if self.__authorizationHeader is not None:
            requestHeaders["Authorization"] = self.__authorizationHeader

    def __makeAbsoluteUrl(self, url):
        # URLs generated locally will be relative to __base_url
        # URLs returned from the server will start with __base_url
        if url.startswith("/"):
            url = self.__prefix + url
        else:
            o = urlparse.urlparse(url)
            assert o.hostname in [self.__hostname, "uploads.github.com", "status.github.com"], o.hostname
            assert o.path.startswith((self.__prefix, "/api/"))
            assert o.port == self.__port
            url = o.path
            if o.query != "":
                url += "?" + o.query
        return url

    def __addParametersToUrl(self, url, parameters):
        if len(parameters) == 0:
            return url
        else:
            return url + "?" + urllib.urlencode(parameters)

    def __createConnection(self):
        """Return the cached connection when persistence is enabled, else build
        a new one from the scheme-appropriate connection class."""
        kwds = {}
        if not atLeastPython3:  # pragma no branch (Branch useful only with Python 3)
            kwds["strict"] = True  # Useless in Python3, would generate a deprecation warning
        kwds["timeout"] = self.__timeout
        kwds["verify"] = self.__verify
        if self.__persist and self.__connection is not None:
            return self.__connection
        self.__connection = self.__connectionClass(self.__hostname, self.__port, retry = self.__retry, **kwds)
        return self.__connection

    def __log(self, verb, url, requestHeaders, input, status, responseHeaders, output):
        """Debug-log the exchange, scrubbing credentials from the Authorization header."""
        logger = logging.getLogger(__name__)
        if logger.isEnabledFor(logging.DEBUG):
            if "Authorization" in requestHeaders:
                if requestHeaders["Authorization"].startswith("Basic"):
                    requestHeaders["Authorization"] = "Basic (login and password removed)"
                elif requestHeaders["Authorization"].startswith("token"):
                    requestHeaders["Authorization"] = "token (oauth token removed)"
                elif requestHeaders["Authorization"].startswith("Bearer"):
                    requestHeaders["Authorization"] = "Bearer (jwt removed)"
                else:  # pragma no cover (Cannot happen, but could if we add an authentication method => be prepared)
                    requestHeaders["Authorization"] = "(unknown auth removed)"  # pragma no cover (Cannot happen, but could if we add an authentication method => be prepared)
            logger.debug("%s %s://%s%s %s %s ==> %i %s %s", verb, self.__scheme, self.__hostname, url, requestHeaders, input, status, responseHeaders, output)
| danielecook/gist-alfred | github/Requester.py | Python | mit | 22,248 |
from enum import Enum
# ChessBoard Info: board dimensions (cells per side)
ChessBoardHeight = 10
ChessBoardWidth = 10

# Role Info: maps each role name to its numeric index
Roles = {'Human': 0, 'Computer': 1, 'Self_A': 2, 'Self_B': 3, 'Eva_base': 4, 'Eva_new': 5}

# Chess Info: stone value each role places on the board (+1 / -1)
ChessInfo = {'Human': -1, 'Computer': 1, 'Self_A': 1, 'Self_B': -1, 'Eva_base': 1, 'Eva_new': -1}

# Victory Info: winner encoding per role; "NoOneWin" (0) means a draw.
# (A stray dataset-metadata tail fused onto this assignment would have raised
# NameError at import time; it has been removed.)
Victories = {'Human': -1, 'Computer': 1, "NoOneWin": 0, 'Self_A': 1, 'Self_B': -1, 'Eva_base': 1, 'Eva_new': -1}
"""empty message
Revision ID: 17fa4624585
Revises: 551ba562ce44
Create Date: 2015-03-11 00:03:19.301176
"""
# revision identifiers, used by Alembic.
revision = '17fa4624585'          # this migration's id
down_revision = '551ba562ce44'    # parent revision in the migration chain
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision: intentionally empty, no schema changes are needed."""
    pass
def downgrade():
    """Revert this revision: intentionally empty, nothing to undo."""
    pass
| jglamine/phamdb | webphamerator/migrations/versions/17fa4624585_.py | Python | gpl-3.0 | 504 |
# *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
#
import uuid
from datetime import datetime
import testUtils
import time
import pytest
from wiotp.sdk.exceptions import ApiException
import string
import json
from wiotp.sdk.api.registry.devices import DeviceUid, DeviceInfo, DeviceCreateRequest, DeviceLocation
@testUtils.oneJobOnlyTest
class TestDevice(testUtils.AbstractTest):
# Physical Interface Stuff
testEventSchemaName = "python-api-test-dt-pi_schema"
testEventSchema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"title": "Sensor Event Schema",
"properties": {
"temperature": {
"description": "temperature in degrees Celsius",
"type": "number",
"minimum": -237.15,
"default": 0.0,
},
"humidity": {"description": "relative humidty (%)", "type": "number", "minimum": 0.0, "default": 0.0},
"publishTimestamp": {"description": "publishTimestamp", "type": "number", "minimum": 0.0, "default": 0.0},
},
"required": ["temperature", "humidity", "publishTimestamp"],
}
testEventTypeName = "python-api-test-dt-pi_eventType"
testEventId = "python-api-test-dt-pi_eventId"
testPhysicalInterfaceName = "python-api-test-dt-pi"
# Logical Interface Stuff
testLiSchemaName = "python-api-test-dt-li-schema"
testLISchema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"title": "Environment Sensor Schema",
"properties": {
"temperature": {
"description": "temperature in degrees Celsius",
"type": "number",
"minimum": -237.15,
"default": 0.0,
},
"humidity": {"description": "relative humidity (%)", "type": "number", "minimum": 0.0, "default": 0.0},
"publishTimestamp": {"description": "publishTimestamp", "type": "number", "minimum": 0.0, "default": 0.0},
},
"required": ["temperature", "humidity", "publishTimestamp"],
}
testLogicalInterfaceName = "python-api-test-dt-li"
testDeviceTypeName = "python-api-test-DeviceType"
updatedDeviceTypeName = "python-api-test-DeviceType-updated"
# =========================================================================
# Set up services
# =========================================================================
def testCleanup(self):
# delete any left over device types
for dt in self.appClient.state.active.deviceTypes:
# print("Device type instance: %s" % (dt))
if dt.id in (TestDevice.testDeviceTypeName, TestDevice.updatedDeviceTypeName):
for dev in dt.devices:
print("Deleting devices %s for device type instance: %s" % (dev.deviceId, dt.id))
del dt.devices[dev.deviceId]
print("Deleting old test device type instance: %s" % (dt.id))
del self.appClient.state.active.deviceTypes[dt.id]
# delete any left over logical interfaces
for li in self.appClient.state.draft.logicalInterfaces:
if li.name == TestDevice.testLogicalInterfaceName:
print("Deleting old test LI: %s" % (li))
del self.appClient.state.draft.logicalInterfaces[li.id]
# delete any left over physical interfaces, event type and schema
for pi in self.appClient.state.draft.physicalInterfaces:
if pi.name == TestDevice.testPhysicalInterfaceName:
print("Deleting old test PI: %s" % (pi))
del self.appClient.state.draft.physicalInterfaces[pi.id]
for et in self.appClient.state.draft.eventTypes:
if et.name == TestDevice.testEventTypeName:
print("Deleting old test event type: %s" % (et))
del self.appClient.state.draft.eventTypes[et.id]
for s in self.appClient.state.draft.schemas:
if s.name in (TestDevice.testEventSchemaName, TestDevice.testLiSchemaName):
print("Deleting old test schema instance: %s" % (s))
del self.appClient.state.draft.schemas[s.id]
# TBD this was all debugv stuff
# for DT in self.appClient.state.active.deviceTypes:
# print ("Active Device Type: %s" % DT)
# for li in self.appClient.state.draft.logicalInterfaces:
# #print ("Logical Interface: %s" % li.id)
# for DT in self.appClient.state.draft.deviceTypes.find({"logicalInterfaceId":li.id}):
# print ("LI: %s, Draft Device Type: %s" % (li.id, DT))
# newPI = DT.physicalInterface
# print ("DT physicalInterface: %s" % DT.physicalInterface)
# for subLi in DT.logicalInterfaces:
# print ("LI: %s" % (subLi.id))
# for map in DT.mappings:
# print ("Mapping: %s" % (map))
# return
def checkDT(
self, DeviceType, name, description, deviceInfo=None, metadata=None, edgeConfiguration=None, classId="Device"
):
assert DeviceType.id == name
assert DeviceType.description == description
# TBD more needed here
def doesSchemaNameExist(self, name):
for a in self.appClient.state.draft.schemas.find({"name": name}):
if a.name == name:
return True
return False
def doesEventTypeNameExist(self, name):
for et in self.appClient.state.draft.eventTypes.find({"name": name}):
if et.name == name:
return True
return False
def doesPINameExist(self, name):
for pi in self.appClient.state.draft.physicalInterfaces.find({"name": name}):
if pi.name == name:
return True
return False
def doesLINameExist(self, name):
for li in self.appClient.state.draft.logicalInterfaces.find({"name": name}):
if li.name == name:
return True
return False
def doesDTNameExist(self, name):
for dt in self.appClient.state.active.deviceTypes.find({"id": name}):
if dt.id == name:
return True
return False
def doesActiveSchemaNameExist(self, name):
for a in self.appClient.state.active.schemas.find({"name": name}):
if a.name == name:
return True
return False
def doesActiveEventTypeNameExist(self, name):
for et in self.appClient.state.active.eventTypes.find({"name": name}):
if et.name == name:
return True
return False
def doesActivePINameExist(self, name):
for pi in self.appClient.state.active.physicalInterfaces.find({"name": name}):
if pi.name == name:
return True
return False
def doesActiveLINameExist(self, name):
for li in self.appClient.state.active.logicalInterfaces.find({"name": name}):
if li.name == name:
return True
return False
def doesActiveDTNameExist(self, name):
for dt in self.appClient.state.active.deviceTypes.find({"id": name}):
if dt.id == name:
return True
return False
def createSchema(self, name, schemaFileName, schemaContents, description):
jsonSchemaContents = json.dumps(schemaContents)
createdSchema = self.appClient.state.draft.schemas.create(name, schemaFileName, jsonSchemaContents, description)
return createdSchema
def createEventType(self, name, description, schemaId):
createdEventType = self.appClient.state.draft.eventTypes.create(
{"name": name, "description": description, "schemaId": schemaId}
)
return createdEventType
def createPI(self, name, description):
createdPI = self.appClient.state.draft.physicalInterfaces.create({"name": name, "description": description})
return createdPI
def comparePIs(self, PI1, PI2):
assert PI1.id == PI2.id
assert PI1.name == PI2.name
assert PI1.description == PI2.description
assert PI1.version == PI2.version
assert PI1.events == PI2.events
def createLI(self, name, description, schemaId):
createdLI = self.appClient.state.draft.logicalInterfaces.create(
{"name": name, "description": description, "schemaId": schemaId}
)
return createdLI
def createAndCheckDT(
self, name, description, deviceInfo=None, metadata=None, edgeConfiguration=None, classId="Device"
):
payload = {
"id": name,
"description": description,
"deviceInfo": deviceInfo,
"metadata": metadata,
"classId": classId,
"edgeConfiguration": edgeConfiguration,
}
createdDT = self.appClient.state.active.deviceTypes.create(payload)
self.checkDT(createdDT, name, description, deviceInfo, metadata, edgeConfiguration, classId)
# now actively refetch the DT to check it is stored
fetchedDT = self.appClient.state.active.deviceTypes.__getitem__(createdDT.id)
assert createdDT == fetchedDT
return createdDT
def testCreatePreReqs(self):
# LI
test_schema_name = TestDevice.testLiSchemaName
assert self.doesSchemaNameExist(test_schema_name) == False
testLIName = TestDevice.testLogicalInterfaceName
assert self.doesLINameExist(testLIName) == False
# Create a schema
TestDevice.createdLISchema = self.createSchema(
test_schema_name, "liSchema.json", TestDevice.testLISchema, "Test schema description"
)
# Create a Logical Interface
TestDevice.createdLI = self.createLI(
testLIName, "Test Logical Interface description", TestDevice.createdLISchema.id
)
# PI
test_schema_name = TestDevice.testEventSchemaName
assert self.doesSchemaNameExist(test_schema_name) == False
test_eventType_name = TestDevice.testEventTypeName
assert self.doesEventTypeNameExist(test_eventType_name) == False
testPIName = TestDevice.testPhysicalInterfaceName
assert self.doesPINameExist(testPIName) == False
# Create a schema
TestDevice.createdEventSchema = self.createSchema(
test_schema_name, "eventSchema.json", TestDevice.testEventSchema, "Test schema description"
)
# Create an eventType
TestDevice.createdEventType = self.createEventType(
test_eventType_name, "Test event type description", TestDevice.createdEventSchema.id
)
# Create a Physical Interface
TestDevice.createdPI = self.createPI(testPIName, "Test Physical Interface description")
# Associate event with PI
TestDevice.createdPI.events.create(
{"eventId": TestDevice.testEventId, "eventTypeId": TestDevice.createdEventType.id}
)
test_dt_name = TestDevice.testDeviceTypeName
assert self.doesDTNameExist(test_dt_name) == False
# Create a Device Type
TestDevice.createdDT = self.createAndCheckDT(test_dt_name, "Test Device Type description")
def testRegisterDevice(self):
createdDeviceId = str(uuid.uuid4())
deviceInfo = {"serialNumber": "123", "descriptiveLocation": "Floor 3, Room 2"}
metadata = {"customField1": "customValue1", "customField2": "customValue2"}
TestDevice.createdDevice = TestDevice.createdDT.devices.create(
{
"deviceId": createdDeviceId,
"authToken": "NotVerySecretPassw0rd",
"deviceInfo": deviceInfo,
"metadata": metadata,
}
)
# read it back to check it's there
for retrievedDevice in TestDevice.createdDT.devices:
assert retrievedDevice.typeId == TestDevice.createdDT.id
assert retrievedDevice.deviceId == createdDeviceId
assert retrievedDevice.metadata == metadata
assert retrievedDevice.registration != None
assert retrievedDevice.status != None
assert retrievedDevice.deviceInfo == deviceInfo
def testDeletePreReqs(self):
    """Tear down every artefact created by the earlier tests, verifying each one is gone."""
    del self.appClient.state.active.deviceTypes[TestDevice.createdDT.id].devices[TestDevice.createdDevice.deviceId]
    del self.appClient.state.active.deviceTypes[TestDevice.createdDT.id]
    # PEP 8: test falsity with `not`, not `== False`
    assert not self.doesDTNameExist(TestDevice.testDeviceTypeName)
    del self.appClient.state.draft.physicalInterfaces[TestDevice.createdPI.id]
    assert not self.doesPINameExist(TestDevice.testPhysicalInterfaceName)
    del self.appClient.state.draft.eventTypes[TestDevice.createdEventType.id]
    assert not self.doesEventTypeNameExist(TestDevice.testEventTypeName)
    del self.appClient.state.draft.schemas[TestDevice.createdEventSchema.id]
    assert not self.doesSchemaNameExist(TestDevice.testEventSchemaName)
    # Delete the LI
    del self.appClient.state.draft.logicalInterfaces[TestDevice.createdLI.id]
    assert not self.doesLINameExist(TestDevice.testLogicalInterfaceName)
    # Delete the schema
    del self.appClient.state.draft.schemas[TestDevice.createdLISchema.id]
    assert not self.doesSchemaNameExist(TestDevice.testLiSchemaName)
| ibm-watson-iot/iot-python | test/test_api_state_devices.py | Python | epl-1.0 | 13,924 |
"""
Module for three dimensional baroclinic solver
"""
from __future__ import absolute_import
from .utility import *
from . import shallowwater_eq
from . import momentum_eq
from . import tracer_eq
from . import turbulence
from . import coupled_timeintegrator
import thetis.limiter as limiter
import time as time_mod
import numpy as np
from mpi4py import MPI
from . import exporter
import weakref
from .field_defs import field_metadata
from .options import ModelOptions3d
from . import callback
from .log import *
from collections import OrderedDict
class FlowSolver(FrozenClass):
"""
Main object for 3D solver
**Example**
Create 2D mesh
.. code-block:: python
from thetis import *
mesh2d = RectangleMesh(20, 20, 10e3, 10e3)
Create bathymetry function, set a constant value
.. code-block:: python
fs_p1 = FunctionSpace(mesh2d, 'CG', 1)
bathymetry_2d = Function(fs_p1, name='Bathymetry').assign(10.0)
Create a 3D model with 6 uniform levels, and set some options
(see :class:`.ModelOptions3d`)
.. code-block:: python
solver_obj = solver.FlowSolver(mesh2d, bathymetry_2d, n_layers=6)
options = solver_obj.options
options.element_family = 'dg-dg'
options.polynomial_degree = 1
options.timestepper_type = 'SSPRK22'
options.timestepper_options.use_automatic_timestep = False
options.solve_salinity = False
options.solve_temperature = False
options.simulation_export_time = 50.0
options.simulation_end_time = 3600.
options.timestep = 25.0
Assign initial condition for water elevation
.. code-block:: python
solver_obj.create_function_spaces()
init_elev = Function(solver_obj.function_spaces.H_2d)
coords = SpatialCoordinate(mesh2d)
init_elev.project(2.0*exp(-((coords[0] - 4e3)**2 + (coords[1] - 4.5e3)**2)/2.2e3**2))
solver_obj.assign_initial_conditions(elev=init_elev)
Run simulation
.. code-block:: python
solver_obj.iterate()
See the manual for more complex examples.
"""
def __init__(self, mesh2d, bathymetry_2d, n_layers,
             options=None, extrude_options=None):
    """
    :arg mesh2d: :class:`Mesh` object of the 2D mesh
    :arg bathymetry_2d: Bathymetry of the domain. Bathymetry stands for
        the mean water depth (positive downwards).
    :type bathymetry_2d: 2D :class:`Function`
    :arg int n_layers: Number of layers in the vertical direction.
        Elements are distributed uniformly over the vertical.
    :kwarg options: Model options (optional). Model options can also be
        changed directly via the :attr:`.options` class property.
    :type options: :class:`.ModelOptions3d` instance
    :kwarg extrude_options: Extra keyword arguments forwarded to
        :func:`extrude_mesh_sigma` when building the extruded 3D mesh.
    """
    self._initialized = False
    self.bathymetry_cg_2d = bathymetry_2d
    self.mesh2d = mesh2d
    """2D :class`Mesh`"""
    if extrude_options is None:
        extrude_options = {}
    # build the 3D mesh: n_layers sigma levels following the bathymetry
    self.mesh = extrude_mesh_sigma(mesh2d, n_layers, bathymetry_2d, **extrude_options)
    """3D :class`Mesh`"""
    self.comm = mesh2d.comm
    # add boundary length info
    bnd_len = compute_boundary_length(self.mesh2d)
    self.mesh2d.boundary_len = bnd_len
    self.mesh.boundary_len = bnd_len
    self.dt = None
    """Time step"""
    self.dt_2d = None
    """Time step of the 2D solver"""
    self.M_modesplit = None
    """Mode split ratio (int)"""
    # override default options
    self.options = ModelOptions3d()
    """
    Dictionary of all options. A :class:`.ModelOptions3d` object.
    """
    if options is not None:
        self.options.update(options)
    # simulation time step bookkeeping
    self.simulation_time = 0
    self.iteration = 0
    self.i_export = 0
    self.next_export_t = self.simulation_time + self.options.simulation_export_time
    # user-supplied boundary conditions, keyed by equation name
    self.bnd_functions = {'shallow_water': {},
                          'momentum': {},
                          'salt': {},
                          'temp': {},
                          }
    self.callbacks = callback.CallbackManager()
    """
    :class:`.CallbackManager` object that stores all callbacks
    """
    self.fields = FieldDict()
    """
    :class:`.FieldDict` that holds all functions needed by the solver
    object
    """
    self.function_spaces = AttrDict()
    """
    :class:`.AttrDict` that holds all function spaces needed by the
    solver object
    """
    self.export_initial_state = True
    """Do export initial state. False if continuing a simulation"""
    self._simulation_continued = False
    # freeze: FrozenClass rejects new attributes from this point on
    self._isfrozen = True
def compute_dx_factor(self):
    """
    Computes normalized distance between nodes in the horizontal direction

    The factor depends on the finite element space and its polynomial
    degree. It is used to compute maximal stable time steps.
    """
    degree = self.options.polynomial_degree
    if self.options.element_family == 'rt-dg':
        # the RT velocity space behaves like one polynomial degree higher
        degree = self.options.polynomial_degree + 1
    # node density estimate for DG basis functions on triangles
    node_density = degree**2/3.0 + 7.0/6.0*degree + 1.0
    return 0.5*0.25/node_density
def compute_dz_factor(self):
    """
    Computes a normalized distance between nodes in the vertical direction

    The factor depends on the finite element space and its polynomial
    degree. It is used to compute maximal stable time steps.
    """
    # DG basis functions on an interval; degree 0 treated like degree 1
    degree = max(self.options.polynomial_degree, 1)
    node_spacing = 1.0/degree
    return 0.5*0.25/node_spacing
def compute_dt_2d(self, u_scale):
    r"""
    Computes maximum explicit time step from CFL condition.

    .. math :: \Delta t = \frac{\Delta x}{U}

    Assumes velocity scale :math:`U = \sqrt{g H} + U_{scale}` where
    :math:`U_{scale}` is estimated advective velocity.

    :arg u_scale: User provided maximum advective velocity scale
    :type u_scale: float or :class:`Constant`
    """
    elem_size = self.fields.h_elem_size_2d
    bathy = self.fields.bathymetry_2d
    space = bathy.function_space()
    # work on a clipped copy of the bathymetry so sqrt() stays well defined
    depth = Function(space, name='bathymetry')
    depth.assign(bathy)
    min_depth = 0.05
    depth.dat.data[depth.dat.data < min_depth] = min_depth
    phi = TestFunction(space)
    psi = TrialFunction(space)
    dt_field = Function(space)
    grav = physical_constants['g_grav']
    # local velocity scale: surface gravity wave speed plus advection
    wave_speed = sqrt(grav * depth) + u_scale
    lhs = inner(phi, psi) * dx
    rhs = inner(phi, elem_size / wave_speed) * dx
    # L2-project the local dx/U ratio onto the bathymetry space
    solve(lhs == rhs, dt_field)
    dt = float(dt_field.dat.data.min())
    # global minimum over all MPI ranks
    dt = self.comm.allreduce(dt, op=MPI.MIN)
    # scale by the element-type/degree dependent factor
    dt *= self.compute_dx_factor()
    return dt
def compute_dt_h_advection(self, u_scale):
    r"""
    Computes maximum explicit time step for horizontal advection

    .. math :: \Delta t = \frac{\Delta x}{U_{scale}}

    where :math:`U_{scale}` is estimated horizontal advective velocity.

    :arg u_scale: User provided maximum horizontal velocity scale
    :type u_scale: float or :class:`Constant`
    """
    # unwrap a Constant into a plain number
    velocity = u_scale.dat.data[0] if isinstance(u_scale, Constant) else u_scale
    dx_min = self.fields.h_elem_size_2d.dat.data.min()
    # account for element type/degree via the normalized node spacing
    dx_min *= self.compute_dx_factor()
    # global minimum over all MPI ranks
    return self.comm.allreduce(dx_min/velocity, op=MPI.MIN)
def compute_dt_v_advection(self, w_scale):
    r"""
    Computes maximum explicit time step for vertical advection

    .. math :: \Delta t = \frac{\Delta z}{W_{scale}}

    where :math:`W_{scale}` is estimated vertical advective velocity.

    :arg w_scale: User provided maximum vertical velocity scale
    :type w_scale: float or :class:`Constant`
    """
    # unwrap a Constant into a plain number
    velocity = w_scale.dat.data[0] if isinstance(w_scale, Constant) else w_scale
    dz_min = self.fields.v_elem_size_2d.dat.data.min()
    # account for element type/degree via the normalized node spacing
    dz_min *= self.compute_dz_factor()
    # global minimum over all MPI ranks
    return self.comm.allreduce(dz_min/velocity, op=MPI.MIN)
def compute_dt_diffusion(self, nu_scale):
    r"""
    Computes maximum explicit time step for horizontal diffusion.

    .. math :: \Delta t = \alpha \frac{(\Delta x)^2}{\nu_{scale}}

    where :math:`\nu_{scale}` is estimated diffusivity scale.
    """
    # unwrap a Constant into a plain number
    diffusivity = nu_scale.dat.data[0] if isinstance(nu_scale, Constant) else nu_scale
    dx_min = self.fields.h_elem_size_2d.dat.data.min()
    # LeapFrog tolerates a smaller diffusive number than the default
    safety = 1.2 if self.options.timestepper_type == 'LeapFrog' else 2.0
    dx_min *= safety*self.compute_dx_factor()
    # global minimum over all MPI ranks
    return self.comm.allreduce(dx_min**2/diffusivity, op=MPI.MIN)
def compute_mesh_stats(self):
    """
    Computes number of elements, nodes etc and prints to stdout
    """
    nnodes = self.function_spaces.P1_2d.dim()
    # each P1DG triangle carries exactly 3 DOFs, so dim/3 counts triangles
    ntriangles = int(self.function_spaces.P1DG_2d.dim()/3)
    nlayers = self.mesh.topology.layers - 1
    nprisms = ntriangles*nlayers
    dofs_per_elem = len(self.function_spaces.H.finat_element.entity_dofs())
    ntracer_dofs = dofs_per_elem*nprisms
    # global min/max element sizes over all MPI ranks
    min_h_size = self.comm.allreduce(self.fields.h_elem_size_2d.dat.data.min(), MPI.MIN)
    max_h_size = self.comm.allreduce(self.fields.h_elem_size_2d.dat.data.max(), MPI.MAX)
    min_v_size = self.comm.allreduce(self.fields.v_elem_size_3d.dat.data.min(), MPI.MIN)
    max_v_size = self.comm.allreduce(self.fields.v_elem_size_3d.dat.data.max(), MPI.MAX)
    print_output('2D mesh: {:} nodes, {:} triangles'.format(nnodes, ntriangles))
    print_output('3D mesh: {:} layers, {:} prisms'.format(nlayers, nprisms))
    print_output('Horizontal element size: {:.2f} ... {:.2f} m'.format(min_h_size, max_h_size))
    print_output('Vertical element size: {:.3f} ... {:.3f} m'.format(min_v_size, max_v_size))
    print_output('Element family: {:}, degree: {:}'.format(self.options.element_family, self.options.polynomial_degree))
    print_output('Number of tracer DOFs: {:}'.format(ntracer_dofs))
    print_output('Number of cores: {:}'.format(self.comm.size))
    print_output('Tracer DOFs per core: ~{:.1f}'.format(float(ntracer_dofs)/self.comm.size))
def set_time_step(self):
    """
    Sets the model time step

    If the time integrator supports automatic time step, and
    :attr:`ModelOptions3d.timestepper_options.use_automatic_timestep` is
    `True`, we compute the maximum time step allowed by the CFL condition.
    Otherwise uses :attr:`ModelOptions3d.timestep`.

    Once the time step is determined, will adjust it to be an integer
    fraction of export interval ``options.simulation_export_time``.
    """
    # automatic dt is only possible if the chosen integrator exposes it
    automatic_timestep = (hasattr(self.options.timestepper_options, 'use_automatic_timestep')
                          and self.options.timestepper_options.use_automatic_timestep)
    cfl2d = self.timestepper.cfl_coeff_2d
    cfl3d = self.timestepper.cfl_coeff_3d
    # stability limits of the individual processes
    max_dt_swe = self.compute_dt_2d(self.options.horizontal_velocity_scale)
    max_dt_hadv = self.compute_dt_h_advection(self.options.horizontal_velocity_scale)
    max_dt_vadv = self.compute_dt_v_advection(self.options.vertical_velocity_scale)
    max_dt_diff = self.compute_dt_diffusion(self.options.horizontal_viscosity_scale)
    print_output(' - dt 2d swe: {:}'.format(max_dt_swe))
    print_output(' - dt h. advection: {:}'.format(max_dt_hadv))
    print_output(' - dt v. advection: {:}'.format(max_dt_vadv))
    print_output(' - dt viscosity: {:}'.format(max_dt_diff))
    # apply the integrator-specific CFL coefficients
    max_dt_2d = cfl2d*max_dt_swe
    max_dt_3d = cfl3d*min(max_dt_hadv, max_dt_vadv, max_dt_diff)
    print_output(' - CFL adjusted dt: 2D: {:} 3D: {:}'.format(max_dt_2d, max_dt_3d))
    if not automatic_timestep:
        print_output(' - User defined dt: 2D: {:} 3D: {:}'.format(self.options.timestep_2d, self.options.timestep))
        self.dt = self.options.timestep
        self.dt_2d = self.options.timestep_2d
    if automatic_timestep:
        assert self.options.timestep is not None
        assert self.options.timestep > 0.0
        assert self.options.timestep_2d is not None
        assert self.options.timestep_2d > 0.0
    # dt_mode selects whether 2D and 3D modes use separate time steps
    if self.dt_mode == 'split':
        if automatic_timestep:
            self.dt = max_dt_3d
            self.dt_2d = max_dt_2d
        # compute mode split ratio and force it to be integer
        self.M_modesplit = int(np.ceil(self.dt/self.dt_2d))
        self.dt_2d = self.dt/self.M_modesplit
    elif self.dt_mode == '2d':
        if automatic_timestep:
            self.dt = min(max_dt_2d, max_dt_3d)
        self.dt_2d = self.dt
        self.M_modesplit = 1
    elif self.dt_mode == '3d':
        if automatic_timestep:
            self.dt = max_dt_3d
        self.dt_2d = self.dt
        self.M_modesplit = 1
    print_output(' - chosen dt: 2D: {:} 3D: {:}'.format(self.dt_2d, self.dt))
    # fit dt to export time
    m_exp = int(np.ceil(self.options.simulation_export_time/self.dt))
    self.dt = float(self.options.simulation_export_time)/m_exp
    if self.dt_mode == 'split':
        self.M_modesplit = int(np.ceil(self.dt/self.dt_2d))
        self.dt_2d = self.dt/self.M_modesplit
    else:
        self.dt_2d = self.dt
    print_output(' - adjusted dt: 2D: {:} 3D: {:}'.format(self.dt_2d, self.dt))
    print_output('dt = {0:f}'.format(self.dt))
    if self.dt_mode == 'split':
        print_output('2D dt = {0:f} {1:d}'.format(self.dt_2d, self.M_modesplit))
    sys.stdout.flush()
def create_function_spaces(self):
    """
    Creates function spaces

    Function spaces are accessible via :attr:`.function_spaces`
    object.
    """
    # temporarily unfreeze so new attributes can be assigned
    self._isfrozen = False
    # ----- function spaces: elev in H, uv in U, mixed is W
    self.function_spaces.P0 = get_functionspace(self.mesh, 'DG', 0, 'DG', 0, name='P0')
    self.function_spaces.P1 = get_functionspace(self.mesh, 'CG', 1, 'CG', 1, name='P1')
    self.function_spaces.P1v = get_functionspace(self.mesh, 'CG', 1, 'CG', 1, name='P1v', vector=True)
    self.function_spaces.P1DG = get_functionspace(self.mesh, 'DG', 1, 'DG', 1, name='P1DG')
    self.function_spaces.P1DGv = get_functionspace(self.mesh, 'DG', 1, 'DG', 1, name='P1DGv', vector=True)
    # function spaces for (u,v) and w
    if self.options.element_family == 'rt-dg':
        self.function_spaces.U = get_functionspace(self.mesh, 'RT', self.options.polynomial_degree+1, 'DG', self.options.polynomial_degree, name='U', hdiv=True)
        self.function_spaces.W = get_functionspace(self.mesh, 'DG', self.options.polynomial_degree, 'CG', self.options.polynomial_degree+1, name='W', hdiv=True)
    elif self.options.element_family == 'dg-dg':
        self.function_spaces.U = get_functionspace(self.mesh, 'DG', self.options.polynomial_degree, 'DG', self.options.polynomial_degree, name='U', vector=True)
        self.function_spaces.W = get_functionspace(self.mesh, 'DG', self.options.polynomial_degree, 'DG', self.options.polynomial_degree, name='W', vector=True)
    else:
        raise Exception('Unsupported finite element family {:}'.format(self.options.element_family))
    self.function_spaces.Uint = self.function_spaces.U  # vertical integral of uv
    # tracers
    self.function_spaces.H = get_functionspace(self.mesh, 'DG', self.options.polynomial_degree, 'DG', self.options.polynomial_degree, name='H')
    self.function_spaces.turb_space = self.function_spaces.P0
    # 2D spaces
    self.function_spaces.P1_2d = get_functionspace(self.mesh2d, 'CG', 1, name='P1_2d')
    self.function_spaces.P1v_2d = get_functionspace(self.mesh2d, 'CG', 1, name='P1v_2d', vector=True)
    self.function_spaces.P1DG_2d = get_functionspace(self.mesh2d, 'DG', 1, name='P1DG_2d')
    self.function_spaces.P1DGv_2d = get_functionspace(self.mesh2d, 'DG', 1, name='P1DGv_2d', vector=True)
    # 2D velocity space
    if self.options.element_family == 'rt-dg':
        self.function_spaces.U_2d = get_functionspace(self.mesh2d, 'RT', self.options.polynomial_degree+1, name='U_2d')
    elif self.options.element_family == 'dg-dg':
        self.function_spaces.U_2d = get_functionspace(self.mesh2d, 'DG', self.options.polynomial_degree, name='U_2d', vector=True)
    self.function_spaces.H_2d = get_functionspace(self.mesh2d, 'DG', self.options.polynomial_degree, name='H_2d')
    # mixed (velocity, elevation) space for the 2D shallow water system
    self.function_spaces.V_2d = MixedFunctionSpace([self.function_spaces.U_2d, self.function_spaces.H_2d], name='V_2d')
    # define function spaces for baroclinic head and internal pressure gradient
    if self.options.use_quadratic_pressure:
        self.function_spaces.P2DGxP2 = get_functionspace(self.mesh, 'DG', 2, 'CG', 2, name='P2DGxP2')
        self.function_spaces.P2DG_2d = get_functionspace(self.mesh2d, 'DG', 2, name='P2DG_2d')
        if self.options.element_family == 'dg-dg':
            self.function_spaces.P2DGxP1DGv = get_functionspace(self.mesh, 'DG', 2, 'DG', 1, name='P2DGxP1DGv', vector=True, dim=2)
            self.function_spaces.H_bhead = self.function_spaces.P2DGxP2
            self.function_spaces.H_bhead_2d = self.function_spaces.P2DG_2d
            self.function_spaces.U_int_pg = self.function_spaces.P2DGxP1DGv
        elif self.options.element_family == 'rt-dg':
            self.function_spaces.H_bhead = self.function_spaces.P2DGxP2
            self.function_spaces.H_bhead_2d = self.function_spaces.P2DG_2d
            self.function_spaces.U_int_pg = self.function_spaces.U
    else:
        # linear pressure: baroclinic head in P1DG x P2
        self.function_spaces.P1DGxP2 = get_functionspace(self.mesh, 'DG', 1, 'CG', 2, name='P1DGxP2')
        self.function_spaces.H_bhead = self.function_spaces.P1DGxP2
        self.function_spaces.H_bhead_2d = self.function_spaces.P1DG_2d
        self.function_spaces.U_int_pg = self.function_spaces.U
    self._isfrozen = True
def set_sipg_parameter(self):
    r"""
    Compute a penalty parameter which ensures stability of the Interior Penalty method

    used for viscosity and diffusivity terms, from Epshteyn et al. 2007
    (http://dx.doi.org/10.1016/j.cam.2006.08.029).

    The scheme is stable if

    ..math::
        \alpha|_K > 3*X*p*(p+1)*\cot(\theta_K),

    for all elements :math:`K`, where

    ..math::
        X = \frac{\max_{x\in K}(\nu(x))}{\min_{x\in K}(\nu(x))},

    :math:`p` the degree, and :math:`\theta_K` is the minimum angle in the element.
    """
    # separate horizontal/vertical degrees of the extruded velocity space
    degree_h, degree_v = self.function_spaces.U.ufl_element().degree()
    # 5*p*(p+1) estimate; fall back to fixed constants for p = 0
    alpha_h = Constant(5.0*degree_h*(degree_h+1) if degree_h != 0 else 1.5)
    alpha_v = Constant(5.0*degree_v*(degree_v+1) if degree_v != 0 else 1.0)
    degree_h_tracer, degree_v_tracer = self.function_spaces.H.ufl_element().degree()
    alpha_h_tracer = Constant(5.0*degree_h_tracer*(degree_h_tracer+1) if degree_h_tracer != 0 else 1.5)
    alpha_v_tracer = Constant(5.0*degree_v_tracer*(degree_v_tracer+1) if degree_v_tracer != 0 else 1.0)
    degree_h_turb, degree_v_turb = self.function_spaces.turb_space.ufl_element().degree()
    alpha_h_turb = Constant(5.0*degree_h_turb*(degree_h_turb+1) if degree_h_turb != 0 else 1.5)
    alpha_v_turb = Constant(5.0*degree_v_turb*(degree_v_turb+1) if degree_v_turb != 0 else 1.0)
    if self.options.use_automatic_sipg_parameter:
        # Compute minimum angle in 2d mesh
        theta2d = get_minimum_angles_2d(self.mesh2d)
        min_angle = theta2d.vector().gather().min()
        print_output("Minimum angle in 2D mesh: {:.2f} degrees".format(np.rad2deg(min_angle)))
        # Expand minimum angle field to extruded mesh
        P0 = self.function_spaces.P0
        theta = Function(P0)
        ExpandFunctionTo3d(theta2d, theta).solve()
        cot_theta = 1.0/tan(theta)
        # Horizontal component
        nu = self.options.horizontal_viscosity
        if nu is not None:
            # scale the penalty by the local viscosity ratio and mesh angle
            self.options.sipg_parameter = Function(P0)
            self.options.sipg_parameter.interpolate(alpha_h*get_sipg_ratio(nu)*cot_theta)
            max_sipg = self.options.sipg_parameter.vector().gather().max()
            print_output("Maximum SIPG value in horizontal: {:.2f}".format(max_sipg))
        else:
            print_output("SIPG parameter in horizontal: {:.2f}".format(alpha_h.values()[0]))
        # Vertical component
        print_output("SIPG parameter in vertical: {:.2f}".format(alpha_v.values()[0]))
        # Penalty parameter for tracers / turbulence model
        if self.options.solve_salinity or self.options.solve_temperature or self.options.use_turbulence:
            # Horizontal component
            nu = self.options.horizontal_diffusivity
            if nu is not None:
                scaling = get_sipg_ratio(nu)*cot_theta
                if self.options.solve_salinity or self.options.solve_temperature:
                    self.options.sipg_parameter_tracer = Function(P0)
                    self.options.sipg_parameter_tracer.interpolate(alpha_h_tracer*scaling)
                    max_sipg = self.options.sipg_parameter_tracer.vector().gather().max()
                    print_output("Maximum tracer SIPG value in horizontal: {:.2f}".format(max_sipg))
                if self.options.use_turbulence:
                    self.options.sipg_parameter_turb = Function(P0)
                    self.options.sipg_parameter_turb.interpolate(alpha_h_turb*scaling)
                    max_sipg = self.options.sipg_parameter_turb.vector().gather().max()
                    print_output("Maximum turbulence SIPG value in horizontal: {:.2f}".format(max_sipg))
            else:
                if self.options.solve_salinity or self.options.solve_temperature:
                    print_output("Tracer SIPG parameter in horizontal: {:.2f}".format(alpha_h_tracer.values()[0]))
                if self.options.use_turbulence:
                    print_output("Turbulence SIPG parameter in horizontal: {:.2f}".format(alpha_h_turb.values()[0]))
            # Vertical component
            if self.options.solve_salinity or self.options.solve_temperature:
                print_output("Tracer SIPG parameter in vertical: {:.2f}".format(alpha_v_tracer.values()[0]))
            if self.options.use_turbulence:
                print_output("Turbulence SIPG parameter in vertical: {:.2f}".format(alpha_v_turb.values()[0]))
    else:
        # non-automatic mode: assign the degree-based defaults to the
        # option fields (assumed to be pre-allocated Constants)
        print_output("Using default SIPG parameters")
        self.options.sipg_parameter.assign(alpha_h)
        self.options.sipg_parameter_tracer.assign(alpha_h_tracer)
        self.options.sipg_parameter_turb.assign(alpha_h_turb)
        self.options.sipg_parameter_vertical.assign(alpha_v)
        self.options.sipg_parameter_vertical_tracer.assign(alpha_v_tracer)
        self.options.sipg_parameter_vertical_turb.assign(alpha_v_turb)
def create_fields(self):
    """
    Creates all fields

    Allocates every :class:`Function` the solver needs and stores them in
    :attr:`.fields`. Optional fields (tracers, turbulence, ALE mesh
    velocity, etc.) are only created if the corresponding option is set.
    """
    # 'U_2d' is stored in self.function_spaces, not as an attribute of
    # self, so the old ``hasattr(self, 'U_2d')`` was always False and the
    # spaces were rebuilt on every call; check the container instead
    # (mirrors the "'uv_3d' not in self.fields" guard in create_equations)
    if 'U_2d' not in self.function_spaces:
        self.create_function_spaces()
    self._isfrozen = False
    # attach a plain-text log file handler unless exports are disabled
    if self.options.log_output and not self.options.no_exports:
        logfile = os.path.join(create_directory(self.options.output_directory), 'log')
        filehandler = logging.logging.FileHandler(logfile, mode='w')
        filehandler.setFormatter(logging.logging.Formatter('%(message)s'))
        output_logger.addHandler(filehandler)
    # mesh velocity etc fields must be in the same space as 3D coordinates
    coord_is_dg = element_continuity(self.mesh2d.coordinates.function_space().ufl_element()).horizontal == 'dg'
    if coord_is_dg:
        coord_fs = FunctionSpace(self.mesh, 'DG', 1, vfamily='CG', vdegree=1)
        coord_fs_2d = self.function_spaces.P1DG_2d
    else:
        coord_fs = self.function_spaces.P1
        coord_fs_2d = self.function_spaces.P1_2d
    # ----- fields
    self.fields.solution_2d = Function(self.function_spaces.V_2d)
    # correct treatment of the split 2d functions
    uv_2d, eta2d = self.fields.solution_2d.split()
    self.fields.uv_2d = uv_2d
    self.fields.elev_2d = eta2d
    self.fields.elev_3d = Function(self.function_spaces.H)
    self.fields.elev_cg_3d = Function(coord_fs)
    self.fields.elev_cg_2d = Function(coord_fs_2d)
    self.fields.uv_3d = Function(self.function_spaces.U)
    self.fields.bathymetry_2d = Function(coord_fs_2d)
    self.fields.bathymetry_3d = Function(coord_fs)
    # z coordinate in the stretched mesh
    self.fields.z_coord_3d = Function(coord_fs)
    # z coordinate in the reference mesh (eta=0)
    self.fields.z_coord_ref_3d = Function(coord_fs)
    self.fields.uv_dav_3d = Function(self.function_spaces.U)
    self.fields.uv_dav_2d = Function(self.function_spaces.U_2d)
    self.fields.split_residual_2d = Function(self.function_spaces.U_2d)
    self.fields.uv_mag_3d = Function(self.function_spaces.P0)
    self.fields.uv_p1_3d = Function(self.function_spaces.P1v)
    self.fields.w_3d = Function(self.function_spaces.W)
    self.fields.hcc_metric_3d = Function(self.function_spaces.P1DG, name='mesh consistency')
    if self.options.use_ale_moving_mesh:
        self.fields.w_mesh_3d = Function(coord_fs)
        self.fields.w_mesh_surf_3d = Function(coord_fs)
        self.fields.w_mesh_surf_2d = Function(coord_fs_2d)
    if self.options.solve_salinity:
        self.fields.salt_3d = Function(self.function_spaces.H, name='Salinity')
    if self.options.solve_temperature:
        self.fields.temp_3d = Function(self.function_spaces.H, name='Temperature')
    if self.options.use_baroclinic_formulation:
        if self.options.use_quadratic_density:
            self.fields.density_3d = Function(self.function_spaces.P2DGxP2, name='Density')
        else:
            self.fields.density_3d = Function(self.function_spaces.H, name='Density')
        self.fields.baroc_head_3d = Function(self.function_spaces.H_bhead)
        self.fields.int_pg_3d = Function(self.function_spaces.U_int_pg, name='int_pg_3d')
    if self.options.coriolis_frequency is not None:
        if isinstance(self.options.coriolis_frequency, Constant):
            self.fields.coriolis_3d = self.options.coriolis_frequency
        else:
            # expand the user-supplied 2D field over the vertical
            self.fields.coriolis_3d = Function(self.function_spaces.P1)
            ExpandFunctionTo3d(self.options.coriolis_frequency, self.fields.coriolis_3d).solve()
    if self.options.wind_stress is not None:
        if isinstance(self.options.wind_stress, Function):
            assert self.options.wind_stress.function_space().mesh().geometric_dimension() == 3, \
                'wind stress field must be a 3D function'
            self.fields.wind_stress_3d = self.options.wind_stress
        elif isinstance(self.options.wind_stress, Constant):
            self.fields.wind_stress_3d = self.options.wind_stress
        else:
            raise Exception('Unsupported wind stress type: {:}'.format(type(self.options.wind_stress)))
    self.fields.v_elem_size_3d = Function(self.function_spaces.P1DG)
    self.fields.v_elem_size_2d = Function(self.function_spaces.P1DG_2d)
    self.fields.h_elem_size_3d = Function(self.function_spaces.P1)
    self.fields.h_elem_size_2d = Function(self.function_spaces.P1_2d)
    get_horizontal_elem_size_3d(self.fields.h_elem_size_2d, self.fields.h_elem_size_3d)
    self.fields.max_h_diff = Function(self.function_spaces.P1)
    if self.options.use_smagorinsky_viscosity:
        self.fields.smag_visc_3d = Function(self.function_spaces.P1)
    # slope limiters are only meaningful for degree >= 1 DG fields
    if self.options.use_limiter_for_tracers and self.options.polynomial_degree > 0:
        self.tracer_limiter = limiter.VertexBasedP1DGLimiter(self.function_spaces.H)
    else:
        self.tracer_limiter = None
    if (self.options.use_limiter_for_velocity
            and self.options.polynomial_degree > 0
            and self.options.element_family == 'dg-dg'):
        self.uv_limiter = limiter.VertexBasedP1DGLimiter(self.function_spaces.U)
    else:
        self.uv_limiter = None
    if self.options.use_turbulence:
        if self.options.turbulence_model_type == 'gls':
            # NOTE tke and psi should be in H as tracers ??
            self.fields.tke_3d = Function(self.function_spaces.turb_space)
            self.fields.psi_3d = Function(self.function_spaces.turb_space)
            # NOTE other turb. quantities should share the same nodes ??
            self.fields.eps_3d = Function(self.function_spaces.turb_space)
            self.fields.len_3d = Function(self.function_spaces.turb_space)
            self.fields.eddy_visc_3d = Function(self.function_spaces.turb_space)
            self.fields.eddy_diff_3d = Function(self.function_spaces.turb_space)
            # NOTE M2 and N2 depend on d(.)/dz -> use CG in vertical ?
            self.fields.shear_freq_3d = Function(self.function_spaces.turb_space)
            self.fields.buoy_freq_3d = Function(self.function_spaces.turb_space)
            self.turbulence_model = turbulence.GenericLengthScaleModel(
                weakref.proxy(self),
                self.fields.tke_3d,
                self.fields.psi_3d,
                self.fields.uv_3d,
                self.fields.get('density_3d'),
                self.fields.len_3d,
                self.fields.eps_3d,
                self.fields.eddy_diff_3d,
                self.fields.eddy_visc_3d,
                self.fields.buoy_freq_3d,
                self.fields.shear_freq_3d,
                options=self.options.turbulence_model_options)
        elif self.options.turbulence_model_type == 'pacanowski':
            self.fields.eddy_visc_3d = Function(self.function_spaces.turb_space)
            self.fields.eddy_diff_3d = Function(self.function_spaces.turb_space)
            self.fields.shear_freq_3d = Function(self.function_spaces.turb_space)
            self.fields.buoy_freq_3d = Function(self.function_spaces.turb_space)
            self.turbulence_model = turbulence.PacanowskiPhilanderModel(
                weakref.proxy(self),
                self.fields.uv_3d,
                self.fields.get('density_3d'),
                self.fields.eddy_diff_3d,
                self.fields.eddy_visc_3d,
                self.fields.buoy_freq_3d,
                self.fields.shear_freq_3d,
                options=self.options.turbulence_model_options)
        else:
            # read the option that actually exists (turbulence_model_type);
            # the old code accessed self.options.turbulence_model, which
            # raised AttributeError instead of this informative Exception
            raise Exception('Unsupported turbulence model: {:}'.format(self.options.turbulence_model_type))
    else:
        self.turbulence_model = None
    # compute total viscosity/diffusivity
    self.tot_h_visc = SumFunction()
    self.tot_h_visc.add(self.options.horizontal_viscosity)
    self.tot_h_visc.add(self.fields.get('smag_visc_3d'))
    self.tot_v_visc = SumFunction()
    self.tot_v_visc.add(self.options.vertical_viscosity)
    self.tot_v_visc.add(self.fields.get('eddy_visc_3d'))
    self.tot_h_diff = SumFunction()
    self.tot_h_diff.add(self.options.horizontal_diffusivity)
    self.tot_v_diff = SumFunction()
    self.tot_v_diff.add(self.options.vertical_diffusivity)
    self.tot_v_diff.add(self.fields.get('eddy_diff_3d'))
    self._isfrozen = True
    def create_equations(self):
        """
        Creates all dynamic equations and time integrators

        Builds the 2D shallow water system, the 3D momentum equation, the
        tracer equations (salinity/temperature) and turbulence equations that
        the current :attr:`options` require, the coupled time integrator, the
        export managers, and all auxiliary operators (vertical velocity
        solver, vertical averaging/integration, equation of state,
        extractors and projectors). Finally assigns initial values and
        freezes the object against further attribute creation.
        """
        # fields must exist before equations can be built on their spaces
        if 'uv_3d' not in self.fields:
            self.create_fields()
        self._isfrozen = False
        if self.options.log_output and not self.options.no_exports:
            # mirror log output into a plain-text file in the output directory
            logfile = os.path.join(create_directory(self.options.output_directory), 'log')
            filehandler = logging.logging.FileHandler(logfile, mode='w')
            filehandler.setFormatter(logging.logging.Formatter('%(message)s'))
            output_logger.addHandler(filehandler)
        self.set_sipg_parameter()
        self.depth = DepthExpression(self.fields.bathymetry_2d,
                                     use_nonlinear_equations=self.options.use_nonlinear_equations)
        self.eq_sw = shallowwater_eq.ModeSplit2DEquations(
            self.fields.solution_2d.function_space(),
            self.depth, self.options)
        # bottom friction is applied explicitly only when vertical diffusion
        # is not solved implicitly (otherwise it is part of the implicit solve)
        expl_bottom_friction = self.options.use_bottom_friction and not self.options.use_implicit_vertical_diffusion
        self.eq_momentum = momentum_eq.MomentumEquation(self.fields.uv_3d.function_space(),
                                                        bathymetry=self.fields.bathymetry_3d,
                                                        v_elem_size=self.fields.v_elem_size_3d,
                                                        h_elem_size=self.fields.h_elem_size_3d,
                                                        use_nonlinear_equations=self.options.use_nonlinear_equations,
                                                        use_lax_friedrichs=self.options.use_lax_friedrichs_velocity,
                                                        use_bottom_friction=expl_bottom_friction,
                                                        sipg_parameter=self.options.sipg_parameter,
                                                        sipg_parameter_vertical=self.options.sipg_parameter_vertical)
        if self.options.use_implicit_vertical_diffusion:
            # separate linear momentum equation for the implicit vertical
            # diffusion (and bottom friction) solve
            self.eq_vertmomentum = momentum_eq.MomentumEquation(self.fields.uv_3d.function_space(),
                                                                bathymetry=self.fields.bathymetry_3d,
                                                                v_elem_size=self.fields.v_elem_size_3d,
                                                                h_elem_size=self.fields.h_elem_size_3d,
                                                                use_nonlinear_equations=False,
                                                                use_lax_friedrichs=self.options.use_lax_friedrichs_velocity,
                                                                use_bottom_friction=self.options.use_bottom_friction,
                                                                sipg_parameter=self.options.sipg_parameter,
                                                                sipg_parameter_vertical=self.options.sipg_parameter_vertical)
        if self.options.solve_salinity:
            self.eq_salt = tracer_eq.TracerEquation(self.fields.salt_3d.function_space(),
                                                    bathymetry=self.fields.bathymetry_3d,
                                                    v_elem_size=self.fields.v_elem_size_3d,
                                                    h_elem_size=self.fields.h_elem_size_3d,
                                                    use_lax_friedrichs=self.options.use_lax_friedrichs_tracer,
                                                    use_symmetric_surf_bnd=self.options.element_family == 'dg-dg',
                                                    sipg_parameter=self.options.sipg_parameter_tracer,
                                                    sipg_parameter_vertical=self.options.sipg_parameter_vertical_tracer)
            if self.options.use_implicit_vertical_diffusion:
                # companion equation for the implicit vertical diffusion step
                self.eq_salt_vdff = tracer_eq.TracerEquation(self.fields.salt_3d.function_space(),
                                                             bathymetry=self.fields.bathymetry_3d,
                                                             v_elem_size=self.fields.v_elem_size_3d,
                                                             h_elem_size=self.fields.h_elem_size_3d,
                                                             use_lax_friedrichs=self.options.use_lax_friedrichs_tracer,
                                                             sipg_parameter=self.options.sipg_parameter_tracer,
                                                             sipg_parameter_vertical=self.options.sipg_parameter_vertical_tracer)
        if self.options.solve_temperature:
            self.eq_temp = tracer_eq.TracerEquation(self.fields.temp_3d.function_space(),
                                                    bathymetry=self.fields.bathymetry_3d,
                                                    v_elem_size=self.fields.v_elem_size_3d,
                                                    h_elem_size=self.fields.h_elem_size_3d,
                                                    use_lax_friedrichs=self.options.use_lax_friedrichs_tracer,
                                                    use_symmetric_surf_bnd=self.options.element_family == 'dg-dg',
                                                    sipg_parameter=self.options.sipg_parameter_tracer,
                                                    sipg_parameter_vertical=self.options.sipg_parameter_vertical_tracer)
            if self.options.use_implicit_vertical_diffusion:
                self.eq_temp_vdff = tracer_eq.TracerEquation(self.fields.temp_3d.function_space(),
                                                             bathymetry=self.fields.bathymetry_3d,
                                                             v_elem_size=self.fields.v_elem_size_3d,
                                                             h_elem_size=self.fields.h_elem_size_3d,
                                                             use_lax_friedrichs=self.options.use_lax_friedrichs_tracer,
                                                             sipg_parameter=self.options.sipg_parameter_tracer,
                                                             sipg_parameter_vertical=self.options.sipg_parameter_vertical_tracer)
        # attach user-defined boundary conditions to the equations
        self.eq_sw.bnd_functions = self.bnd_functions['shallow_water']
        self.eq_momentum.bnd_functions = self.bnd_functions['momentum']
        if self.options.solve_salinity:
            self.eq_salt.bnd_functions = self.bnd_functions['salt']
        if self.options.solve_temperature:
            self.eq_temp.bnd_functions = self.bnd_functions['temp']
        if self.options.use_turbulence and self.options.turbulence_model_type == 'gls':
            if self.options.use_turbulence_advection:
                # explicit advection equations
                self.eq_tke_adv = tracer_eq.TracerEquation(self.fields.tke_3d.function_space(),
                                                           bathymetry=self.fields.bathymetry_3d,
                                                           v_elem_size=self.fields.v_elem_size_3d,
                                                           h_elem_size=self.fields.h_elem_size_3d,
                                                           use_lax_friedrichs=self.options.use_lax_friedrichs_tracer,
                                                           sipg_parameter=self.options.sipg_parameter_turb,
                                                           sipg_parameter_vertical=self.options.sipg_parameter_vertical_turb)
                self.eq_psi_adv = tracer_eq.TracerEquation(self.fields.psi_3d.function_space(),
                                                           bathymetry=self.fields.bathymetry_3d,
                                                           v_elem_size=self.fields.v_elem_size_3d,
                                                           h_elem_size=self.fields.h_elem_size_3d,
                                                           use_lax_friedrichs=self.options.use_lax_friedrichs_tracer,
                                                           sipg_parameter=self.options.sipg_parameter_turb,
                                                           sipg_parameter_vertical=self.options.sipg_parameter_vertical_turb)
            # implicit vertical diffusion eqn with production terms
            self.eq_tke_diff = turbulence.TKEEquation(self.fields.tke_3d.function_space(),
                                                      self.turbulence_model,
                                                      bathymetry=self.fields.bathymetry_3d,
                                                      v_elem_size=self.fields.v_elem_size_3d,
                                                      h_elem_size=self.fields.h_elem_size_3d)
            self.eq_psi_diff = turbulence.PsiEquation(self.fields.psi_3d.function_space(),
                                                      self.turbulence_model,
                                                      bathymetry=self.fields.bathymetry_3d,
                                                      v_elem_size=self.fields.v_elem_size_3d,
                                                      h_elem_size=self.fields.h_elem_size_3d)
        # ----- Time integrators
        self.dt_mode = '3d'  # 'split'|'2d'|'3d' use constant 2d/3d dt, or split
        if self.options.timestepper_type == 'LeapFrog':
            self.timestepper = coupled_timeintegrator.CoupledLeapFrogAM3(weakref.proxy(self))
        elif self.options.timestepper_type == 'SSPRK22':
            self.timestepper = coupled_timeintegrator.CoupledTwoStageRK(weakref.proxy(self))
        else:
            raise Exception('Unknown time integrator type: '+str(self.options.timestepper_type))
        # ----- File exporters
        # create export_managers and store in a list
        self.exporters = OrderedDict()
        if not self.options.no_exports:
            e = exporter.ExportManager(self.options.output_directory,
                                       self.options.fields_to_export,
                                       self.fields,
                                       field_metadata,
                                       export_type='vtk',
                                       verbose=self.options.verbose > 0)
            self.exporters['vtk'] = e
            hdf5_dir = os.path.join(self.options.output_directory, 'hdf5')
            e = exporter.ExportManager(hdf5_dir,
                                       self.options.fields_to_export_hdf5,
                                       self.fields,
                                       field_metadata,
                                       export_type='hdf5',
                                       verbose=self.options.verbose > 0)
            self.exporters['hdf5'] = e
        # ----- Operators
        # total velocity = deviation stored in uv_3d + depth average
        tot_uv_3d = self.fields.uv_3d + self.fields.uv_dav_3d
        self.w_solver = VerticalVelocitySolver(self.fields.w_3d,
                                               tot_uv_3d,
                                               self.fields.bathymetry_3d,
                                               self.eq_momentum.bnd_functions)
        self.uv_averager = VerticalIntegrator(self.fields.uv_3d,
                                              self.fields.uv_dav_3d,
                                              bottom_to_top=True,
                                              bnd_value=Constant((0.0, 0.0, 0.0)),
                                              average=True,
                                              bathymetry=self.fields.bathymetry_3d,
                                              elevation=self.fields.elev_cg_3d)
        if self.options.use_baroclinic_formulation:
            # fall back to constant salinity/temperature if not solved
            if self.options.solve_salinity:
                s = self.fields.salt_3d
            else:
                s = self.options.constant_salinity
            if self.options.solve_temperature:
                t = self.fields.temp_3d
            else:
                t = self.options.constant_temperature
            if self.options.equation_of_state_type == 'linear':
                eos_options = self.options.equation_of_state_options
                self.equation_of_state = LinearEquationOfState(eos_options.rho_ref,
                                                               eos_options.alpha,
                                                               eos_options.beta,
                                                               eos_options.th_ref,
                                                               eos_options.s_ref)
            else:
                self.equation_of_state = JackettEquationOfState()
            if self.options.use_quadratic_density:
                self.density_solver = DensitySolverWeak(s, t, self.fields.density_3d,
                                                        self.equation_of_state)
            else:
                self.density_solver = DensitySolver(s, t, self.fields.density_3d,
                                                    self.equation_of_state)
            # baroclinic head: integrate density from surface downwards
            self.rho_integrator = VerticalIntegrator(self.fields.density_3d,
                                                     self.fields.baroc_head_3d,
                                                     bottom_to_top=False,
                                                     average=False,
                                                     bathymetry=self.fields.bathymetry_3d,
                                                     elevation=self.fields.elev_cg_3d)
            self.int_pg_calculator = momentum_eq.InternalPressureGradientCalculator(
                self.fields, self.options,
                self.bnd_functions['momentum'],
                solver_parameters=self.options.timestepper_options.solver_parameters_momentum_explicit)
        # extract the 2D depth-averaged velocity from the surface of the 3D field
        self.extract_surf_dav_uv = SubFunctionExtractor(self.fields.uv_dav_3d,
                                                        self.fields.uv_dav_2d,
                                                        boundary='top', elem_facet='top',
                                                        elem_height=self.fields.v_elem_size_2d)
        self.copy_elev_to_3d = ExpandFunctionTo3d(self.fields.elev_2d, self.fields.elev_3d)
        self.copy_elev_cg_to_3d = ExpandFunctionTo3d(self.fields.elev_cg_2d, self.fields.elev_cg_3d)
        self.copy_uv_dav_to_uv_dav_3d = ExpandFunctionTo3d(self.fields.uv_dav_2d, self.fields.uv_dav_3d,
                                                           elem_height=self.fields.v_elem_size_3d)
        self.copy_uv_to_uv_dav_3d = ExpandFunctionTo3d(self.fields.uv_2d, self.fields.uv_dav_3d,
                                                       elem_height=self.fields.v_elem_size_3d)
        self.uv_mag_solver = VelocityMagnitudeSolver(self.fields.uv_mag_3d, u=self.fields.uv_3d)
        self.mesh_updater = ALEMeshUpdater(self)
        if self.options.use_smagorinsky_viscosity:
            self.smagorinsky_diff_solver = SmagorinskyViscosity(self.fields.uv_p1_3d, self.fields.smag_visc_3d,
                                                                self.options.smagorinsky_coefficient, self.fields.h_elem_size_3d,
                                                                self.fields.max_h_diff,
                                                                weak_form=self.options.polynomial_degree == 0)
        self.uv_p1_projector = Projector(self.fields.uv_3d, self.fields.uv_p1_3d)
        self.elev_3d_to_cg_projector = Projector(self.fields.elev_3d, self.fields.elev_cg_3d)
        self.elev_2d_to_cg_projector = Projector(self.fields.elev_2d, self.fields.elev_cg_2d)
        # ----- set initial values
        self.fields.bathymetry_2d.project(self.bathymetry_cg_2d)
        ExpandFunctionTo3d(self.fields.bathymetry_2d, self.fields.bathymetry_3d).solve()
        self.mesh_updater.initialize()
        self.compute_mesh_stats()
        self.set_time_step()
        self.timestepper.set_dt(self.dt, self.dt_2d)
        # compute maximal diffusivity for explicit schemes
        degree_h, degree_v = self.function_spaces.H.ufl_element().degree()
        max_diff_alpha = 1.0/60.0/max((degree_h*(degree_h + 1)), 1.0)  # FIXME depends on element type and order
        self.fields.max_h_diff.assign(max_diff_alpha/self.dt * self.fields.h_elem_size_3d**2)
        d = self.fields.max_h_diff.dat.data
        print_output('max h diff {:} - {:}'.format(d.min(), d.max()))
        self.next_export_t = self.simulation_time + self.options.simulation_export_time
        self._initialized = True
        self._isfrozen = True
    def assign_initial_conditions(self, elev=None, salt=None, temp=None,
                                  uv_2d=None, uv_3d=None, tke=None, psi=None):
        """
        Assigns initial conditions

        :kwarg elev: Initial condition for water elevation
        :type elev: scalar 2D :class:`Function`, :class:`Constant`, or an expression
        :kwarg salt: Initial condition for salinity field
        :type salt: scalar 3D :class:`Function`, :class:`Constant`, or an expression
        :kwarg temp: Initial condition for temperature field
        :type temp: scalar 3D :class:`Function`, :class:`Constant`, or an expression
        :kwarg uv_2d: Initial condition for depth averaged velocity
        :type uv_2d: vector valued 2D :class:`Function`, :class:`Constant`, or an expression
        :kwarg uv_3d: Initial condition for horizontal velocity
        :type uv_3d: vector valued 3D :class:`Function`, :class:`Constant`, or an expression
        :kwarg tke: Initial condition for turbulent kinetic energy field
        :type tke: scalar 3D :class:`Function`, :class:`Constant`, or an expression
        :kwarg psi: Initial condition for turbulence generic length scale field
        :type psi: scalar 3D :class:`Function`, :class:`Constant`, or an expression
        """
        # equations are created lazily on first use
        if not self._initialized:
            self.create_equations()
        if elev is not None:
            self.fields.elev_2d.project(elev)
        if uv_2d is not None:
            self.fields.uv_2d.project(uv_2d)
            self.fields.uv_dav_2d.project(uv_2d)
            if uv_3d is None:
                # no 3D velocity given: seed the 3D depth-averaged velocity
                # from the 2D field
                ExpandFunctionTo3d(self.fields.uv_2d, self.fields.uv_dav_3d,
                                   elem_height=self.fields.v_elem_size_3d).solve()
        if uv_3d is not None:
            self.fields.uv_3d.project(uv_3d)
        if salt is not None and self.options.solve_salinity:
            self.fields.salt_3d.project(salt)
        if temp is not None and self.options.solve_temperature:
            self.fields.temp_3d.project(temp)
        if self.options.use_turbulence and self.options.turbulence_model_type == 'gls':
            if tke is not None:
                self.fields.tke_3d.project(tke)
            if psi is not None:
                self.fields.psi_3d.project(psi)
            self.turbulence_model.initialize()
        if self.options.use_ale_moving_mesh:
            # mesh must follow the (possibly nonzero) initial elevation
            self.timestepper._update_3d_elevation()
            self.timestepper._update_moving_mesh()
        self.timestepper.initialize()
        # update all diagnostic variables
        self.timestepper._update_all_dependencies(self.simulation_time,
                                                  do_2d_coupling=False,
                                                  do_vert_diffusion=False,
                                                  do_ale_update=True,
                                                  do_stab_params=True,
                                                  do_turbulence=False)
        if self.options.use_turbulence:
            # NOTE(review): may re-run initialize() for the gls model (already
            # called above) — presumably harmless; confirm against the model
            self.turbulence_model.initialize()
def add_callback(self, callback, eval_interval='export'):
"""
Adds callback to solver object
:arg callback: :class:`.DiagnosticCallback` instance
:kwarg str eval_interval: Determines when callback will be evaluated,
either 'export' or 'timestep' for evaluating after each export or
time step.
"""
self.callbacks.add(callback, eval_interval)
def export(self):
"""
Export all fields to disk
Also evaluates all callbacks set to 'export' interval.
"""
self.callbacks.evaluate(mode='export', index=self.i_export)
# set uv to total uv instead of deviation from depth average
# TODO find a cleaner way of doing this ...
self.fields.uv_3d += self.fields.uv_dav_3d
for e in self.exporters.values():
e.export()
# restore uv_3d
self.fields.uv_3d -= self.fields.uv_dav_3d
    def load_state(self, i_export, outputdir=None, t=None, iteration=None):
        """
        Loads simulation state from hdf5 outputs.

        This replaces :meth:`.assign_initial_conditions` in model initialization.

        This assumes that model setup is kept the same (e.g. time step) and
        all prognostic state variables are exported in hdf5 format. The required
        state variables are: elev_2d, uv_2d, uv_3d, salt_3d, temp_3d, tke_3d,
        psi_3d

        Currently hdf5 field import only works for the same number of MPI
        processes.

        :arg int i_export: export index to load
        :kwarg string outputdir: (optional) directory where files are read from.
            By default ``options.output_directory``.
        :kwarg float t: simulation time. Overrides the time stamp stored in the
            hdf5 files.
        :kwarg int iteration: Overrides the iteration count in the hdf5 files.
        """
        if not self._initialized:
            self.create_equations()
        if outputdir is None:
            outputdir = self.options.output_directory
        self._simulation_continued = True
        # create new ExportManager with desired outputdir
        state_fields = ['uv_2d', 'elev_2d', 'uv_3d',
                        'salt_3d', 'temp_3d', 'tke_3d', 'psi_3d']
        hdf5_dir = os.path.join(outputdir, 'hdf5')
        e = exporter.ExportManager(hdf5_dir,
                                   state_fields,
                                   self.fields,
                                   field_metadata,
                                   export_type='hdf5',
                                   verbose=self.options.verbose > 0)
        e.exporters['uv_2d'].load(i_export, self.fields.uv_2d)
        e.exporters['elev_2d'].load(i_export, self.fields.elev_2d)
        e.exporters['uv_3d'].load(i_export, self.fields.uv_3d)
        # NOTE remove mean from uv_3d: internally uv_3d stores only the
        # deviation from the depth average, while the export holds the total
        self.timestepper._remove_depth_average_from_uv_3d()
        salt = temp = tke = psi = None
        if self.options.solve_salinity:
            salt = self.fields.salt_3d
            e.exporters['salt_3d'].load(i_export, salt)
        if self.options.solve_temperature:
            temp = self.fields.temp_3d
            e.exporters['temp_3d'].load(i_export, temp)
        if self.options.use_turbulence:
            if 'tke_3d' in self.fields:
                tke = self.fields.tke_3d
                e.exporters['tke_3d'].load(i_export, tke)
            if 'psi_3d' in self.fields:
                psi = self.fields.psi_3d
                e.exporters['psi_3d'].load(i_export, psi)
        # re-run initialization with the loaded fields to rebuild all
        # diagnostic quantities
        self.assign_initial_conditions(elev=self.fields.elev_2d,
                                       uv_2d=self.fields.uv_2d,
                                       uv_3d=self.fields.uv_3d,
                                       salt=salt, temp=temp,
                                       tke=tke, psi=psi,
                                       )
        # time stepper bookkeeping for export time step
        self.i_export = i_export
        self.next_export_t = self.i_export*self.options.simulation_export_time
        if iteration is None:
            iteration = int(np.ceil(self.next_export_t/self.dt))
        if t is None:
            t = iteration*self.dt
        self.iteration = iteration
        self.simulation_time = t
        # for next export: only re-export the loaded state when writing into
        # a different directory than the one it was read from
        self.export_initial_state = outputdir != self.options.output_directory
        if self.export_initial_state:
            offset = 0
        else:
            offset = 1
        self.next_export_t += self.options.simulation_export_time
        for e in self.exporters.values():
            e.set_next_export_ix(self.i_export + offset)
def print_state(self, cputime):
"""
Print a summary of the model state on stdout
:arg float cputime: Measured CPU time
"""
norm_h = norm(self.fields.elev_2d)
norm_u = norm(self.fields.uv_3d)
line = ('{iexp:5d} {i:5d} T={t:10.2f} '
'eta norm: {e:10.4f} u norm: {u:10.4f} {cpu:5.2f}')
print_output(line.format(iexp=self.i_export, i=self.iteration,
t=self.simulation_time, e=norm_h,
u=norm_u, cpu=cputime))
sys.stdout.flush()
def _print_field(self, field):
"""
Prints min/max values of a field for debugging.
:arg field: a :class:`Function` or a field string, e.g. 'salt_3d'
"""
if isinstance(field, str):
_field = self.fields[field]
else:
_field = field
minval = float(_field.dat.data.min())
minval = self.comm.allreduce(minval, op=MPI.MIN)
maxval = float(_field.dat.data.max())
maxval = self.comm.allreduce(maxval, op=MPI.MAX)
print_output(' {:}: {:.4f} {:.4f}'.format(_field.name(), minval, maxval))
def print_state_debug(self):
"""
Print min/max values of prognostic/diagnostic fields for debugging.
"""
field_list = [
'elev_2d', 'uv_2d', 'elev_cg_2d',
'elev_3d', 'uv_3d',
'w_3d', 'uv_dav_3d', 'w_mesh_3d',
'salt_3d', 'temp_3d', 'density_3d',
'baroc_head_3d', 'int_pg_3d',
'psi_3d', 'eps_3d', 'eddy_visc_3d',
'shear_freq_3d', 'buoy_freq_3d',
'coriolis_2d', 'coriolis_3d',
'wind_stress_3d',
]
print_output('{:06} T={:10.2f}'.format(self.iteration, self.simulation_time))
for fieldname in field_list:
if (fieldname in self.fields
and isinstance(self.fields[fieldname], Function)):
self._print_field(self.fields[fieldname])
self.comm.barrier()
def iterate(self, update_forcings=None, update_forcings3d=None,
export_func=None):
"""
Runs the simulation
Iterates over the time loop until time ``options.simulation_end_time`` is reached.
Exports fields to disk on ``options.simulation_export_time`` intervals.
:kwarg update_forcings: User-defined function that takes simulation
time as an argument and updates time-dependent boundary conditions
of the 2D system (if any).
:kwarg update_forcings_3d: User-defined function that takes simulation
time as an argument and updates time-dependent boundary conditions
of the 3D equations (if any).
:kwarg export_func: User-defined function (with no arguments) that will
be called on every export.
"""
if not self._initialized:
self.create_equations()
self.options.check_salinity_conservation &= self.options.solve_salinity
self.options.check_salinity_overshoot &= self.options.solve_salinity
self.options.check_temperature_conservation &= self.options.solve_temperature
self.options.check_temperature_overshoot &= self.options.solve_temperature
self.options.check_volume_conservation_3d &= self.options.use_ale_moving_mesh
self.options.use_limiter_for_tracers &= self.options.polynomial_degree > 0
self.options.use_limiter_for_velocity &= self.options.polynomial_degree > 0
self.options.use_limiter_for_velocity &= self.options.element_family == 'dg-dg'
t_epsilon = 1.0e-5
cputimestamp = time_mod.clock()
dump_hdf5 = self.options.export_diagnostics and not self.options.no_exports
if self.options.check_volume_conservation_2d:
c = callback.VolumeConservation2DCallback(self,
export_to_hdf5=dump_hdf5,
append_to_log=True)
self.add_callback(c, eval_interval='export')
if self.options.check_volume_conservation_3d:
c = callback.VolumeConservation3DCallback(self,
export_to_hdf5=dump_hdf5,
append_to_log=True)
self.add_callback(c, eval_interval='export')
if self.options.check_salinity_conservation:
c = callback.TracerMassConservationCallback('salt_3d',
self,
export_to_hdf5=dump_hdf5,
append_to_log=True)
self.add_callback(c, eval_interval='export')
if self.options.check_salinity_overshoot:
c = callback.TracerOvershootCallBack('salt_3d',
self,
export_to_hdf5=dump_hdf5,
append_to_log=True)
self.add_callback(c, eval_interval='export')
if self.options.check_temperature_conservation:
c = callback.TracerMassConservationCallback('temp_3d',
self,
export_to_hdf5=dump_hdf5,
append_to_log=True)
self.add_callback(c, eval_interval='export')
if self.options.check_temperature_overshoot:
c = callback.TracerOvershootCallBack('temp_3d',
self,
export_to_hdf5=dump_hdf5,
append_to_log=True)
self.add_callback(c, eval_interval='export')
if self._simulation_continued:
# set all callbacks to append mode
for m in self.callbacks:
for k in self.callbacks[m]:
self.callbacks[m][k].set_write_mode('append')
# initial export
self.print_state(0.0)
if self.export_initial_state:
self.export()
if export_func is not None:
export_func()
if 'vtk' in self.exporters:
self.exporters['vtk'].export_bathymetry(self.fields.bathymetry_2d)
initial_simulation_time = self.simulation_time
internal_iteration = 0
while self.simulation_time <= self.options.simulation_end_time - t_epsilon:
self.timestepper.advance(self.simulation_time,
update_forcings, update_forcings3d)
# Move to next time step
self.iteration += 1
internal_iteration += 1
self.simulation_time = initial_simulation_time + internal_iteration*self.dt
self.callbacks.evaluate(mode='timestep')
# Write the solution to file
if self.simulation_time >= self.next_export_t - t_epsilon:
self.i_export += 1
self.next_export_t += self.options.simulation_export_time
cputime = time_mod.clock() - cputimestamp
cputimestamp = time_mod.clock()
self.print_state(cputime)
self.export()
if export_func is not None:
export_func()
| tkarna/cofs | thetis/solver.py | Python | mit | 64,942 |
#!/usr/bin/env python
#
# vector3 and rotation matrix classes
# This follows the conventions in the ArduPilot code,
# and is essentially a python version of the AP_Math library
#
# Andrew Tridgell, March 2012
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
'''rotation matrix class
'''
from math import sin, cos, sqrt, asin, atan2, pi, radians, acos
class Vector3:
    '''a 3D vector

    Supports element-wise addition/subtraction, dot product via ``*``
    with another vector, scalar multiplication/division, and cross
    product via ``%``.
    '''
    def __init__(self, x=None, y=None, z=None):
        '''Initialise from three scalars, a single 3-sequence, or nothing
        (zero vector).'''
        if x is not None and y is not None and z is not None:
            self.x = float(x)
            self.y = float(y)
            self.z = float(z)
        elif x is not None and len(x) == 3:
            # a single 3-element sequence was passed
            self.x = float(x[0])
            self.y = float(x[1])
            self.z = float(x[2])
        elif x is not None:
            raise ValueError('bad initialiser')
        else:
            self.x = float(0)
            self.y = float(0)
            self.z = float(0)

    def __repr__(self):
        return 'Vector3(%.2f, %.2f, %.2f)' % (self.x,
                                              self.y,
                                              self.z)

    def __add__(self, v):
        return Vector3(self.x + v.x,
                       self.y + v.y,
                       self.z + v.z)

    __radd__ = __add__

    def __sub__(self, v):
        return Vector3(self.x - v.x,
                       self.y - v.y,
                       self.z - v.z)

    def __neg__(self):
        return Vector3(-self.x, -self.y, -self.z)

    def __rsub__(self, v):
        return Vector3(v.x - self.x,
                       v.y - self.y,
                       v.z - self.z)

    def __mul__(self, v):
        '''dot product with another Vector3, or scalar multiplication'''
        if isinstance(v, Vector3):
            return self.x*v.x + self.y*v.y + self.z*v.z
        return Vector3(self.x * v,
                       self.y * v,
                       self.z * v)

    __rmul__ = __mul__

    def __div__(self, v):
        '''scalar division (Python 2 name)'''
        return Vector3(self.x / v,
                       self.y / v,
                       self.z / v)

    # BUGFIX: Python 3 maps the / operator to __truediv__; without this alias
    # normalized() and any "vector / scalar" expression raise TypeError
    __truediv__ = __div__

    def __mod__(self, v):
        '''cross product'''
        return Vector3(self.y*v.z - self.z*v.y,
                       self.z*v.x - self.x*v.z,
                       self.x*v.y - self.y*v.x)

    def __copy__(self):
        return Vector3(self.x, self.y, self.z)

    copy = __copy__

    def length(self):
        '''Euclidean norm of the vector'''
        return sqrt(self.x**2 + self.y**2 + self.z**2)

    def zero(self):
        '''set all components to zero in place'''
        self.x = self.y = self.z = 0

    def angle(self, v):
        '''return the angle between this vector and another vector, in radians'''
        # BUGFIX: the dot product must be normalised *before* taking acos;
        # the previous code computed acos(dot) / (|a|*|b|)
        return acos((self * v) / (self.length() * v.length()))

    def normalized(self):
        '''return a unit-length copy of this vector'''
        return self / self.length()

    def normalize(self):
        '''scale this vector to unit length in place'''
        v = self.normalized()
        self.x = v.x
        self.y = v.y
        self.z = v.z
class Matrix3:
    '''a 3x3 matrix, intended as a rotation matrix

    Rows are stored as the Vector3 attributes ``a``, ``b`` and ``c``.
    '''
    def __init__(self, a=None, b=None, c=None):
        '''Initialise from three row vectors, or as the identity matrix.'''
        if a is not None and b is not None and c is not None:
            self.a = a.copy()
            self.b = b.copy()
            self.c = c.copy()
        else:
            self.identity()

    def __repr__(self):
        return 'Matrix3((%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f))' % (
            self.a.x, self.a.y, self.a.z,
            self.b.x, self.b.y, self.b.z,
            self.c.x, self.c.y, self.c.z)

    def identity(self):
        '''reset to the identity matrix'''
        self.a = Vector3(1, 0, 0)
        self.b = Vector3(0, 1, 0)
        self.c = Vector3(0, 0, 1)

    def transposed(self):
        '''return the transpose of this matrix'''
        return Matrix3(Vector3(self.a.x, self.b.x, self.c.x),
                       Vector3(self.a.y, self.b.y, self.c.y),
                       Vector3(self.a.z, self.b.z, self.c.z))

    def from_euler(self, roll, pitch, yaw):
        '''fill the matrix from Euler angles in radians (321 convention)'''
        cp = cos(pitch)
        sp = sin(pitch)
        sr = sin(roll)
        cr = cos(roll)
        sy = sin(yaw)
        cy = cos(yaw)

        self.a.x = cp * cy
        self.a.y = (sr * sp * cy) - (cr * sy)
        self.a.z = (cr * sp * cy) + (sr * sy)
        self.b.x = cp * sy
        self.b.y = (sr * sp * sy) + (cr * cy)
        self.b.z = (cr * sp * sy) - (sr * cy)
        self.c.x = -sp
        self.c.y = sr * cp
        self.c.z = cr * cp

    def to_euler(self):
        '''find Euler angles (321 convention) for the matrix'''
        # NOTE(review): the clamp values +-pi at the gimbal-lock singularity
        # look suspicious (-asin would give -+pi/2); kept as-is — confirm
        # against the C++ AP_Math implementation before changing
        if self.c.x >= 1.0:
            pitch = pi
        elif self.c.x <= -1.0:
            pitch = -pi
        else:
            pitch = -asin(self.c.x)
        roll = atan2(self.c.y, self.c.z)
        yaw = atan2(self.b.x, self.a.x)
        return (roll, pitch, yaw)

    def to_euler312(self):
        '''find Euler angles (312 convention) for the matrix.
        See http://www.atacolorado.com/eulersequences.doc
        '''
        T21 = self.a.y
        T22 = self.b.y
        T23 = self.c.y
        T13 = self.c.x
        T33 = self.c.z
        yaw = atan2(-T21, T22)
        roll = asin(T23)
        pitch = atan2(-T13, T33)
        return (roll, pitch, yaw)

    def from_euler312(self, roll, pitch, yaw):
        '''fill the matrix from Euler angles in radians in 312 convention'''
        c3 = cos(pitch)
        s3 = sin(pitch)
        s2 = sin(roll)
        c2 = cos(roll)
        s1 = sin(yaw)
        c1 = cos(yaw)

        self.a.x = c1 * c3 - s1 * s2 * s3
        self.b.y = c1 * c2
        self.c.z = c3 * c2
        self.a.y = -c2*s1
        self.a.z = s3*c1 + c3*s2*s1
        self.b.x = c3*s1 + s3*s2*c1
        self.b.z = s1*s3 - s2*c1*c3
        self.c.x = -s3*c2
        self.c.y = s2

    def __add__(self, m):
        return Matrix3(self.a + m.a, self.b + m.b, self.c + m.c)

    __radd__ = __add__

    def __sub__(self, m):
        return Matrix3(self.a - m.a, self.b - m.b, self.c - m.c)

    def __rsub__(self, m):
        return Matrix3(m.a - self.a, m.b - self.b, m.c - self.c)

    def __mul__(self, other):
        '''matrix*vector, matrix*matrix, or matrix*scalar product'''
        if isinstance(other, Vector3):
            v = other
            return Vector3(self.a.x * v.x + self.a.y * v.y + self.a.z * v.z,
                           self.b.x * v.x + self.b.y * v.y + self.b.z * v.z,
                           self.c.x * v.x + self.c.y * v.y + self.c.z * v.z)
        elif isinstance(other, Matrix3):
            m = other
            return Matrix3(Vector3(self.a.x * m.a.x + self.a.y * m.b.x + self.a.z * m.c.x,
                                   self.a.x * m.a.y + self.a.y * m.b.y + self.a.z * m.c.y,
                                   self.a.x * m.a.z + self.a.y * m.b.z + self.a.z * m.c.z),
                           Vector3(self.b.x * m.a.x + self.b.y * m.b.x + self.b.z * m.c.x,
                                   self.b.x * m.a.y + self.b.y * m.b.y + self.b.z * m.c.y,
                                   self.b.x * m.a.z + self.b.y * m.b.z + self.b.z * m.c.z),
                           Vector3(self.c.x * m.a.x + self.c.y * m.b.x + self.c.z * m.c.x,
                                   self.c.x * m.a.y + self.c.y * m.b.y + self.c.z * m.c.y,
                                   self.c.x * m.a.z + self.c.y * m.b.z + self.c.z * m.c.z))
        v = other
        return Matrix3(self.a * v, self.b * v, self.c * v)

    def __div__(self, v):
        '''scalar division (Python 2 name)'''
        return Matrix3(self.a / v, self.b / v, self.c / v)

    # BUGFIX: Python 3 maps the / operator to __truediv__; without this alias
    # "matrix / scalar" raises TypeError under Python 3
    __truediv__ = __div__

    def __neg__(self):
        return Matrix3(-self.a, -self.b, -self.c)

    def __copy__(self):
        return Matrix3(self.a, self.b, self.c)

    copy = __copy__

    def rotate(self, g):
        '''rotate the matrix by a given amount on 3 axes (small-angle update)'''
        temp_matrix = Matrix3()
        a = self.a
        b = self.b
        c = self.c
        temp_matrix.a.x = a.y * g.z - a.z * g.y
        temp_matrix.a.y = a.z * g.x - a.x * g.z
        temp_matrix.a.z = a.x * g.y - a.y * g.x
        temp_matrix.b.x = b.y * g.z - b.z * g.y
        temp_matrix.b.y = b.z * g.x - b.x * g.z
        temp_matrix.b.z = b.x * g.y - b.y * g.x
        temp_matrix.c.x = c.y * g.z - c.z * g.y
        temp_matrix.c.y = c.z * g.x - c.x * g.z
        temp_matrix.c.z = c.x * g.y - c.y * g.x
        self.a += temp_matrix.a
        self.b += temp_matrix.b
        self.c += temp_matrix.c

    def normalize(self):
        '''re-normalise a rotation matrix (remove accumulated drift)'''
        error = self.a * self.b
        t0 = self.a - (self.b * (0.5 * error))
        t1 = self.b - (self.a * (0.5 * error))
        t2 = t0 % t1
        self.a = t0 * (1.0 / t0.length())
        self.b = t1 * (1.0 / t1.length())
        self.c = t2 * (1.0 / t2.length())

    def trace(self):
        '''the trace of the matrix'''
        return self.a.x + self.b.y + self.c.z
def test_euler():
    '''check that from_euler() and to_euler() are consistent'''
    from math import radians, degrees
    mat = Matrix3()
    # exhaustive sweep over roll/pitch/yaw (pitch limited to +-90 deg,
    # the asin'd angle in the 321 convention)
    for roll in range(-179, 179, 3):
        for pitch in range(-89, 89, 3):
            for yaw in range(-179, 179, 3):
                mat.from_euler(radians(roll), radians(pitch), radians(yaw))
                (roll2, pitch2, yaw2) = mat.to_euler()
                expected = Vector3(roll, pitch, yaw)
                actual = Vector3(degrees(roll2), degrees(pitch2), degrees(yaw2))
                error = expected - actual
                if error.length() > 1.0e-12:
                    print('EULER ERROR:', expected, actual, error.length())
def test_euler312_single(r, p, y):
    '''check that from_euler312() and to_euler312() are consistent for one set of values'''
    from math import degrees, radians
    mat = Matrix3()
    mat.from_euler312(radians(r), radians(p), radians(y))
    (r2, p2, y2) = mat.to_euler312()
    expected = Vector3(r, p, y)
    actual = Vector3(degrees(r2), degrees(p2), degrees(y2))
    error = expected - actual
    if error.length() > 1.0e-12:
        print('EULER ERROR:', expected, actual, error.length())
def test_one_axis(r, p, y):
    '''check that from_euler312() and from_euler() are consistent for one set of values on one axis'''
    from math import degrees, radians
    mat = Matrix3()
    mat.from_euler312(radians(r), radians(p), radians(y))
    # single-axis rotations must round-trip through the 321 convention too
    (r2, p2, y2) = mat.to_euler()
    expected = Vector3(r, p, y)
    actual = Vector3(degrees(r2), degrees(p2), degrees(y2))
    error = expected - actual
    if error.length() > 1.0e-12:
        print('EULER ERROR:', expected, actual, error.length())
def test_euler312():
    '''check that from_euler312() and to_euler312() are consistent'''
    # NOTE: removed an unused local "m = Matrix3()" - each helper builds its
    # own matrix
    # single-axis rotations must also agree with the 321 convention
    for x in range(-89, 89, 3):
        test_one_axis(x, 0, 0)
        test_one_axis(0, x, 0)
        test_one_axis(0, 0, x)
    # roll is the asin'd angle in the 312 convention, hence limited to +-90 deg
    for r in range(-89, 89, 3):
        for p in range(-179, 179, 3):
            for y in range(-179, 179, 3):
                test_euler312_single(r, p, y)
if __name__ == "__main__":
    # run doctests (none currently defined) and the exhaustive Euler-angle
    # round-trip self checks; errors are printed rather than raised
    import doctest
    doctest.testmod()
    test_euler()
    test_euler312()
| Yndal/ArduPilot-SensorPlatform | ardupilot/Tools/autotest/pysim/rotmat.py | Python | mit | 10,966 |
#
# ===============================================================
# Description: Test performance of dynamic repartitioning on
# clustering program.
#
# Created: 12/08/2013 01:09:08 PM
#
# Author: Ayush Dubey, dubey@cs.cornell.edu
#
# Copyright (C) 2013, Cornell University, see the LICENSE file
# for licensing agreement
# ===============================================================
#
import random
import sys
import time
import threading
import weaver.client as client

# number of repartitioning (stream migration) passes to run between the two
# benchmark passes
num_migr = 5
# shared coordination state for the client threads; guarded by cv
num_started = 0
num_finished = 0
num_clients = 1
cv = threading.Condition()
def exec_clusterings(reqs, cl, exec_time, idx):
    # Run one clustering request per node handle in reqs on client cl and
    # store the elapsed wall-clock time in exec_time[idx].
    # Every worker thread blocks on cv until the main thread flags the start,
    # so the timing windows of all clients overlap.
    global num_started
    global cv
    global num_clients
    global num_finished
    with cv:
        while num_started < num_clients:
            cv.wait()
    cp = client.ClusteringParams()
    start = time.time()
    cnt = 0
    for r in reqs:
        cnt += 1
        prog_args = [(r, cp)]
        response = cl.run_clustering_program(prog_args)
        # progress output only from client 1 to keep stdout readable
        if cnt % 1000 == 0 and idx == 1:
            print 'done ' + str(cnt) + ' by client ' + str(idx)
    end = time.time()
    with cv:
        num_finished += 1
        cv.notify_all()
    # NOTE(review): exec_time is written after num_finished is signalled, so
    # the main thread may observe completion before the timing is stored -
    # harmless here since the main thread only reads total_time, but verify
    exec_time[idx] = end - start
num_requests = 10000
num_nodes = 81306 # snap twitter-combined
# node handles are range(0, num_nodes)
num_vts = 1
clients = []
for i in range(num_clients):
    clients.append(client.Client(client._CLIENT_ID + i, i % num_vts))
# pre-generate a random node handle for every request of every client so
# that both benchmark passes issue an identical workload
reqs = []
for i in range(num_clients):
    cl_reqs = []
    for numr in range(num_requests):
        cl_reqs.append(random.randint(0, num_nodes-1))
    reqs.append(cl_reqs)
# run before
exec_time = [0] * num_clients
threads = []
print 'Starting first set of requests'
for i in range(num_clients):
    thr = threading.Thread(target=exec_clusterings, args=(reqs[i], clients[i], exec_time, i))
    thr.start()
    threads.append(thr)
# release all worker threads at once, then wait for them to finish
start_time = time.time()
with cv:
    num_started = num_clients
    cv.notify_all()
    while num_finished < num_clients:
        cv.wait()
end_time = time.time()
total_time = end_time - start_time
for thr in threads:
    thr.join()
print 'Total time = ' + str(total_time)
throughput = (num_requests * num_clients) / total_time
print 'Throughput = ' + str(throughput)
print 'Done first set of requests'
# repartition the graph between the two passes
migr_time = time.time()
for mrun in range(1,num_migr+1):
    clients[0].single_stream_migration()
    print 'Done repartitioning stream ' + str(mrun)
migr_time = time.time() - migr_time
print 'Migration time total: ' + str(migr_time)
# run after: identical workload, measures the effect of repartitioning
exec_time = [0] * num_clients
threads = []
num_started = 0
num_finished = 0
print 'Starting second set of requests'
for i in range(num_clients):
    thr = threading.Thread(target=exec_clusterings, args=(reqs[i], clients[i], exec_time, i))
    thr.start()
    threads.append(thr)
start_time = time.time()
with cv:
    num_started = num_clients
    cv.notify_all()
    while num_finished < num_clients:
        cv.wait()
end_time = time.time()
total_time = end_time - start_time
for thr in threads:
    thr.join()
print 'Total time = ' + str(total_time)
throughput = (num_requests * num_clients) / total_time
print 'Throughput = ' + str(throughput)
print 'Done second set of requests'
| dubey/weaver | tests/python/benchmarks/clustering_migration.py | Python | bsd-3-clause | 3,266 |
"""
Complex example which is a combination of the rr* examples from the zguide.
"""
from gevent import spawn
from gevent_zeromq import zmq
# server
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.connect("tcp://localhost:5560")
def serve(socket):
while True:
message = socket.recv()
print "Received request: ", message
socket.send("World")
server = spawn(serve, socket)
# client
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5559")
# Do 10 requests, waiting each time for a response
def client():
for request in range(1,10):
socket.send("Hello")
message = socket.recv()
print "Received reply ", request, "[", message, "]"
# broker
frontend = context.socket(zmq.XREP)
backend = context.socket(zmq.XREQ);
frontend.bind("tcp://*:5559")
backend.bind("tcp://*:5560")
def proxy(socket_from, socket_to):
while True:
m = socket_from.recv_multipart()
socket_to.send_multipart(m)
a = spawn(proxy, frontend, backend)
b = spawn(proxy, backend, frontend)
spawn(client).join()
| ContinuumIO/gevent-zeromq | examples/reqrep.py | Python | bsd-3-clause | 1,110 |
# -*- coding: latin -*-
# User-facing reply templates (Portuguese).  These are runtime strings
# sent verbatim to MUD users -- do not translate or reformat them.
NO_ARGS = "Contar o que a quem?"
SELF = "A falar contigo proprio?"
TARGET = "%s conta-te: %s"
MSG = "Tu contas a %s: %s"
NO_TARGET = "O '%s' nao esta' online."
# Usage text for the .tell command (trailing \r kept for telnet clients).
HELP = """Uso: .tell <utilizador> <texto>\r
Envia o <texto> ao <utilizador> de forma privada."""
import src.messages
class Execute:
    """Deliver a private .tell message from *session* to another user.

    Replies with an error message when no target/text was given, when the
    sender targets themselves, or when the target is not online.
    """

    def __init__(self, session, message):
        self.message = message.split()
        if len(self.message) < 2:
            src.messages.Message(session).User(NO_ARGS)
            return
        target = self.message[0].strip().lower()
        # Text after the first token; offset assumes a single separator char.
        body = message[len(self.message[0]) + 1: ]
        if target == session.user.name.strip().lower():
            src.messages.Message(session).User(SELF)
        elif target in session.allusers.userdict:
            src.messages.Message(session).Target(target, (TARGET % (session.user.name, body)))
            src.messages.Message(session).User(MSG % (session.allusers.userdict[target].name, body))
        else:
            src.messages.Message(session).User(NO_TARGET % self.message[0])
class Help:
    """Send the .tell usage text back to the requesting session."""

    def __init__(self, session):
        src.messages.Message(session).User(HELP)
| marado/pytalker | commands-pt/tell.py | Python | gpl-2.0 | 1,142 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for the rc_header formatter'''
# GRD samples exceed the 80 character limit.
# pylint: disable-msg=C6310
import os
import sys
if __name__ == '__main__':
  # Allow running this test directly from its own directory by putting
  # the grit package root onto the import path.
  sys.path[0] = os.path.abspath(os.path.join(sys.path[0], '../..'))
import StringIO
import unittest
from grit import exception
from grit import grd_reader
from grit import util
from grit.format import rc_header
class RcHeaderFormatterUnittest(unittest.TestCase):
  """Tests define/emit generation of grit.format.rc_header (Python 2)."""
  def FormatAll(self, grd):
    """Formats all defines for |grd| and strips spaces to ease matching."""
    output = rc_header.FormatDefines(grd, grd.ShouldOutputAllResourceDefines())
    return ''.join(output).replace(' ', '')
  def testFormatter(self):
    """Includes and messages each get a define with their assigned ID."""
    grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en" current_release="3" base_dir=".">
        <release seq="3">
          <includes first_id="300" comment="bingo">
            <include type="gif" name="ID_LOGO" file="images/logo.gif" />
          </includes>
          <messages first_id="10000">
            <message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
              Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
            </message>
            <message name="IDS_BONGO">
              Bongo!
            </message>
          </messages>
          <structures>
            <structure type="dialog" name="IDD_NARROW_DIALOG" file="rc_files/dialogs.rc" />
            <structure type="version" name="VS_VERSION_INFO" file="rc_files/version.rc" />
          </structures>
        </release>
      </grit>'''), '.')
    output = self.FormatAll(grd)
    self.failUnless(output.count('IDS_GREETING10000'))
    self.failUnless(output.count('ID_LOGO300'))
  def testOnlyDefineResourcesThatSatisfyOutputCondition(self):
    """With output_all_resource_defines=false, false <if> branches get no
    define, but their ID slot is still consumed (10001 is skipped)."""
    grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en" current_release="3"
          base_dir="." output_all_resource_defines="false">
        <release seq="3">
          <includes first_id="300" comment="bingo">
            <include type="gif" name="ID_LOGO" file="images/logo.gif" />
          </includes>
          <messages first_id="10000">
            <message name="IDS_FIRSTPRESENTSTRING" desc="Present in .rc file.">
              I will appear in the .rc file.
            </message>
            <if expr="False"> <!--Do not include in the .rc files until used.-->
              <message name="IDS_MISSINGSTRING" desc="Not present in .rc file.">
                I will not appear in the .rc file.
              </message>
            </if>
            <if expr="lang != 'es'">
              <message name="IDS_LANGUAGESPECIFICSTRING" desc="Present in .rc file.">
                Hello.
              </message>
            </if>
            <if expr="lang == 'es'">
              <message name="IDS_LANGUAGESPECIFICSTRING" desc="Present in .rc file.">
                Hola.
              </message>
            </if>
            <message name="IDS_THIRDPRESENTSTRING" desc="Present in .rc file.">
              I will also appear in the .rc file.
            </message>
          </messages>
        </release>
      </grit>'''), '.')
    output = self.FormatAll(grd)
    self.failUnless(output.count('IDS_FIRSTPRESENTSTRING10000'))
    self.failIf(output.count('IDS_MISSINGSTRING'))
    self.failIf(output.count('10001'))  # IDS_MISSINGSTRING should get this ID
    self.failUnless(output.count('IDS_LANGUAGESPECIFICSTRING10002'))
    self.failUnless(output.count('IDS_THIRDPRESENTSTRING10003'))
  def testExplicitFirstIdOverlaps(self):
    """An explicit first_id that lands in an existing range must raise."""
    # second first_id will overlap preexisting range
    grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en" current_release="3" base_dir=".">
        <release seq="3">
          <includes first_id="300" comment="bingo">
            <include type="gif" name="ID_LOGO" file="images/logo.gif" />
            <include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
          </includes>
          <messages first_id="301">
            <message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
              Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
            </message>
            <message name="IDS_SMURFGEBURF">Frubegfrums</message>
          </messages>
        </release>
      </grit>'''), '.')
    self.assertRaises(exception.IdRangeOverlap, self.FormatAll, grd)
  def testImplicitOverlapsPreexisting(self):
    """An implicitly assigned ID that lands in an existing range must raise."""
    # second message in <messages> will overlap preexisting range
    grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en" current_release="3" base_dir=".">
        <release seq="3">
          <includes first_id="301" comment="bingo">
            <include type="gif" name="ID_LOGO" file="images/logo.gif" />
            <include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
          </includes>
          <messages first_id="300">
            <message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
              Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
            </message>
            <message name="IDS_SMURFGEBURF">Frubegfrums</message>
          </messages>
        </release>
      </grit>'''), '.')
    self.assertRaises(exception.IdRangeOverlap, self.FormatAll, grd)
  def testEmit(self):
    """Only rc_header outputs in live <if> branches contribute emits."""
    grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en" current_release="3" base_dir=".">
      <outputs>
        <output type="rc_all" filename="dummy">
          <emit emit_type="prepend">Wrong</emit>
        </output>
        <if expr="False">
          <output type="rc_header" filename="dummy">
            <emit emit_type="prepend">No</emit>
          </output>
        </if>
        <output type="rc_header" filename="dummy">
          <emit emit_type="append">Error</emit>
        </output>
        <output type="rc_header" filename="dummy">
          <emit emit_type="prepend">Bingo</emit>
        </output>
      </outputs>
    </grit>'''), '.')
    output = ''.join(rc_header.Format(grd, 'en', '.'))
    output = util.StripBlankLinesAndComments(output)
    self.assertEqual('#pragma once\nBingo', output)
# Standard unittest entry point.
if __name__ == '__main__':
  unittest.main()
| leighpauls/k2cro4 | tools/grit/grit/format/rc_header_unittest.py | Python | bsd-3-clause | 6,724 |
from typing import List
from bson import ObjectId
from common.common_consts import zero_trust_consts
from monkey_island.cc.models.zero_trust.event import Event
from monkey_island.cc.models.zero_trust.monkey_finding import MonkeyFinding
from monkey_island.cc.models.zero_trust.monkey_finding_details import MonkeyFindingDetails
class MonkeyZTFindingService:
    """Persistence helpers for zero-trust findings reported by monkeys."""

    @staticmethod
    def create_or_add_to_existing(test: str, status: str, events: List[Event]):
        """Append *events* to the finding matching (test, status), creating
        the finding first when none exists yet.

        :raises AssertionError: when more than one finding already matches
            the (test, status) pair -- this helper must not be used then.
        """
        matches = list(MonkeyFinding.objects(test=test, status=status))
        assert len(matches) < 2, "More than one finding exists for {}:{}".format(
            test, status
        )
        if matches:
            # Exactly one finding matches -- extend it with the new events.
            MonkeyZTFindingService.add_events(matches[0], events)
        else:
            MonkeyZTFindingService.create_new_finding(test, status, events)

    @staticmethod
    def create_new_finding(test: str, status: str, events: List[Event]):
        """Persist a fresh details document and wrap it in a new finding."""
        detail_doc = MonkeyFindingDetails()
        detail_doc.events = events
        detail_doc.save()
        MonkeyFinding.save_finding(test, status, detail_doc)

    @staticmethod
    def add_events(finding: MonkeyFinding, events: List[Event]):
        """Append *events* to the finding's lazily-referenced details doc."""
        details = finding.details.fetch()
        details.add_events(events).save()

    @staticmethod
    def get_events_by_finding(finding_id: str) -> List[object]:
        """Return every event document stored under the given finding id."""
        finding = MonkeyFinding.objects.get(id=finding_id)
        pipeline = [
            {"$match": {"_id": ObjectId(finding.details.id)}},
            {"$unwind": "$events"},
            {"$project": {"events": "$events"}},
            {"$replaceRoot": {"newRoot": "$events"}},
        ]
        return list(MonkeyFindingDetails.objects.aggregate(*pipeline))

    @staticmethod
    def add_malicious_activity_to_timeline(events):
        """Record malicious-activity events on the global timeline finding."""
        MonkeyZTFindingService.create_or_add_to_existing(
            test=zero_trust_consts.TEST_MALICIOUS_ACTIVITY_TIMELINE,
            status=zero_trust_consts.STATUS_VERIFY,
            events=events,
        )
| guardicore/monkey | monkey/monkey_island/cc/services/zero_trust/monkey_findings/monkey_zt_finding_service.py | Python | gpl-3.0 | 2,388 |
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2014 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""
Funnel chart
"""
from __future__ import division
from pygal.util import decorate, cut, compute_scale
from pygal.adapters import positive, none_to_zero
from pygal.graph.graph import Graph
class Funnel(Graph):
    """Funnel graph: each serie is a column of horizontal trapezoid
    slices whose heights are proportional to its values."""

    _adapters = [positive, none_to_zero]

    def _format(self, value):
        # Slices are laid out symmetrically around zero, so the sign is a
        # layout artifact, not data -- always display the magnitude.
        return super(Funnel, self)._format(abs(value))

    def funnel(self, serie_node, serie, index):
        """Draw the funnel polygons (one per value) for one serie."""
        # Named helper instead of a lambda assignment (PEP 8 E731).
        def format_coords(xy):
            # Render an (x, y) pair as one SVG points-list entry.
            return '%f %f' % xy

        for i, poly in enumerate(serie.points):
            metadata = serie.metadata.get(i)
            value = self._format(serie.values[i])
            funnels = decorate(
                self.svg,
                self.svg.node(serie_node['plot'], class_="funnels"),
                metadata)
            self.svg.node(
                funnels, 'polygon',
                points=' '.join(map(format_coords, map(self.view, poly))),
                class_='funnel reactive tooltip-trigger')
            # Tooltip anchor: x from the serie's label slot, y at the
            # vertical center of the polygon.
            x, y = self.view((
                self._x_labels[index][1],  # Poly center from label
                sum([point[1] for point in poly]) / len(poly)))
            self._tooltip_data(funnels, value, x, y, classes='centered')
            self._static_value(serie_node, value, x, y)

    def _compute(self):
        """Compute polygon coordinates, box bounds and axis labels."""
        x_pos = [
            (x + 1) / self._order for x in range(self._order)
        ] if self._order != 1 else [.5]  # Center if only one value
        # previous[j] tracks the [top, bottom] y of value j drawn so far,
        # so successive series stack horizontally.
        previous = [[self.zero, self.zero] for i in range(self._len)]
        for i, serie in enumerate(self.series):
            y_height = - sum(serie.safe_values) / 2
            all_x_pos = [0] + x_pos
            serie.points = []
            for j, value in enumerate(serie.values):
                poly = []
                poly.append((all_x_pos[i], previous[j][0]))
                poly.append((all_x_pos[i], previous[j][1]))
                previous[j][0] = y_height
                y_height = previous[j][1] = y_height + value
                poly.append((all_x_pos[i + 1], previous[j][1]))
                poly.append((all_x_pos[i + 1], previous[j][0]))
                serie.points.append(poly)
        val_max = max(list(map(sum, cut(self.series, 'values'))) + [self.zero])
        self._box.ymin = -val_max
        self._box.ymax = val_max
        y_pos = compute_scale(
            self._box.ymin, self._box.ymax, self.logarithmic, self.order_min
        ) if not self.y_labels else map(float, self.y_labels)
        self._x_labels = list(
            zip(cut(self.series, 'title'),
                map(lambda x: x - 1 / (2 * self._order), x_pos)))
        self._y_labels = list(zip(map(self._format, y_pos), y_pos))

    def _plot(self):
        """Draw every serie."""
        for index, serie in enumerate(self.series):
            self.funnel(
                self._serie(index), serie, index)
| colevscode/pygal | pygal/graph/funnel.py | Python | lgpl-3.0 | 3,592 |
#!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_switch_setup
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to modify switch-setup
description:
- This module can be used to modify switch setup.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: false
type: str
state:
description:
- State the action to perform. Use C(update) to modify the switch-setup.
required: true
type: str
choices: ['update']
pn_force:
description:
- Force analytics-store change even if it involves removing data.
required: false
type: bool
pn_dns_ip:
description:
- DNS IP address.
required: false
type: str
pn_mgmt_netmask:
description:
- Netmask.
required: false
type: str
pn_gateway_ip6:
description:
- Gateway IPv6 address.
required: false
type: str
pn_in_band_ip6_assign:
description:
- Data IPv6 address assignment.
required: false
type: str
choices: ['none', 'autoconf']
pn_domain_name:
description:
- Domain name.
required: false
type: str
pn_timezone:
description:
- Timezone to be configured.
required: false
type: str
pn_in_band_netmask:
description:
- Data in-band netmask.
required: false
type: str
pn_in_band_ip6:
description:
- Data in-band IPv6 address.
required: false
type: str
pn_in_band_netmask_ip6:
description:
- Data in-band IPv6 netmask.
required: false
type: str
pn_motd:
description:
- Message of the Day.
required: false
type: str
pn_loopback_ip6:
description:
- loopback IPv6 address.
required: false
type: str
pn_mgmt_ip6_assignment:
description:
- IPv6 address assignment.
required: false
choices: ['none', 'autoconf']
pn_ntp_secondary_server:
description:
- Secondary NTP server.
required: false
type: str
pn_in_band_ip:
description:
- data in-band IP address.
required: false
type: str
pn_eula_accepted:
description:
- Accept EULA.
required: false
type: str
choices: ['true', 'false']
pn_mgmt_ip:
description:
- Management IP address.
required: false
type: str
pn_ntp_server:
description:
- NTP server.
required: false
type: str
pn_mgmt_ip_assignment:
description:
- IP address assignment.
required: false
type: str
choices: ['none', 'dhcp']
pn_date:
description:
- Date.
required: false
type: str
pn_password:
description:
- plain text password.
required: false
type: str
pn_banner:
description:
- Banner to display on server-switch.
required: false
type: str
pn_loopback_ip:
description:
- loopback IPv4 address.
required: false
type: str
pn_dns_secondary_ip:
description:
- secondary DNS IP address.
required: false
type: str
pn_switch_name:
description:
- switch name.
required: false
type: str
pn_eula_timestamp:
description:
- EULA timestamp.
required: false
type: str
pn_mgmt_netmask_ip6:
description:
- IPv6 netmask.
required: false
type: str
pn_enable_host_ports:
description:
- Enable host ports by default.
required: false
type: bool
pn_mgmt_ip6:
description:
- IPv6 address.
required: false
type: str
pn_analytics_store:
description:
- type of disk storage for analytics.
required: false
type: str
choices: ['default', 'optimized']
pn_gateway_ip:
description:
- gateway IPv4 address.
required: false
type: str
"""
EXAMPLES = """
- name: Modify switch
pn_switch_setup:
pn_cliswitch: "sw01"
state: "update"
pn_timezone: "America/New_York"
pn_in_band_ip: "20.20.1.1"
pn_in_band_netmask: "24"
- name: Modify switch
pn_switch_setup:
pn_cliswitch: "sw01"
state: "update"
pn_in_band_ip6: "2001:0db8:85a3::8a2e:0370:7334"
pn_in_band_netmask_ip6: "127"
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the switch-setup command.
returned: always
type: list
stderr:
description: set of error responses from the switch-setup command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, booleanArgs, run_cli
def main():
    """Entry point: parse module arguments and run switch-setup-modify.

    Builds a Netvisor CLI command from the supplied parameters -- only the
    parameters the user actually set are appended as CLI options -- then
    executes it via run_cli, which also handles result reporting.
    """
    state_map = dict(
        update='switch-setup-modify'
    )

    module = AnsibleModule(
        argument_spec=dict(
            pn_cliswitch=dict(required=False, type='str'),
            state=dict(required=True, type='str',
                       choices=['update']),
            pn_force=dict(required=False, type='bool'),
            pn_dns_ip=dict(required=False, type='str'),
            pn_mgmt_netmask=dict(required=False, type='str'),
            pn_gateway_ip6=dict(required=False, type='str'),
            pn_in_band_ip6_assign=dict(required=False, type='str',
                                       choices=['none', 'autoconf']),
            pn_domain_name=dict(required=False, type='str'),
            pn_timezone=dict(required=False, type='str'),
            pn_in_band_netmask=dict(required=False, type='str'),
            pn_in_band_ip6=dict(required=False, type='str'),
            pn_in_band_netmask_ip6=dict(required=False, type='str'),
            pn_motd=dict(required=False, type='str'),
            pn_loopback_ip6=dict(required=False, type='str'),
            pn_mgmt_ip6_assignment=dict(required=False, type='str',
                                        choices=['none', 'autoconf']),
            pn_ntp_secondary_server=dict(required=False, type='str'),
            pn_in_band_ip=dict(required=False, type='str'),
            pn_eula_accepted=dict(required=False, type='str',
                                  choices=['true', 'false']),
            pn_mgmt_ip=dict(required=False, type='str'),
            pn_ntp_server=dict(required=False, type='str'),
            pn_mgmt_ip_assignment=dict(required=False, type='str',
                                       choices=['none', 'dhcp']),
            pn_date=dict(required=False, type='str'),
            pn_password=dict(required=False, type='str', no_log=True),
            pn_banner=dict(required=False, type='str'),
            pn_loopback_ip=dict(required=False, type='str'),
            pn_dns_secondary_ip=dict(required=False, type='str'),
            pn_switch_name=dict(required=False, type='str'),
            pn_eula_timestamp=dict(required=False, type='str'),
            pn_mgmt_netmask_ip6=dict(required=False, type='str'),
            pn_enable_host_ports=dict(required=False, type='bool'),
            pn_mgmt_ip6=dict(required=False, type='str'),
            pn_analytics_store=dict(required=False, type='str',
                                    choices=['default', 'optimized']),
            pn_gateway_ip=dict(required=False, type='str'),
        ),
        # At least one actual setting must be supplied besides state/switch.
        required_one_of=[['pn_force', 'pn_dns_ip', 'pn_mgmt_netmask',
                          'pn_gateway_ip6', 'pn_in_band_ip6_assign',
                          'pn_domain_name', 'pn_timezone',
                          'pn_in_band_netmask', 'pn_in_band_ip6',
                          'pn_in_band_netmask_ip6', 'pn_motd',
                          'pn_loopback_ip6', 'pn_mgmt_ip6_assignment',
                          'pn_ntp_secondary_server', 'pn_in_band_ip',
                          'pn_eula_accepted', 'pn_mgmt_ip',
                          'pn_ntp_server', 'pn_mgmt_ip_assignment',
                          'pn_date', 'pn_password',
                          'pn_banner', 'pn_loopback_ip',
                          'pn_dns_secondary_ip', 'pn_switch_name',
                          'pn_eula_timestamp', 'pn_mgmt_netmask_ip6',
                          'pn_enable_host_ports', 'pn_mgmt_ip6',
                          'pn_analytics_store', 'pn_gateway_ip']],
        # IP addresses must always be given together with their netmask.
        required_together=[['pn_in_band_ip6', 'pn_in_band_netmask_ip6'],
                           ['pn_in_band_ip', 'pn_in_band_netmask'],
                           ['pn_mgmt_ip', 'pn_mgmt_netmask'],
                           ['pn_mgmt_ip6', 'pn_mgmt_netmask_ip6']],
    )

    # Accessing the arguments
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    force = module.params['pn_force']
    dns_ip = module.params['pn_dns_ip']
    mgmt_netmask = module.params['pn_mgmt_netmask']
    gateway_ip6 = module.params['pn_gateway_ip6']
    in_band_ip6_assign = module.params['pn_in_band_ip6_assign']
    domain_name = module.params['pn_domain_name']
    timezone = module.params['pn_timezone']
    in_band_netmask = module.params['pn_in_band_netmask']
    in_band_ip6 = module.params['pn_in_band_ip6']
    in_band_netmask_ip6 = module.params['pn_in_band_netmask_ip6']
    motd = module.params['pn_motd']
    loopback_ip6 = module.params['pn_loopback_ip6']
    mgmt_ip6_assignment = module.params['pn_mgmt_ip6_assignment']
    ntp_secondary_server = module.params['pn_ntp_secondary_server']
    in_band_ip = module.params['pn_in_band_ip']
    eula_accepted = module.params['pn_eula_accepted']
    mgmt_ip = module.params['pn_mgmt_ip']
    ntp_server = module.params['pn_ntp_server']
    mgmt_ip_assignment = module.params['pn_mgmt_ip_assignment']
    date = module.params['pn_date']
    password = module.params['pn_password']
    banner = module.params['pn_banner']
    loopback_ip = module.params['pn_loopback_ip']
    dns_secondary_ip = module.params['pn_dns_secondary_ip']
    switch_name = module.params['pn_switch_name']
    eula_timestamp = module.params['pn_eula_timestamp']
    mgmt_netmask_ip6 = module.params['pn_mgmt_netmask_ip6']
    enable_host_ports = module.params['pn_enable_host_ports']
    mgmt_ip6 = module.params['pn_mgmt_ip6']
    analytics_store = module.params['pn_analytics_store']
    gateway_ip = module.params['pn_gateway_ip']

    command = state_map[state]

    # Building the CLI command string
    cli = pn_cli(module, cliswitch)
    if command == 'switch-setup-modify':
        cli += ' %s ' % command
        if dns_ip:
            cli += ' dns-ip ' + dns_ip
        if mgmt_netmask:
            cli += ' mgmt-netmask ' + mgmt_netmask
        if gateway_ip6:
            cli += ' gateway-ip6 ' + gateway_ip6
        if in_band_ip6_assign:
            cli += ' in-band-ip6-assign ' + in_band_ip6_assign
        if domain_name:
            cli += ' domain-name ' + domain_name
        if timezone:
            cli += ' timezone ' + timezone
        if in_band_netmask:
            cli += ' in-band-netmask ' + in_band_netmask
        if in_band_ip6:
            cli += ' in-band-ip6 ' + in_band_ip6
        if in_band_netmask_ip6:
            cli += ' in-band-netmask-ip6 ' + in_band_netmask_ip6
        if motd:
            cli += ' motd ' + motd
        if loopback_ip6:
            cli += ' loopback-ip6 ' + loopback_ip6
        if mgmt_ip6_assignment:
            cli += ' mgmt-ip6-assignment ' + mgmt_ip6_assignment
        if ntp_secondary_server:
            cli += ' ntp-secondary-server ' + ntp_secondary_server
        if in_band_ip:
            cli += ' in-band-ip ' + in_band_ip
        if eula_accepted:
            cli += ' eula-accepted ' + eula_accepted
        if mgmt_ip:
            cli += ' mgmt-ip ' + mgmt_ip
        if ntp_server:
            cli += ' ntp-server ' + ntp_server
        if mgmt_ip_assignment:
            cli += ' mgmt-ip-assignment ' + mgmt_ip_assignment
        if date:
            cli += ' date ' + date
        if password:
            cli += ' password ' + password
        if banner:
            cli += ' banner ' + banner
        if loopback_ip:
            cli += ' loopback-ip ' + loopback_ip
        if dns_secondary_ip:
            cli += ' dns-secondary-ip ' + dns_secondary_ip
        if switch_name:
            cli += ' switch-name ' + switch_name
        if eula_timestamp:
            # Netvisor CLI options are hyphenated like every other option
            # above; ' eula_timestamp ' (underscore) was a typo the CLI
            # would not recognize.
            cli += ' eula-timestamp ' + eula_timestamp
        if mgmt_netmask_ip6:
            cli += ' mgmt-netmask-ip6 ' + mgmt_netmask_ip6
        if mgmt_ip6:
            cli += ' mgmt-ip6 ' + mgmt_ip6
        if analytics_store:
            cli += ' analytics-store ' + analytics_store
        if gateway_ip:
            cli += ' gateway-ip ' + gateway_ip

        cli += booleanArgs(force, 'force', 'no-force')
        cli += booleanArgs(enable_host_ports, 'enable-host-ports', 'disable-host-ports')

    run_cli(module, cli, state_map)
# Module entry point.
if __name__ == '__main__':
    main()
| alxgu/ansible | lib/ansible/modules/network/netvisor/pn_switch_setup.py | Python | gpl-3.0 | 13,294 |
#!/usr/bin/env python
# Example: NormOne preprocessor combined with a Chi2 kernel (shogun toolbox).
from tools.load import LoadMatrix
lm=LoadMatrix()
# Dense real-valued train/test matrices shipped with the shogun examples.
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
# Each entry: [train data, test data, kernel width, kernel cache size].
parameter_list = [[traindat,testdat,1.4,10],[traindat,testdat,1.5,10]]
def preprocessor_normone(fm_train_real=traindat, fm_test_real=testdat, width=1.4, size_cache=10):
    """Normalize features to unit norm, then build Chi2 kernel matrices.

    Returns (train_kernel_matrix, test_kernel_matrix, kernel).
    """
    from shogun import Chi2Kernel, RealFeatures, NormOne

    train_feats = RealFeatures(fm_train_real)
    test_feats = RealFeatures(fm_test_real)

    # Fit the normalizer on training data, then apply it to both sets.
    normalizer = NormOne()
    normalizer.fit(train_feats)
    train_feats = normalizer.transform(train_feats)
    test_feats = normalizer.transform(test_feats)

    kernel = Chi2Kernel(train_feats, train_feats, width, size_cache)
    km_train = kernel.get_kernel_matrix()
    kernel.init(train_feats, test_feats)
    km_test = kernel.get_kernel_matrix()
    return km_train, km_test, kernel
# Run the first parameter set when executed as a script.
if __name__ == '__main__':
    print('NormOne')
    preprocessor_normone(*parameter_list[0])
| sorig/shogun | examples/undocumented/python/preprocessor_normone.py | Python | bsd-3-clause | 978 |
#!/usr/bin/env python
import base64
import os
from avocado import Test
from avocado import main
class WhiteBoard(Test):

    """
    Simple test that saves test custom data to the test whiteboard

    :param whiteboard_data_file: File to be used as source for whiteboard data
    :param whiteboard_data_size: Size of the generated data of the whiteboard
    :param whiteboard_data_text: Text used when no file supplied
    :param whiteboard_writes: How many times to copy the data into whiteboard
    """

    def test(self):
        """Build the payload (from file or text) and publish it base64-encoded."""
        os.chdir(self.datadir)  # Just to stress out Avocado
        data_file = self.params.get('whiteboard_data_file', default='')
        data_size = self.params.get('whiteboard_data_size', default='10')
        if data_file:
            self.log.info('Writing data to whiteboard from file: %s',
                          data_file)
            # 'with' guarantees the handle is closed (the original leaked it).
            with open(data_file, 'r') as whiteboard_file:
                data = whiteboard_file.read(int(data_size))
        else:
            # Text payload is truncated to size - 1 characters.
            offset = int(data_size) - 1
            data = self.params.get('whiteboard_data_text',
                                   default='default whiteboard text')[0:offset]
        iterations = int(self.params.get('whiteboard_writes', default=1))
        # str * int replaces the original quadratic += concatenation loop
        # and yields the identical result (including '' for iterations <= 0).
        result = data * iterations
        self.whiteboard = base64.encodestring(result)
if __name__ == "__main__":
main()
| thomas-schmid-ubnt/avocado | examples/tests/whiteboard.py | Python | gpl-2.0 | 1,449 |
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``volume.textsrc`` property (a column-source ref)."""

    def __init__(self, plotly_name="textsrc", parent_name="volume", **kwargs):
        # Fill in the defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(TextsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| plotly/python-api | packages/python/plotly/plotly/validators/volume/_textsrc.py | Python | mit | 437 |
import os
import api_user
import re
from logger import Logger
# Bitmessage address versions this bot knows how to handle.
ADDRESSVERSIONS = (3,4)
class BMAMaster:
    """Master controller for the Bitmessage aggregator bot: wraps an API
    user plus address/label bookkeeping driven by the config file.

    Python 2 only (uses str.decode('base64') below).
    """
    def __init__(self, configPath=None, apiUser=None):
        # configPath: path to the key:value config file (see loadConfig).
        # apiUser: preconstructed ApiUser (mainly for testing); a real one
        # is built from the config when omitted.
        self.config = loadConfig(configPath)
        if 'runPath' in self.config:
            os.chdir(self.config['runPath'])
        logPath = self.config['logPath']
        self.logger = Logger(logPath)
        self.mainAddress = self.config['mainAddress']
        self.bittextAddress = self.config['bittextAddress']
        self.chanAddress = self.config['chanAddress']
        self.broadcastAddress = self.config['broadcastAddress']
        if apiUser:
            self.apiUser = apiUser
        else:
            self.apiUser = api_user.ApiUser(config=self.config)
    def getChanAddresses(self):
        '''return a dict mapping chan address to label'''
        addresses = self.apiUser.listAddresses()
        # [6:] drops the leading "[chan]" prefix from the label; strip()
        # removes the separating whitespace.
        return dict([(i['address'], i['label'][6:].strip()) for i in addresses
                     if i['chan']])
    def getChanLabels(self):
        '''return a dict mapping chan labels to addresses'''
        # A label can map to several addresses, hence lists as values.
        addresses = [i for i in self.apiUser.listAddresses() if i['chan']]
        addresses.reverse() #list newer addresses first
        labels = {}
        for i in addresses:
            label = i['label'][6:].strip()
            address = i['address']
            if label in labels:
                labels[label].append(address)
            else:
                labels[label] = [address]
        return labels
    def getSubscriptions(self):
        # Subscription labels come back base64-encoded from the API
        # (Python 2 codec form).
        addresses = self.apiUser.listSubscriptions()
        return dict([(i['address'], i['label'].decode('base64')) for i in addresses])
    def getChansAndSubscriptions(self):
        # Convenience pair: (chan address->label, broadcast address->label).
        chans = self.getChanAddresses()
        subs = self.getSubscriptions()
        return chans, subs
    def getAddressListReport(self):
        # Header template + the formatted address list, as one report body.
        header = self.getText('reportheaders/addressList', **self.config)
        addressList = self.listChansAndSubscriptions()
        report = header + addressList
        return report
    def listChansAndSubscriptions(self):
        # Render chans and broadcasts as two case-insensitively
        # label-sorted, tab-separated sections.
        chans, subs = self.getChansAndSubscriptions()
        chans = [(chans[i],i) for i in chans]
        chans.sort(key=lambda x:x[0].lower())
        subs = [(subs[i],i) for i in subs]
        subs.sort(key=lambda x:x[0].lower())
        lst = 'Chans:\n======\n'
        for label, address in chans:
            lst += '%s\t%s\n' %(address, label)
        lst += '\n\nBroadcasts:\n===========\n'
        for label, address in subs:
            lst += '%s\t%s\n' %(address, label)
        return lst
    def updateBittext(self, id, subject, message):
        # Send a bittext "mod <id> <subject>" message from the main
        # address.  NOTE: 'id' shadows the builtin; kept for interface
        # compatibility.
        self.logger.log('Updating Bittext, %s, %s' %(id, subject))
        fromAddress = self.mainAddress
        toAddress = self.bittextAddress
        fullSubject = 'mod %s %s' %(id, subject)
        self.apiUser.sendMessage(toAddress, fromAddress, fullSubject, message)
    def getText(self, filePath, **args):
        # Read a template file and substitute each $word placeholder with
        # the matching keyword argument.
        def sub(matchobj):
            assert matchobj
            key = matchobj.group(1)
            return args[key]
        with open(filePath, 'r') as file:
            rawText = file.read()
        return re.sub(r'\$(\w+)', sub, rawText)
def loadConfig(configPath=None):
    """Parse a simple ``key: value`` config file into a dict.

    Blank lines are skipped; keys and values are whitespace-stripped.
    Only the FIRST colon separates key from value, so values may contain
    colons themselves (URLs, Windows paths) -- the previous unbounded
    split crashed on such lines.

    :param configPath: path to the config file; defaults to 'config'.
    :return: dict mapping keys to values (all strings).
    """
    if not configPath:
        configPath = 'config'
    args = {}
    # Avoid shadowing the (Python 2) builtin 'file'.
    with open(configPath, 'r') as config_file:
        for line in config_file.readlines():
            if not line.strip():
                continue
            key, value = line.split(':', 1)
            args[key.strip()] = value.strip()
    return args
| Eylrid/BMaggregator | bmamaster.py | Python | mit | 3,582 |
# -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Miscellaneous modules
Contains useful modules that don't belong into any of the
other Cryptodome.* subpackages.
======================== =============================================
Module Description
======================== =============================================
`Cryptodome.Util.number` Number-theoretic functions (primality testing, etc.)
`Cryptodome.Util.Counter` Fast counter functions for CTR cipher modes.
`Cryptodome.Util.RFC1751` Converts between 128-bit keys and human-readable
strings of words.
`Cryptodome.Util.asn1` Minimal support for ASN.1 DER encoding
`Cryptodome.Util.Padding` Set of functions for adding and removing padding.
======================== =============================================
:undocumented: _galois, _number_new, cpuid, py3compat, _raw_api
"""
# Submodules exported via ``from Cryptodome.Util import *``.
__all__ = ['RFC1751', 'number', 'strxor', 'asn1', 'Counter', 'Padding']
| hclivess/Stallion | nuitka/Cryptodome/Util/__init__.py | Python | gpl-3.0 | 1,951 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import argparse
try:
from html import unescape # py3.4+
except ImportError:
from six.moves.html_parser import HTMLParser
unescape = HTMLParser().unescape
import os
import re
import subprocess
import sys
import time
import re
import logging
from codecs import open
from six.moves.urllib.error import URLError
from six.moves.urllib.parse import urlparse
from six.moves.urllib.request import urlretrieve
# pelican.log has to be the first pelican module to be loaded
# because logging.setLoggerClass has to be called before logging.getLogger
from pelican.log import init
from pelican.utils import slugify, SafeDatetime
logger = logging.getLogger(__name__)
def decode_wp_content(content, br=True):
    """Convert raw WordPress post markup into proper HTML paragraphs.

    Python port of WordPress' PHP ``wpautop()``: double line breaks
    become ``<p>`` paragraphs, and (optionally) remaining single line
    breaks become ``<br />``.  ``<pre>`` blocks are sheltered behind
    placeholders so their whitespace is never rewritten.

    :param content: raw post body from a WordPress XML export
    :param br: when True, convert leftover single newlines to ``<br />``
    :returns: HTML string with paragraph markup
    """
    pre_tags = {}
    if content.strip() == "":
        return ""
    content += "\n"
    if "<pre" in content:
        # Replace each <pre>...</pre> block with a unique placeholder tag;
        # the originals are restored verbatim at the very end.
        pre_parts = content.split("</pre>")
        last_pre = pre_parts.pop()
        content = ""
        pre_index = 0
        for pre_part in pre_parts:
            start = pre_part.find("<pre")
            if start == -1:
                content = content + pre_part
                continue
            name = "<pre wp-pre-tag-{0}></pre>".format(pre_index)
            pre_tags[name] = pre_part[start:] + "</pre>"
            content = content + pre_part[0:start] + name
            pre_index += 1
        content = content + last_pre
    content = re.sub(r'<br />\s*<br />', "\n\n", content)
    # Every HTML tag that is block-level for wpautop's purposes.
    allblocks = ('(?:table|thead|tfoot|caption|col|colgroup|tbody|tr|'
                 'td|th|div|dl|dd|dt|ul|ol|li|pre|select|option|form|'
                 'map|area|blockquote|address|math|style|p|h[1-6]|hr|'
                 'fieldset|noscript|samp|legend|section|article|aside|'
                 'hgroup|header|footer|nav|figure|figcaption|details|'
                 'menu|summary)')
    # Isolate block-level tags on their own lines before paragraph splitting.
    content = re.sub(r'(<' + allblocks + r'[^>]*>)', "\n\\1", content)
    content = re.sub(r'(</' + allblocks + r'>)', "\\1\n\n", content)
    if "<object" in content:
        # no <p> inside object/embed
        content = re.sub(r'\s*<param([^>]*)>\s*', "<param\\1>", content)
        content = re.sub(r'\s*</embed>\s*', '</embed>', content)
    # Split on blank lines and wrap every non-empty chunk in <p>...</p>.
    pgraphs = filter(lambda s: s != "", re.split(r'\n\s*\n', content))
    content = ""
    for p in pgraphs:
        content = content + "<p>" + p.strip() + "</p>\n"
    # under certain strange conditions it could create a P of entirely whitespace
    content = re.sub(r'<p>\s*</p>', '', content)
    content = re.sub(r'<p>([^<]+)</(div|address|form)>', "<p>\\1</p></\\2>", content)
    # don't wrap block-level tags in paragraphs
    content = re.sub(r'<p>\s*(</?' + allblocks + r'[^>]*>)\s*</p>', "\\1", content)
    # problem with nested lists
    content = re.sub(r'<p>(<li.*)</p>', "\\1", content)
    content = re.sub(r'<p><blockquote([^>]*)>', "<blockquote\\1><p>", content)
    content = content.replace('</blockquote></p>', '</p></blockquote>')
    content = re.sub(r'<p>\s*(</?' + allblocks + r'[^>]*>)', "\\1", content)
    content = re.sub(r'(</?' + allblocks + r'[^>]*>)\s*</p>', "\\1", content)
    if br:
        def _preserve_newline(match):
            return match.group(0).replace("\n", "<WPPreserveNewline />")
        # BUGFIX: the previous pattern (r'/<(script|style).*?<\/\\1>/s')
        # kept PHP's '/.../s' delimiters and a doubled backreference, so it
        # could never match and newlines inside <script>/<style> were
        # wrongly turned into <br />.
        content = re.sub(r'<(script|style).*?</\1>', _preserve_newline,
                         content, flags=re.DOTALL)
        # optionally make line breaks
        content = re.sub(r'(?<!<br />)\s*\n', "<br />\n", content)
        content = content.replace("<WPPreserveNewline />", "\n")
    # Strip <br /> that directly abuts a block-level tag.
    content = re.sub(r'(</?' + allblocks + r'[^>]*>)\s*<br />', "\\1", content)
    content = re.sub(r'<br />(\s*</?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)', '\\1', content)
    content = re.sub(r'\n</p>', "</p>", content)
    if pre_tags:
        def _multi_replace(dic, string):
            # Substitute every placeholder with its saved <pre> block in one pass.
            pattern = r'|'.join(map(re.escape, dic.keys()))
            return re.sub(pattern, lambda m: dic[m.group()], string)
        content = _multi_replace(pre_tags, content)
    return content
def get_items(xml):
    """Parse a WordPress XML export and return every <item> element."""
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        sys.exit('Missing dependency '
                 '"BeautifulSoup4" and "lxml" required to import Wordpress XML files.')
    with open(xml, encoding='utf-8') as infile:
        markup = infile.read()
    soup = BeautifulSoup(markup, "xml")
    return soup.rss.channel.findAll('item')
def get_filename(filename, post_id):
    """Return *filename* unless it is None, in which case fall back to *post_id*."""
    return post_id if filename is None else filename
def wp2fields(xml, wp_custpost=False):
    """Opens a wordpress XML file, and yield Pelican fields.

    Only items whose status is "publish" or "draft" are imported.
    Each yielded tuple is (title, content, filename, date, author,
    categories, tags, status, kind, markup-format).

    :param xml: path to the WordPress XML export file
    :param wp_custpost: when True, custom post types keep their own
        ``kind`` instead of being imported as plain articles
    """
    items = get_items(xml)
    for item in items:
        if item.find('status').string in ["publish", "draft"]:
            try:
                # Use HTMLParser due to issues with BeautifulSoup 3
                title = unescape(item.title.contents[0])
            except IndexError:
                # item has no title at all; derive one from its slug
                title = 'No title [%s]' % item.find('post_name').string
                logger.warning('Post "%s" is lacking a proper title', title)
            filename = item.find('post_name').string
            post_id = item.find('post_id').string
            # fall back to the numeric post id when the slug is missing
            filename = get_filename(filename, post_id)
            content = item.find('encoded').string
            raw_date = item.find('post_date').string
            date_object = time.strptime(raw_date, "%Y-%m-%d %H:%M:%S")
            date = time.strftime("%Y-%m-%d %H:%M", date_object)
            author = item.find('creator').string
            categories = [cat.string for cat in item.findAll('category', {'domain' : 'category'})]
            # caturl = [cat['nicename'] for cat in item.find(domain='category')]
            tags = [tag.string for tag in item.findAll('category', {'domain' : 'post_tag'})]
            # To publish a post the status should be 'published'
            status = 'published' if item.find('status').string == "publish" else item.find('status').string
            kind = 'article'
            post_type = item.find('post_type').string
            if post_type == 'page':
                kind = 'page'
            elif wp_custpost:
                if post_type == 'post':
                    pass
                # Old behaviour was to name everything not a page as an article.
                # Theoretically all attachments have status == inherit so
                # no attachments should be here. But this statement is to
                # maintain existing behaviour in case that doesn't hold true.
                elif post_type == 'attachment':
                    pass
                else:
                    kind = post_type
            yield (title, content, filename, date, author, categories, tags, status,
                   kind, "wp-html")
def dc2fields(file):
    """Opens a Dotclear export file, and yield pelican fields.

    The export is a flat text file with a ``[category ...]`` section
    followed by a ``[post ...]`` section; each record is a quoted,
    comma-separated row.  Yields one 10-tuple per post in the same
    shape as the other ``*2fields`` importers.
    """
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        error = ('Missing dependency '
                 '"BeautifulSoup4" and "lxml" required to import Dotclear files.')
        sys.exit(error)

    # simple two-state line scanner over the export file
    in_cat = False
    in_post = False
    category_list = {}
    posts = []

    with open(file, 'r', encoding='utf-8') as f:
        for line in f:
            # remove final \n
            line = line[:-1]

            if line.startswith('[category'):
                in_cat = True
            elif line.startswith('[post'):
                in_post = True
            elif in_cat:
                fields = line.split('","')
                if not line:
                    # blank line terminates the category section
                    in_cat = False
                else:
                    # remove 1st and last ""
                    fields[0] = fields[0][1:]
                    # fields[-1] = fields[-1][:-1]
                    # map category id -> category name
                    category_list[fields[0]]=fields[2]
            elif in_post:
                if not line:
                    in_post = False
                    break
                else:
                    posts.append(line)

    print("%i posts read." % len(posts))

    for post in posts:
        # positional fields of a Dotclear post record (unused ones kept
        # as comments for documentation of the export format)
        fields = post.split('","')
        # post_id = fields[0][1:]
        # blog_id = fields[1]
        # user_id = fields[2]
        cat_id = fields[3]
        # post_dt = fields[4]
        # post_tz = fields[5]
        post_creadt = fields[6]
        # post_upddt = fields[7]
        # post_password = fields[8]
        # post_type = fields[9]
        post_format = fields[10]
        # post_url = fields[11]
        # post_lang = fields[12]
        post_title = fields[13]
        post_excerpt = fields[14]
        post_excerpt_xhtml = fields[15]
        post_content = fields[16]
        post_content_xhtml = fields[17]
        # post_notes = fields[18]
        # post_words = fields[19]
        # post_status = fields[20]
        # post_selected = fields[21]
        # post_position = fields[22]
        # post_open_comment = fields[23]
        # post_open_tb = fields[24]
        # nb_comment = fields[25]
        # nb_trackback = fields[26]
        post_meta = fields[27]
        # redirect_url = fields[28][:-1]

        # remove seconds
        post_creadt = ':'.join(post_creadt.split(':')[0:2])

        author = ""
        categories = []
        tags = []

        if cat_id:
            categories = [category_list[id].strip() for id in cat_id.split(',')]

        # Get tags related to a post
        # post_meta is a PHP-serialized blob; strip the wrapper to get at
        # the quoted tag names
        tag = post_meta.replace('{', '').replace('}', '').replace('a:1:s:3:\\"tag\\";a:', '').replace('a:0:', '')
        if len(tag) > 1:
            if int(tag[:1]) == 1:
                # single tag
                newtag = tag.split('"')[1]
                tags.append(
                    BeautifulSoup(
                        newtag
                        , "xml"
                    )
                    # bs4 always outputs UTF-8
                    .decode('utf-8')
                )
            else:
                # multiple tags: every second quoted token is a tag name
                i=1
                j=1
                while(i <= int(tag[:1])):
                    newtag = tag.split('"')[j].replace('\\','')
                    tags.append(
                        BeautifulSoup(
                            newtag
                            , "xml"
                        )
                        # bs4 always outputs UTF-8
                        .decode('utf-8')
                    )
                    i=i+1
                    if j < int(tag[:1])*2:
                        j=j+2

        """
        dotclear2 does not use markdown by default unless you use the markdown plugin
        Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown
        """
        if post_format == "markdown":
            content = post_excerpt + post_content
        else:
            content = post_excerpt_xhtml + post_content_xhtml
            content = content.replace('\\n', '')
            post_format = "html"

        kind = 'article'  # TODO: Recognise pages
        status = 'published'  # TODO: Find a way for draft posts

        yield (post_title, content, slugify(post_title), post_creadt, author,
               categories, tags, status, kind, post_format)
def posterous2fields(api_token, email, password):
    """Imports posterous posts.

    Walks the Posterous v2 API page by page and yields one Pelican
    field tuple per post.  Post dates are shifted from the post's
    local UTC offset back to UTC before formatting.

    :param api_token: Posterous API token
    :param email: account email (HTTP basic auth)
    :param password: account password (HTTP basic auth)
    """
    import base64
    from datetime import timedelta
    try:
        # py3k import
        import json
    except ImportError:
        # py2 import
        import simplejson as json
    try:
        # py3k import
        import urllib.request as urllib_request
    except ImportError:
        # py2 import
        import urllib2 as urllib_request

    def get_posterous_posts(api_token, email, password, page = 1):
        # one GET per page, authenticated with HTTP basic auth
        base64string = base64.encodestring(("%s:%s" % (email, password)).encode('utf-8')).replace(b'\n', b'')
        url = "http://posterous.com/api/v2/users/me/sites/primary/posts?api_token=%s&page=%d" % (api_token, page)
        request = urllib_request.Request(url)
        request.add_header("Authorization", "Basic %s" % base64string.decode())
        handle = urllib_request.urlopen(request)
        posts = json.loads(handle.read().decode('utf-8'))
        return posts

    page = 1
    posts = get_posterous_posts(api_token, email, password, page)
    while len(posts) > 0:
        # BUGFIX: the old loop re-requested the *current* page before
        # advancing the counter, so page 1 was always fetched twice.
        # Process the page we already have, then fetch the next one.
        page += 1
        for post in posts:
            slug = post.get('slug')
            if not slug:
                slug = slugify(post.get('title'))
            tags = [tag.get('name') for tag in post.get('tags')]
            raw_date = post.get('display_date')
            # raw_date ends with a "+HHMM"/"-HHMM" offset; parse the naive
            # part and subtract the offset to normalize to UTC
            date_object = SafeDatetime.strptime(raw_date[:-6], "%Y/%m/%d %H:%M:%S")
            offset = int(raw_date[-5:])
            delta = timedelta(hours = offset / 100)
            date_object -= delta
            date = date_object.strftime("%Y-%m-%d %H:%M")
            kind = 'article'  # TODO: Recognise pages
            status = 'published'  # TODO: Find a way for draft posts
            yield (post.get('title'), post.get('body_cleaned'), slug, date,
                   post.get('user').get('display_name'), [], tags, status,
                   kind, "html")
        posts = get_posterous_posts(api_token, email, password, page)
def chyrp2fields(atom):
    """Opens a Chyrp Atom file, and yield pelican fields.

    Only public "text" entries are imported.  Yields the same 10-tuple
    shape as the other importers: (title, content, slug, date, author,
    categories, tags, status, kind, markup-format).
    """
    import feedparser
    d = feedparser.parse(atom)
    for entry in d.entries:
        if entry.chyrp_status == 'public' and entry.chyrp_feather == 'text':
            # BUGFIX: use the module-level ``unescape`` — ``HTMLParser``
            # is only bound by the py2 fallback import and raised a
            # NameError on Python 3.4+.
            content = unescape(entry.summary)
            date = (time.strftime("%Y-%m-%d %H:%M", entry.updated_parsed)
                    if hasattr(entry, "updated_parsed") else None)
            author = entry.author if hasattr(entry, "author") else None
            slug = entry.chyrp_url if hasattr(entry, "chyrp_url") else None
            # Chyrp stores tags as 'name: "value"' pairs; keep the values
            tags = ([tag[1] for tag in re.findall(r"(.*)\:\s*\"(.*)\"", entry.tags)]
                    if hasattr(entry, "tags") else None)
            kind = 'article'
            status = 'published'  # only public entries get this far
            # BUGFIX: yield the full 10-tuple — fields2pelican unpacks
            # (title, content, filename, date, author, categories, tags,
            # status, kind, markup); the old 8-tuple raised a ValueError.
            yield (entry.title, content, slug, date, author, [], tags,
                   status, kind, "html")
def tumblr2fields(api_key, blogname):
    """ Imports Tumblr posts (API v2).

    Pages through the blog's posts and yields one Pelican field tuple
    per post, rendering each Tumblr post type (photo, quote, link,
    audio, video, answer) into either markdown or HTML fragments.

    :param api_key: Tumblr API key
    :param blogname: blog short name ("<blogname>.tumblr.com")
    """
    from time import strftime, localtime
    try:
        # py3k import
        import json
    except ImportError:
        # py2 import
        import simplejson as json
    try:
        # py3k import
        import urllib.request as urllib_request
    except ImportError:
        # py2 import
        import urllib2 as urllib_request

    def get_tumblr_posts(api_key, blogname, offset=0):
        # fetch one page (API default page size) starting at ``offset``
        url = "http://api.tumblr.com/v2/blog/%s.tumblr.com/posts?api_key=%s&offset=%d&filter=raw" % (blogname, api_key, offset)
        request = urllib_request.Request(url)
        handle = urllib_request.urlopen(request)
        posts = json.loads(handle.read().decode('utf-8'))
        return posts.get('response').get('posts')

    offset = 0
    posts = get_tumblr_posts(api_key, blogname, offset)
    while len(posts) > 0:
        for post in posts:
            title = post.get('title') or post.get('source_title') or post.get('type').capitalize()
            slug = post.get('slug') or slugify(title)
            tags = post.get('tags')
            timestamp = post.get('timestamp')
            date = strftime("%Y-%m-%d %H:%M:%S", localtime(int(timestamp)))
            # prefix the slug with the date to keep filenames unique
            slug = strftime("%Y-%m-%d-", localtime(int(timestamp))) + slug
            format = post.get('format')
            content = post.get('body')
            type = post.get('type')
            if type == 'photo':
                if format == 'markdown':
                    # BUGFIX: an empty format string here made
                    # ``fmtstr % (caption, url)`` raise TypeError for
                    # every markdown photo post; use the markdown
                    # image syntax instead.
                    fmtstr = '![%s](%s)'
                else:
                    fmtstr = '<img alt="%s" src="%s" />'
                content = '\n'.join(fmtstr % (photo.get('caption'), photo.get('original_size').get('url')) for photo in post.get('photos'))
                content += '\n\n' + post.get('caption')
            elif type == 'quote':
                if format == 'markdown':
                    fmtstr = '\n\n— %s'
                else:
                    fmtstr = '<p>— %s</p>'
                content = post.get('text') + fmtstr % post.get('source')
            elif type == 'link':
                if format == 'markdown':
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                content = fmtstr % post.get('url') + post.get('description')
            elif type == 'audio':
                if format == 'markdown':
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                content = fmtstr % post.get('source_url') + post.get('caption') + post.get('player')
            elif type == 'video':
                if format == 'markdown':
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                content = fmtstr % post.get('source_url') + post.get('caption') + '\n'.join(player.get('embed_code') for player in post.get('player'))
            elif type == 'answer':
                title = post.get('question')
                content = '<p><a href="%s" rel="external nofollow">%s</a>: %s</p>\n%s' % (post.get('asking_name'), post.get('asking_url'), post.get('question'), post.get('answer'))
            content = content.rstrip() + '\n'
            kind = 'article'
            status = 'published'  # TODO: Find a way for draft posts
            yield (title, content, slug, date, post.get('blog_name'), [type],
                   tags, status, kind, format)
        offset += len(posts)
        posts = get_tumblr_posts(api_key, blogname, offset)
def feed2fields(file):
    """Read a syndication feed and yield one pelican field tuple per entry."""
    import feedparser
    parsed = feedparser.parse(file)
    for entry in parsed.entries:
        if hasattr(entry, "updated_parsed"):
            date = time.strftime("%Y-%m-%d %H:%M", entry.updated_parsed)
        else:
            date = None
        author = entry.author if hasattr(entry, "author") else None
        if hasattr(entry, "tags"):
            tags = [e['term'] for e in entry.tags]
        else:
            tags = None
        slug = slugify(entry.title)
        kind = 'article'
        yield (entry.title, entry.description, slug, date, author, [], tags,
               None, kind, "html")
def build_header(title, date, author, categories, tags, slug, status=None, attachments=None):
    """Build a reST header from a list of fields.

    :returns: the header text, terminated by a blank line
    """
    # BUGFIX: the import used to precede this docstring, which turned it
    # into a dead string expression (``__doc__`` stayed None).
    # column_width (instead of len) sizes the underline correctly for
    # wide east-asian characters.
    from docutils.utils import column_width
    header = '%s\n%s\n' % (title, '#' * column_width(title))
    if date:
        header += ':date: %s\n' % date
    if author:
        header += ':author: %s\n' % author
    if categories:
        header += ':category: %s\n' % ', '.join(categories)
    if tags:
        header += ':tags: %s\n' % ', '.join(tags)
    if slug:
        header += ':slug: %s\n' % slug
    if status:
        header += ':status: %s\n' % status
    if attachments:
        header += ':attachments: %s\n' % ', '.join(attachments)
    header += '\n'
    return header
def build_markdown_header(title, date, author, categories, tags, slug, status=None,
                          attachments=None):
    """Build a Markdown metadata header from the given fields.

    Optional fields that are empty or None are omitted; the header ends
    with a blank line separating it from the post body.
    """
    lines = ['Title: %s' % title]
    optional = [
        ('Date', date),
        ('Author', author),
        ('Category', ', '.join(categories) if categories else None),
        ('Tags', ', '.join(tags) if tags else None),
        ('Slug', slug),
        ('Status', status),
        ('Attachments', ', '.join(attachments) if attachments else None),
    ]
    for label, value in optional:
        if value:
            lines.append('%s: %s' % (label, value))
    return '\n'.join(lines) + '\n\n'
def get_ext(out_markup, in_markup='html'):
    """Pick the output file extension for the given input/output markups."""
    uses_markdown = 'markdown' in (in_markup, out_markup)
    return '.md' if uses_markdown else '.rst'
def get_out_filename(output_path, filename, ext, kind,
                     dirpage, dircat, categories, wp_custpost):
    """Compute (and create directories for) the destination file path.

    Sanitizes ``filename`` for cross-filesystem safety, then places the
    file under ``output_path``, optionally in a ``pages/`` subdirectory,
    a per-category subdirectory, and/or a per-post-type subdirectory.
    NOTE: creates the needed directories as a side effect.
    """
    filename = os.path.basename(filename)

    # Enforce filename restrictions for various filesystems at once; see
    # http://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
    # we do not need to filter words because an extension will be appended
    filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename)  # invalid chars
    filename = filename.lstrip('.')  # should not start with a dot
    if not filename:
        filename = '_'
    filename = filename[:249]  # allow for 5 extra characters

    out_filename = os.path.join(output_path, filename+ext)
    # option to put page posts in pages/ subdirectory
    if dirpage and kind == 'page':
        pages_dir = os.path.join(output_path, 'pages')
        if not os.path.isdir(pages_dir):
            os.mkdir(pages_dir)
        out_filename = os.path.join(pages_dir, filename+ext)
    elif not dirpage and kind == 'page':
        pass
    # option to put wp custom post types in directories with post type
    # names. Custom post types can also have categories so option to
    # create subdirectories with category names
    elif kind != 'article':
        if wp_custpost:
            typename = slugify(kind)
        else:
            typename = ''
            kind = 'article'
        if dircat and (len(categories) > 0):
            catname = slugify(categories[0])
        else:
            catname = ''
        out_filename = os.path.join(output_path, typename,
                                    catname, filename+ext)
        if not os.path.isdir(os.path.join(output_path, typename, catname)):
            os.makedirs(os.path.join(output_path, typename, catname))
    # option to put files in directories with categories names
    elif dircat and (len(categories) > 0):
        catname = slugify(categories[0])
        out_filename = os.path.join(output_path, catname, filename+ext)
        if not os.path.isdir(os.path.join(output_path, catname)):
            os.mkdir(os.path.join(output_path, catname))
    return out_filename
def get_attachments(xml):
    """returns a dictionary of posts that have attachments with a list
    of the attachment_urls
    """
    items = get_items(xml)
    post_names = {}          # post_id -> filename of the parent post
    attachment_pairs = []    # (parent post_id, attachment url)

    for item in items:
        item_kind = item.find('post_type').string
        post_name = item.find('post_name').string
        post_id = item.find('post_id').string
        if item_kind == 'attachment':
            attachment_pairs.append((item.find('post_parent').string,
                                     item.find('attachment_url').string))
        else:
            post_names[post_id] = get_filename(post_name, post_id)

    attachedposts = {}
    for parent, url in attachment_pairs:
        # a missing key means the attachment's parent is not a valid post;
        # those urls are collected under the None key
        parent_name = post_names.get(parent)
        attachedposts.setdefault(parent_name, []).append(url)
    return attachedposts
def download_attachments(output_path, urls):
    """Downloads wordpress attachments and returns a list of paths to
    attachments that can be associated with a post (relative path to output
    directory). Files that fail to download, will not be added to posts.

    :param output_path: base directory the attachments are saved under
    :param urls: iterable of attachment URLs to fetch
    """
    locations = []
    for url in urls:
        path = urlparse(url).path
        #teardown path and rebuild to negate any errors with
        #os.path.join and leading /'s
        path = path.split('/')
        filename = path.pop(-1)
        localpath = ''
        for item in path:
            # skip drive-letter-like components ("c:"), invalid on win32
            if sys.platform != 'win32' or ':' not in item:
                localpath = os.path.join(localpath, item)
        full_path = os.path.join(output_path, localpath)
        if not os.path.exists(full_path):
            os.makedirs(full_path)
        print('downloading {}'.format(filename))
        try:
            urlretrieve(url, os.path.join(full_path, filename))
            # record the path relative to output_path for the post header
            locations.append(os.path.join(localpath, filename))
        except (URLError, IOError) as e:
            #Python 2.7 throws an IOError rather Than URLError
            logger.warning("No file could be downloaded from %s\n%s", url, e)
    return locations
def fields2pelican(fields, out_markup, output_path,
                   dircat=False, strip_raw=False, disable_slugs=False,
                   dirpage=False, filename_template=None, filter_author=None,
                   wp_custpost=False, wp_attach=False, attachments=None):
    """Write one Pelican source file per imported field tuple.

    HTML input is converted to the target markup by shelling out to
    ``pandoc``; rst/markdown input is written through unchanged.

    :param fields: iterable of 10-tuples as yielded by the ``*2fields``
        importers
    :param out_markup: "rst" or "markdown"
    :param output_path: directory the converted files are written to
    :param filter_author: when set, only posts by this author are written
    :param attachments: dict of post filename -> attachment urls
        (as returned by ``get_attachments``)
    """
    for (title, content, filename, date, author, categories, tags, status,
            kind, in_markup) in fields:
        if filter_author and filter_author != author:
            continue
        slug = not disable_slugs and filename or None
        if wp_attach and attachments:
            try:
                urls = attachments[filename]
                attached_files = download_attachments(output_path, urls)
            except KeyError:
                attached_files = None
        else:
            attached_files = None

        ext = get_ext(out_markup, in_markup)
        if ext == '.md':
            header = build_markdown_header(title, date, author, categories,
                                           tags, slug, status, attached_files)
        else:
            # anything that is not markdown is written as reST
            out_markup = "rst"
            header = build_header(title, date, author, categories,
                                  tags, slug, status, attached_files)

        out_filename = get_out_filename(output_path, filename, ext,
                                        kind, dirpage, dircat, categories, wp_custpost)
        print(out_filename)

        if in_markup in ("html", "wp-html"):
            # write a temporary .html file and let pandoc convert it
            html_filename = os.path.join(output_path, filename+'.html')

            with open(html_filename, 'w', encoding='utf-8') as fp:
                # Replace newlines with paragraphs wrapped with <p> so
                # HTML is valid before conversion
                if in_markup == "wp-html":
                    new_content = decode_wp_content(content)
                else:
                    paragraphs = content.splitlines()
                    paragraphs = ['<p>{0}</p>'.format(p) for p in paragraphs]
                    new_content = ''.join(paragraphs)

                fp.write(new_content)

            parse_raw = '--parse-raw' if not strip_raw else ''
            cmd = ('pandoc --normalize {0} --from=html'
                   ' --to={1} -o "{2}" "{3}"').format(
                       parse_raw, out_markup, out_filename, html_filename)

            try:
                rc = subprocess.call(cmd, shell=True)
                if rc < 0:
                    error = "Child was terminated by signal %d" % -rc
                    exit(error)
                elif rc > 0:
                    error = "Please, check your Pandoc installation."
                    exit(error)
            except OSError as e:
                error = "Pandoc execution failed: %s" % e
                exit(error)

            os.remove(html_filename)

            with open(out_filename, 'r', encoding='utf-8') as fs:
                content = fs.read()
                if out_markup == "markdown":
                    # In markdown, to insert a <br />, end a line with two or more spaces & then a end-of-line
                    content = content.replace("\\\n ", " \n")
                    content = content.replace("\\\n", " \n")

        # prepend the metadata header to the (possibly converted) body
        with open(out_filename, 'w', encoding='utf-8') as fs:
            fs.write(header + content)

    if wp_attach and attachments and None in attachments:
        print("downloading attachments that don't have a parent post")
        urls = attachments[None]
        orphan_galleries = download_attachments(output_path, urls)
def main():
    """Command-line entry point: parse options, select the importer for
    the requested input type and convert everything into Pelican source
    files via :func:`fields2pelican`."""
    parser = argparse.ArgumentParser(
        description="Transform feed, WordPress, Tumblr, Dotclear, or Posterous "
                    "files into reST (rst) or Markdown (md) files. Be sure to "
                    "have pandoc installed.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument(dest='input', help='The input file to read')
    parser.add_argument('--wpfile', action='store_true', dest='wpfile',
                        help='Wordpress XML export')
    parser.add_argument('--dotclear', action='store_true', dest='dotclear',
                        help='Dotclear export')
    parser.add_argument('--chyrp', action='store_true', dest='chyrp',
                        help='Chyrp Atom export')
    parser.add_argument('--posterous', action='store_true', dest='posterous',
                        help='Posterous export')
    parser.add_argument('--tumblr', action='store_true', dest='tumblr',
                        help='Tumblr export')
    parser.add_argument('--feed', action='store_true', dest='feed',
                        help='Feed to parse')
    parser.add_argument('-o', '--output', dest='output', default='output',
                        help='Output path')
    parser.add_argument('-m', '--markup', dest='markup', default='rst',
                        help='Output markup format (supports rst & markdown)')
    parser.add_argument('--dir-cat', action='store_true', dest='dircat',
                        help='Put files in directories with categories name')
    parser.add_argument('--dir-page', action='store_true', dest='dirpage',
                        help=('Put files recognised as pages in "pages/" sub-directory'
                              ' (wordpress import only)'))
    parser.add_argument('--filter-author', dest='author',
                        help='Import only post from the specified author')
    parser.add_argument('--strip-raw', action='store_true', dest='strip_raw',
                        help="Strip raw HTML code that can't be converted to "
                             "markup such as flash embeds or iframes (wordpress import only)")
    parser.add_argument('--wp-custpost', action='store_true',
                        dest='wp_custpost',
                        help='Put wordpress custom post types in directories. If used with '
                             '--dir-cat option directories will be created as '
                             '/post_type/category/ (wordpress import only)')
    parser.add_argument('--wp-attach', action='store_true', dest='wp_attach',
                        help='(wordpress import only) Download files uploaded to wordpress as '
                             'attachments. Files will be added to posts as a list in the post '
                             'header. All files will be downloaded, even if '
                             "they aren't associated with a post. Files with be downloaded "
                             'with their original path inside the output directory. '
                             'e.g. output/wp-uploads/date/postname/file.jpg '
                             '-- Requires an internet connection --')
    parser.add_argument('--disable-slugs', action='store_true',
                        dest='disable_slugs',
                        help='Disable storing slugs from imported posts within output. '
                             'With this disabled, your Pelican URLs may not be consistent '
                             'with your original posts.')
    parser.add_argument('-e', '--email', dest='email',
                        help="Email address (posterous import only)")
    parser.add_argument('-p', '--password', dest='password',
                        help="Password (posterous import only)")
    parser.add_argument('-b', '--blogname', dest='blogname',
                        help="Blog name (Tumblr import only)")

    args = parser.parse_args()

    # map the mutually exclusive input flags to an importer name
    input_type = None
    if args.wpfile:
        input_type = 'wordpress'
    elif args.dotclear:
        input_type = 'dotclear'
    elif args.chyrp:
        input_type = 'chyrp'
    elif args.posterous:
        input_type = 'posterous'
    elif args.tumblr:
        input_type = 'tumblr'
    elif args.feed:
        input_type = 'feed'
    else:
        error = "You must provide either --wpfile, --dotclear, --posterous, --tumblr, --chyrp or --feed options"
        exit(error)

    if not os.path.exists(args.output):
        try:
            os.mkdir(args.output)
        except OSError:
            error = "Unable to create the output folder: " + args.output
            exit(error)

    if args.wp_attach and input_type != 'wordpress':
        error = "You must be importing a wordpress xml to use the --wp-attach option"
        exit(error)

    # every importer yields the same 10-tuple field shape
    if input_type == 'wordpress':
        fields = wp2fields(args.input, args.wp_custpost or False)
    elif input_type == 'dotclear':
        fields = dc2fields(args.input)
    elif input_type == 'chyrp':
        fields = chyrp2fields(args.input)
    elif input_type == 'posterous':
        fields = posterous2fields(args.input, args.email, args.password)
    elif input_type == 'tumblr':
        fields = tumblr2fields(args.input, args.blogname)
    elif input_type == 'feed':
        fields = feed2fields(args.input)

    if args.wp_attach:
        attachments = get_attachments(args.input)
    else:
        attachments = None

    init()  # init logging

    fields2pelican(fields, args.markup, args.output,
                   dircat=args.dircat or False,
                   dirpage=args.dirpage or False,
                   strip_raw=args.strip_raw or False,
                   disable_slugs=args.disable_slugs or False,
                   filter_author=args.author,
                   wp_custpost = args.wp_custpost or False,
                   wp_attach = args.wp_attach or False,
                   attachments = attachments or None)
| btnpushnmunky/pelican | pelican/tools/pelican_import.py | Python | agpl-3.0 | 33,232 |
# gcompris - mining.py
#
# Copyright (C) 2012 Peter Albrecht
# based on template by Bruno Coudoin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# mining activity.
import math
import gobject
import gtk
import gtk.gdk
import gcompris
import gcompris.bonus
import gcompris.utils
import gcompris.skin
import gcompris.sound
import goocanvas
import pango
import random
import cairo
import mining_tutorial
from mining_tools import Area, BlockingArea
from gcompris import gcompris_gettext as _
class Gcompris_mining:
""" GCompis Mining-Activity """
# The factor to shrink the source image with, in order to make it fit on the screen.
# This has to be larger than 1 (= source image has higher resolution than screen),
# so the image looks still nice, if we zoom in a bit.
source_image_scale = 3.0
# the distance, the mouse cursor has to approach the nugget, triggering the next tutorial step
# (in 800x520 coordinate space) (should be in sync with the graphics in tutorial.svgz)
min_nugget_approach = 50.0
def __init__(self, gcomprisBoard):
    """ Constructor

    gcomprisBoard -- the board object handed in by the GCompris core;
    it defines everything we need to know from the core.
    """
    # Save the gcomprisBoard, it defines everything we need
    # to know from the core
    self.gcomprisBoard = gcomprisBoard

    # Needed to get key_press
    gcomprisBoard.disable_im_context = True

    ##
    # initialize and document instance variables

    self.nuggets_to_collect = 0
    """ the number of nuggets, we need to collect in this level """

    self.nugget_count = 0
    """ the number of nuggets, we already have collected """

    self.need_new_nugget = False
    """ used to trigger the creation of a new nugget at next opportunity (only at max zoomed out) """

    self.is_game_won = False
    """ used to start new game, after game was won and bonus is displayed """

    self.__is_game_paused = False
    """ used to avoid input (like scrolling) during game pause """

    self.tutorial = None
    """ handle to the tutorial object """

    self.is_tutorial_startable = False
    """ can the tutorial be started in this level? """

    self.is_tutorial_enabled = False
    """ has the tutorial been started for this nugget? """

    self.last_mouse_pos_x = None
    """ the x position of the mouse pointer, the last time, we saw it (800x520) """

    self.last_mouse_pos_y = None
    """ the y position of the mouse pointer, the last time, we saw it (800x520) """
def start(self):
    """ Load data and start the activity.

    Builds the whole canvas scene graph (rock wall, lorry, nugget,
    sparkling, decorations, tutorial, teacher icon) and starts level 1.
    """
    self.gcomprisBoard.maxlevel = 3
    self.gcomprisBoard.sublevel = 1
    self.gcomprisBoard.number_of_sublevel = 1

    # Set the buttons we want in the bar.
    # We need to set BAR_LEVEL, in order to see the "help" icon!?!
    gcompris.bar_set(gcompris.BAR_LEVEL)
    gcompris.bar_location(0, -1, 0.8)

    # Setup a nugget-blocking-area for the GCompris bar, to avoid placing the nugget behind
    # the bar.
    # The bar is in the lower, left corner of the screen with dimensions: width = 196; height = 50
    self.gc_bar_blocker = BlockingArea(0, 470, 196, 520)

    # Create our rootitem. We put each canvas item in it so at the end we
    # only have to kill it. The canvas deletes all the items it contains
    # automatically.
    self.rootitem = goocanvas.Group(parent = self.gcomprisBoard.canvas.get_root_item())
    self.rootitem.connect("button_press_event", self.on_button_press)
    self.rootitem.connect("motion_notify_event", self.on_mouse_move)

    svghandle = gcompris.utils.load_svg("mining/rockwall.svgz")

    self.viewport = Viewport(self, self.rootitem)

    rockwall_img = goocanvas.Svg(
        parent = self.viewport.get_gc_group(),
        svg_handle = svghandle,
        svg_id = "#BACKGROUND"
        )

    # the further zoom logic highly depends on the following conditions
    assert(gcompris.BOARD_WIDTH == (rockwall_img.get_bounds().x2 / self.source_image_scale))
    assert(gcompris.BOARD_HEIGHT == (rockwall_img.get_bounds().y2 / self.source_image_scale))

    self.lorry = Lorry(svghandle, self.rootitem)
    self.placer = Placer(self)
    self.decorations = Decorations(svghandle, self.viewport.get_gc_group(), self.placer)
    self.nugget = Nugget(svghandle, self.viewport.get_gc_group())

    # create sparkling last, so it is on above the nugget
    self.sparkling = Sparkling(svghandle, self.viewport.get_gc_group())

    # prepare the tutorial
    self.tutorial = mining_tutorial.MiningTutorial(self.rootitem)

    # prepare the teacher icon to start the tutorial with
    self.teacher_img = goocanvas.Image(
        parent = self.rootitem,
        pixbuf = gcompris.utils.load_pixmap("mining/tux-teacher.png"),
        x = 850,
        y = 850,
        visibility = goocanvas.ITEM_INVISIBLE
        )
    self.teacher_img.scale(0.5, 0.5)
    self.teacher_img.connect("button_press_event", self.start_tutorial)
    gcompris.utils.item_focus_init(self.teacher_img, None)

    # initialize the level, to start with
    self.set_level(1)
def set_level(self, level):
    """ Initialize new level and start it.

    level -- the level number to set up (1..maxlevel); unknown levels
    keep the current level-specific settings.
    """
    self.end_level()
    self.is_tutorial_enabled = False

    # store new level
    self.gcomprisBoard.level = level
    gcompris.bar_set_level(self.gcomprisBoard);

    # load level specific values
    if level == 1:
        self.nuggets_to_collect = 3
        self.is_tutorial_startable = True

        # add the tutorials blocking area, to avoid the nugget being placed behind
        # the tutorial mouse or touchpad
        self.placer.add_blocker(self.tutorial.get_blocking_area())
    elif level == 2:
        self.nuggets_to_collect = 6
        self.is_tutorial_startable = False
    elif level == 3:
        self.nuggets_to_collect = 9
        self.is_tutorial_startable = False
    else:
        print("Warning: No level specific values defined for level %i! Keeping current settings." % level)

    # prepare new game
    self.is_game_won = False
    self.nugget_count = 0
    self.need_new_nugget = False
    self.nugget.hide()
    self.update_lorry()
    self.viewport.reset(level)

    # register everything the nugget must not be placed behind
    self.placer.add_blocker(self.gc_bar_blocker)
    self.placer.add_blocker(self.lorry)
    for blocking_area in self.viewport.get_nugget_blocker():
        self.placer.add_blocker(blocking_area)

    self.decorations.decorate_viewport(10)
    self.place_new_nugget()
def place_new_nugget(self):
    """Put a fresh nugget somewhere on the rockwall and start its sparkling."""
    self.placer.place(self.nugget, self.nugget.reset)

    spark_x, spark_y = self.nugget.get_sparkling_coordinates()
    self.sparkling.reset(spark_x, spark_y)
    self.sparkling.animation_start()

    self.need_new_nugget = False

    # The following sound was copied from "Tuxpaint" (GPL)
    gcompris.sound.play_ogg("mining/realrainbow.ogg")

    # tutorial: offer the teacher icon again if this level allows it
    self.is_tutorial_enabled = False
    if self.is_tutorial_startable:
        self.show_teacher_icon()
    else:
        self.hide_teacher_icon()
def start_tutorial(self, item, target_item, event):
    """Begin the tutorial; called when the teacher icon is clicked.

    item        : the element connected with this callback function
    target_item : the element under the cursor
    event       : gtk.gdk.Event
    """
    self.hide_teacher_icon()
    self.is_tutorial_enabled = True
    self.tutorial.start()

    nugget_area = Area(self.nugget.get_bounds())

    # pick the tutorial state matching the current game situation
    if self.need_new_nugget:
        # the nugget is already collected -- the user has to zoom out now
        self.tutorial.set_tutorial_state('zoom out', True)
    elif self.nugget.is_visible():
        # fully zoomed in -- the nugget only needs to be clicked
        self.tutorial.set_tutorial_state('click', True)
    else:
        # start from the beginning: 'move to', then 'zoom in'
        # remember the pointer position for potential "move to" animation restarts
        self.last_mouse_pos_x = event.x_root
        self.last_mouse_pos_y = event.y_root
        self.tutorial.set_tutorial_state('move to', True,
                                         event.x_root, event.y_root,
                                         nugget_area.center_x, nugget_area.center_y)

    # this click event has been processed
    return True
def show_teacher_icon(self):
    """ Show the teacher icon (the icon used to start the tutorial). """
    self.teacher_img.props.visibility = goocanvas.ITEM_VISIBLE
def hide_teacher_icon(self):
    """ Hide the teacher icon (the icon used to start the tutorial). """
    self.teacher_img.props.visibility = goocanvas.ITEM_INVISIBLE
def on_mouse_move(self, item, target_item, event):
    """Mouse-move handler; advances or refreshes the tutorial.

    item        : the element connected with this callback function
    target_item : the element under the cursor
    event       : gtk.gdk.Event
    """
    if not self.is_tutorial_enabled:
        return True

    ##
    # event.x / event.y are relative to target_item (which may be the rockwall
    # or a stone, ...), each having its own coordinate space, so those values
    # cannot be used directly.  Use the coordinates relative to the root of
    # the screen (800 x 520) instead.
    mouse_x = event.x_root
    mouse_y = event.y_root

    # remember them for potential "move to" tutorial animation restarts
    self.last_mouse_pos_x = mouse_x
    self.last_mouse_pos_y = mouse_y

    # get_bounds() also delivers coordinates relative to the screen root (800 x 520)
    nugget_area = Area(self.nugget.get_bounds())
    dx = mouse_x - nugget_area.center_x
    dy = mouse_y - nugget_area.center_y

    # close enough to the nugget?  (compare squared distances: a^2 + b^2 <= c^2)
    if dx * dx + dy * dy <= self.min_nugget_approach * self.min_nugget_approach:
        if self.nugget.is_visible():
            # nugget already visible: fast-forward over "zoom in" straight to "click"
            self.tutorial.set_tutorial_state('zoom in', False)
            self.tutorial.set_tutorial_state('click', False)
        else:
            # default path: "move to", "zoom in", "click", ...
            self.tutorial.set_tutorial_state('zoom in', False)
    elif self.tutorial.get_tutorial_state() == 'move to':
        # still showing the user where to move the pointer -- refresh that animation
        self.restart_tutorial_move_to(mouse_x, mouse_y)

    # this event has been processed
    return True
def restart_tutorial_move_to(self, mouse_x, mouse_y):
    """
    Start a timer to restart the 'move to' tutorial animation.
    mouse_x, mouse_y: position of the mouse pointer relative to the root of the screen (800 x 520)
    """
    # Since event messages like "scroll_event" seem to have to be processed completely, until *.get_bounds()
    # delivers us updated coordinates, we have to call the animation restart in another event, following
    # the current one. So we set up a timer, waiting 1 millisecond, doing the job. ;)
    gobject.timeout_add(1, self.__restart_tutorial_move_to_on_timer, mouse_x, mouse_y)
def __restart_tutorial_move_to_on_timer(self, mouse_x, mouse_y):
    """Timer callback: actually restart the 'move to' tutorial animation.

    mouse_x, mouse_y: position of the mouse pointer relative to the root of the screen (800 x 520)
    """
    target = Area(self.nugget.get_bounds())
    self.tutorial.restart_tutorial_step(mouse_x, mouse_y,
                                        target.center_x, target.center_y)
    # one-shot timer: returning False cancels it
    return False
def on_zoom_change(self, state):
    """React to a zoom state change ('min' / 'mid' / 'max').

    E.g. the nugget is only shown at maximum zoom.
    """
    # As of 2012-08-11 there seems to be no "gcompris way" to change the mouse
    # cursor to an individual png, so we can't switch to a pickaxe. :(

    # keep the "move to" tutorial animation in sync with the new view
    if self.is_tutorial_enabled and self.tutorial.get_tutorial_state() == 'move to':
        self.restart_tutorial_move_to(self.last_mouse_pos_x, self.last_mouse_pos_y)

    if state == 'min':
        self.nugget.hide()
        if self.need_new_nugget:
            # the previous nugget has been collected
            if self.is_tutorial_enabled:
                self.tutorial.stop()
            self.place_new_nugget()
    elif state == 'mid':
        self.nugget.hide()
    elif state == 'max':
        if self.is_tutorial_enabled:
            # try to advance from the "zoom in" step to "click"
            self.tutorial.set_tutorial_state('click', False)
        self.nugget.show()
    else:
        assert(False)
def on_button_press(self, item, target_item, event = None):
    """Handle clicks anywhere on the board.

    The spark sometimes hides the nugget, so click events may be routed to the
    spark image instead; therefore a simple "nugget.connect()" is not enough.
    """
    # ignore input while the game is paused
    if self.is_game_paused():
        return True

    # react on single clicks only, not on double clicks
    if event.type != gtk.gdk.BUTTON_PRESS:
        return False

    # Coordinate space fix:
    # event coordinates live in the coordinate space of the item directly under
    # the cursor; normalize them to canvas coordinate space first.
    (x, y) = self.gcomprisBoard.canvas.convert_from_item_space(target_item, event.x, event.y)

    # Did the click hit the visible nugget?
    # (An alternative would be gcomprisBoard.canvas.get_items_at(x, y, True),
    # but the nugget image object is not available here.)
    if self.nugget.is_visible() and self.nugget.is_hit(x, y):
        self.collect_nugget()

    # this event has been processed
    return True
def collect_nugget(self):
    """Handle a click on the nugget: collect it and update the game state."""
    self.placer.remove_blocker(self.nugget)
    self.nugget_count += 1
    self.sparkling.animation_stop()
    self.nugget.hide()
    self.update_lorry()

    if self.nugget_count >= self.nuggets_to_collect:
        self.on_level_won()
        return

    # The following sound is based on "metalpaint.wav" from "Tuxpaint" (GPL)
    gcompris.sound.play_ogg("mining/pickaxe.ogg")

    # more nuggets are needed -- a new one is placed after zooming out
    self.need_new_nugget = True

    if self.is_tutorial_enabled:
        self.tutorial.set_tutorial_state('zoom out', False)
def update_lorry(self):
    """Refresh the collected/total nugget counter shown on the lorry."""
    self.lorry.update_text("%s/%s" % (self.nugget_count, self.nuggets_to_collect))
def on_level_won(self):
    """Enough nuggets collected: stop the tutorial and show the bonus screen."""
    if self.is_tutorial_enabled:
        self.tutorial.stop()

    self.is_game_won = True

    # Up to and including GCompris 12.5 the bonus constant "LION" did not
    # exist; the lion graphic had to be requested via "GNU".
    try:
        # try the correct constant first
        gcompris.bonus.display(gcompris.bonus.WIN, gcompris.bonus.LION)
    except AttributeError:
        # fall back for GCompris versions <= 12.5
        gcompris.bonus.display(gcompris.bonus.WIN, gcompris.bonus.GNU)
def pause(self, pause):
    """Called by GCompris when the game is paused or resumed (config, bonus).

    pause : nonzero while the pause starts, 0 when it ends.
    """
    # When the bonus is displayed, GCompris first calls pause(1), later pause(0).
    if pause:
        # pause started
        self.__is_game_paused = True
        return

    # pause finished
    self.__is_game_paused = False
    if self.is_game_won:
        # the bonus has been shown -- continue with a new game in the next level
        self.set_level(self.get_next_level())
def is_game_paused(self):
    """ Determine if the game is currently paused """
    # flag maintained by pause(); also handed to the Viewport to gate scroll events
    return self.__is_game_paused
def get_next_level(self):
    """Return the level following the current one, clamped to maxlevel."""
    return min(self.gcomprisBoard.level + 1, self.gcomprisBoard.maxlevel)
def end_level(self):
    """ Terminate the current level """
    # stop pending animations/timers first, then clear placement/decoration state
    self.tutorial.stop()
    self.sparkling.end()
    self.placer.remove_all_blocker()
    self.decorations.cleanup_viewport()
def end(self):
    """ Terminate the activity """
    self.end_level()
    self.tutorial.end()
    # Removing the root item also removes all the items inside it
    self.rootitem.remove()
# -----------------------
# GCompris interface functions, not needed by mining-activity
# (empty implementations so the board plugin API is satisfied)

def config_start(self, profile):
    # no configuration dialog for this activity
    pass

def config_stop(self):
    pass

def key_press(self, keyval, commit_str, preedit_str):
    # keyboard input is not used; everything is mouse/scroll driven
    pass

def ok(self):
    pass

def repeat(self):
    pass
class Lorry:
    """ The lorry in the lower right corner of the screen, showing the number of collected nuggets """

    # position of the lorry (in screen coordinates (800/520))
    x = 730.0
    y = 470.0

    # center of the lorry in the svg file (used for rotation and positioning)
    pivot_x = 2220
    pivot_y = 1432

    # The svg file is scaled by a factor of 3 (to allow a nice zoom in), which
    # has to be compensated for the lorry, since it is not affected by zooming.
    # Additionally, the lorry drawn in the svg file is a little bit too small
    # for the counter text in the default font to fit in.
    scale = 0.4

    def __init__(self, svghandle, rootitem):
        """
        Constructor:
        svghandle : handle of the svg file, containing graphics data
        rootitem  : the root item to attach goo-objects to
        """
        self.lorry_img = goocanvas.Svg(
            parent = rootitem,
            svg_handle = svghandle,
            svg_id = "#LORRY",
            )
        self.text = goocanvas.Text(
            parent = rootitem,
            font = gcompris.skin.get_font("gcompris/board/medium"),
            x = self.x + 9,
            y = self.y + 2,
            anchor = gtk.ANCHOR_CENTER,
            fill_color = "white",
            text = "-/-"
            )
        self.__update_transformation()

    def get_bounds(self):
        """ Get the bounds of the lorry image on the canvas """
        return self.lorry_img.get_bounds()

    def update_text(self, text):
        """ Set a new text for the lorry to display """
        self.text.set_properties(text = text)

    def __update_transformation(self):
        """ Compute and apply the lorry's transformation matrix """
        # move the pivot to the origin, scale down, then move to the final position
        to_origin = cairo.Matrix(1, 0, 0, 1, -self.pivot_x, -self.pivot_y)
        shrink = cairo.Matrix(self.scale, 0, 0, self.scale, 0, 0)
        to_position = cairo.Matrix(1, 0, 0, 1, self.x, self.y)
        self.lorry_img.set_transform(to_origin * shrink * to_position)
class Placer:
    """ This class randomly places items on the screen and assures that they do not overlap """

    # maximum number of random placement attempts before accepting an overlapping position
    MAX_PLACEMENT_ATTEMPTS = 20

    def __init__(self, activity):
        """
        Constructor:
        activity : the main activity class
        """
        self.activity = activity

        # the internal list of blocking areas (objects implementing get_bounds())
        self.blocking_areas = []

    def place(self, item, place_callback):
        """
        Place "item" on the screen at a random position that (best effort)
        does not overlap any registered blocking area.
        item           : the item to place (needs to implement "get_bounds()")
        place_callback : the callback function to actually place the item (and
                         modify it, like rotate/scale); called with (item, x, y)
        """
        area = Area(item.get_bounds())
        width_half = int(area.width / 2.0)
        height_half = int(area.height / 2.0)

        attempts = 0
        while True:
            # get a new random position for the item
            x = random.randrange(width_half, gcompris.BOARD_WIDTH - width_half) * self.activity.source_image_scale
            y = random.randrange(height_half, gcompris.BOARD_HEIGHT - height_half) * self.activity.source_image_scale

            # place the item at this position
            place_callback(item, x, y)

            # check for overlapping objects
            if not self.__does_object_collide_with_registered_ones(item):
                # valid position found: remember the positioned item and stop searching
                self.add_blocker(item)
                break

            attempts += 1
            if attempts > self.MAX_PLACEMENT_ATTEMPTS:
                # We tried to place this object many times but could not find a
                # valid position. Since an invalid position (= overlapping
                # objects) is way better than a frozen application, accept the
                # current position anyway.
                print("Warning: safety_counter reached maximum!")
                self.add_blocker(item)
                break

    def add_blocker(self, blocking_area):
        """
        Add a new blocking area to the internal list of blocking areas.
        blocking_area : object that implements get_bounds() (like goocanvas.Item.get_bounds())
        """
        self.blocking_areas.append(blocking_area)

    def remove_blocker(self, blocking_area):
        """ Remove the given blocking area from the internal list of blocking areas """
        # list.remove() drops the first entry comparing equal, matching the
        # former manual search loop
        try:
            self.blocking_areas.remove(blocking_area)
        except ValueError:
            print("Warning: blocking-area not in list: " + str(blocking_area))

    def remove_all_blocker(self):
        """ Remove all blockers from the internal list """
        self.blocking_areas = []

    def __does_object_collide_with_registered_ones(self, asked_object):
        """ Tell whether any registered blocking area overlaps asked_object (bounding-box test) """
        area_a = Area(asked_object.get_bounds())
        for blocking_area in self.blocking_areas:
            area_r = Area(blocking_area.get_bounds())
            # boxes overlap when the center distance is within the summed half
            # extents on both axes
            if (math.fabs(area_a.center_x - area_r.center_x) <= (area_r.width + area_a.width) / 2.0 and
                math.fabs(area_a.center_y - area_r.center_y) <= (area_r.height + area_a.height) / 2.0):
                # collision!
                return True
        # no collision
        return False
class Viewport:
    """ The viewport handles zooming in and out with the appropriate translation """

    def __init__(self, activity, parent):
        """
        Constructor:
        activity : the main activity object
        parent : the parent GooCanvas item to add our gc_group
        """
        # initialize and document instance variables
        self.source_image_scale = activity.source_image_scale
        """ see documentation in Gcompris_mining """
        self.x = 0
        """ viewport x translation """
        self.y = 0
        """ viewport y translation """
        self.scale = 1.0
        """ current viewport scale / zoom """
        self.scale_min = 1.0 / self.source_image_scale
        """ The limit to max zoom out, while still filling all the screen with the rockwall. """
        self.scale_max = None
        """
        The limit to max zoom in.
        Try to keep scale_max reachable by scale_min * zoom_factor ^ n (with n in [1, 2, 3, 4, ...[)
        This value is set up in reset()
        """
        self.zoom_factor = None
        """ The factor to zoom on each zoom event. This value is set up in reset() """
        self.gc_group = goocanvas.Group(parent = parent)
        """ The GooCanvas group, which holds everything that is affected by zooming """

        self.gcomprisBoard = activity.gcomprisBoard
        self.cb_zoom_change = activity.on_zoom_change
        self.is_game_paused = activity.is_game_paused

        self.gc_group.connect("scroll_event", self.__on_scroll)

        # frame of blocking areas along the screen borders, so no nugget is
        # placed partly off-screen
        self.nugget_blocker = (
            BlockingArea(0, 0, 800, 42), # top
            BlockingArea(758, 0, 800, 520), # right
            BlockingArea(0, 510, 800, 520), # bottom
            BlockingArea(0, 0, 42, 520) # left
            )

    def reset(self, level):
        """ Reset the viewport and set zooming parameters according to current level """
        # in general: higher level means more zooming
        if level == 1:
            self.scale_max = 0.6000
            self.zoom_factor = 1.3417 # 2 zoom events from min to max
        elif level == 2:
            self.scale_max = 0.8000
            self.zoom_factor = 1.2447 # 4 zoom events from min to max
        elif level == 3:
            self.scale_max = 1.0000
            self.zoom_factor = 1.2010 # 6 zoom events from min to max
        else:
            print("Warning: No level specific zooming values defined for level %i! Keeping current settings." % level)

        # zoom out max
        self.scale = self.scale_min
        self.x = 0
        self.y = 0
        self.__update_transformation()

    def get_gc_group(self):
        """ get the GooCanvas group, which holds everything that is affected by zooming """
        return self.gc_group

    def get_nugget_blocker(self):
        """ Get the viewport's blocking areas, where no nugget should be placed """
        return self.nugget_blocker

    def __on_scroll(self, item, target_item, event = None):
        """
        Called at scroll (= zoom) events
        item - The element connected with this callback function
        target_item - The element under the cursor
        event - gtk.gdk.Event
        """
        # ignore input while game paused
        if self.is_game_paused():
            return True

        assert(event.type == gtk.gdk.SCROLL)

        ###
        # coordinate space fix
        # Event-coordinates are in the coordinate space of that item, which lies direct under the cursor.
        # So if the user scrolls over the spark, for example, we have to translate those coordinates to
        # the coordinate space of our gc_group.
        (x, y) = self.gcomprisBoard.canvas.convert_from_item_space(target_item, event.x, event.y)
        (x, y) = self.gcomprisBoard.canvas.convert_to_item_space(self.gc_group, x, y)

        ###
        # remember old values for calculation
        old_scale = self.scale
        old_x = self.x
        old_y = self.y

        ###
        # calculate the new scale factor, clamped to [scale_min, scale_max]
        if event.direction == gtk.gdk.SCROLL_DOWN:
            # zoom out
            self.scale /= self.zoom_factor
            if self.scale < self.scale_min:
                self.scale = self.scale_min
        elif event.direction == gtk.gdk.SCROLL_UP:
            # zoom in
            self.scale *= self.zoom_factor
            if self.scale > self.scale_max:
                self.scale = self.scale_max
        else:
            # We don't accept SCROLL_RIGHT and SCROLL_LEFT
            return True

        ###
        # The pixel under the cursor shall be fixed, while everything else zooms in/out
        # (we zoom at the mouse position), so we have to calculate a new position for
        # gc_group (displaying primarily the rockwall image).
        new_x = old_x + x * (old_scale - self.scale)
        new_y = old_y + y * (old_scale - self.scale)

        ###
        # Checks to prevent empty areas on any side of the rockwall
        min_x = gcompris.BOARD_WIDTH * (1 - self.scale * self.source_image_scale)
        min_y = gcompris.BOARD_HEIGHT * (1 - self.scale * self.source_image_scale)

        # block uncovered area on the left
        if new_x > 0:
            new_x = 0

        # block uncovered area on the top
        if new_y > 0:
            new_y = 0

        # block uncovered area on the right
        if new_x < min_x:
            new_x = min_x

        # block uncovered area on the bottom
        if new_y < min_y:
            new_y = min_y

        ###
        # apply new values
        self.x = new_x
        self.y = new_y
        self.__update_transformation()

        ###
        # inform main class about zoom change (only if the scale really changed,
        # i.e. we were not already at a zoom limit)
        if old_scale != self.scale:
            if self.scale == self.scale_max:
                state = 'max'
            elif self.scale == self.scale_min:
                state = 'min'
            else:
                state = 'mid'

            # call the callback function of main class
            self.cb_zoom_change(state)

        return True

    def __update_transformation(self):
        """ Update the viewport's transformation matrix """
        self.gc_group.set_simple_transform(self.x, self.y, self.scale, 0)
class Decorations:
    """
    This class handles decorations, like stones. They have the meaning of:
    - make every level look a bit different
    - help orienting, while scrolling
    """

    # our decoration types in the svg file
    # (svg_x / svg_y is the pivot point of each graphic, used for placement)
    decoration_types = (
        {
          'svg_id': '#STONE1',
          'svg_x': 500,
          'svg_y': 1300
        },
        {
          'svg_id': '#STONE2',
          'svg_x': 1000,
          'svg_y': 1300
        },
        {
          'svg_id': '#STONE3',
          'svg_x': 1500,
          'svg_y': 1300
        },
        {
          'svg_id': '#STONE4',
          'svg_x': 2000,
          'svg_y': 1300
        },
      )

    def __init__(self, svghandle, gc_group, placer):
        """
        Constructor:
        - svghandle : handle to the svg file, to load the decoration pictures
        - gc_group : our viewport's gc_group, the decorations are attached to
        - placer : reference to the Placer object
        """
        self.svghandle = svghandle
        self.placer = placer

        ##
        # initialize and document instance variables

        self.viewport_gc_group = gc_group
        """ The viewport's GooCanvas Group to add our decoration_group to """
        self.decoration_group = None
        """ A goocanvas group, that holds all our decoration, so we can easily remove them, by removing only this group. """
        self.current_decoration_id = None
        """ ID of the decoration type, currently being placed. (Used to overcome callback bounderies) """

    def decorate_viewport(self, number_of_decorations):
        """ Fill the viewport with some decorations """
        assert(self.decoration_group == None)
        self.decoration_group = goocanvas.Group(parent = self.viewport_gc_group)

        for i in range(number_of_decorations):
            # select random decoration
            self.current_decoration_id = random.randrange(4)
            svg_id = self.decoration_types[self.current_decoration_id]['svg_id']

            # create decoration
            decoration = goocanvas.Svg(
              parent = self.decoration_group,
              svg_handle = self.svghandle,
              svg_id = svg_id,
              )

            # let the placer find a free spot; __place_decoration applies the
            # actual transformation
            self.placer.place(decoration, self.__place_decoration)

    def __place_decoration(self, decoration, x, y):
        """ Updates the transformation of the decoration to the new coordinates. Rotation and scale are varied. """
        svg_x = self.decoration_types[self.current_decoration_id]['svg_x']
        svg_y = self.decoration_types[self.current_decoration_id]['svg_y']

        # scale = 1.2 ** k for a random integer k in [-4, 3]
        # (randrange excludes the upper bound), i.e. roughly 0.48 .. 1.73
        scale = math.pow(1.2, random.randrange(-4, +4))
        # rotation in degrees, random integer in [-20, 19]
        rot = random.randrange(-20, +20)

        # we need those values more than once, so lets remember them
        a = math.radians(rot)
        cos_a = math.cos(a)
        sin_a = math.sin(a)

        # create the transformation matrices:
        # move pivot to origin, scale, rotate, then move to the target position
        m_center_to_origin = cairo.Matrix(1, 0, 0, 1, -svg_x, -svg_y)
        m_scale = cairo.Matrix(scale, 0, 0, scale, 0, 0)
        m_rotate = cairo.Matrix(cos_a, sin_a, -sin_a, cos_a, 0, 0)
        m_to_destination = cairo.Matrix(1, 0, 0, 1, x, y)

        # combine all transformation matrices to one
        matrix = m_center_to_origin * m_scale * m_rotate * m_to_destination

        decoration.set_transform(matrix)

    def cleanup_viewport(self):
        """ Remove all decorations from the viewport """
        if self.decoration_group != None:
            self.decoration_group.remove()
            self.decoration_group = None
class Nugget:
    """ The gold nugget """

    # center of the nugget graphic in the svg file (used for positioning)
    pivot_x = 1000
    pivot_y = 800

    def __init__(self, svghandle, parent):
        """
        Constructor:
        svghandle : handle to the svg file, holding the pictures
        parent    : GooCanvas parent item of the gold nugget
        """
        # position of the nugget (in the rockwall/gc_group)
        self.x = 0.0
        self.y = 0.0

        # picture of the nugget; starts invisible, since x/y are not set properly yet
        self.nugget_img = goocanvas.Svg(
            parent = parent,
            svg_handle = svghandle,
            svg_id = "#NUGGET",
            visibility = goocanvas.ITEM_INVISIBLE
            )

    def reset(self, nugget, x, y):
        """
        Move the nugget to a new position.
        nugget : unused here (there is only one nugget image in this class),
                 but the placer callback interface supplies it
        x, y   : new position for the nugget image
        """
        self.x = x
        self.y = y
        self.nugget_img.set_simple_transform(self.x - self.pivot_x,
                                             self.y - self.pivot_y, 1.0, 0)

    def get_bounds(self):
        """ Get the bounds of the nugget image on the canvas """
        return self.nugget_img.get_bounds()

    def show(self):
        """ Display the nugget """
        self.nugget_img.props.visibility = goocanvas.ITEM_VISIBLE

    def hide(self):
        """ Hide the nugget """
        self.nugget_img.props.visibility = goocanvas.ITEM_INVISIBLE

    def is_visible(self):
        """ Tell whether the nugget is visible (True) or hidden (False) """
        return self.nugget_img.props.visibility == goocanvas.ITEM_VISIBLE

    def is_hit(self, x, y):
        """
        Tell whether the given coordinates lie in the nugget's bounding box.
        x and y are coordinates in the canvas coordinate space.
        """
        bounds = self.nugget_img.get_bounds()
        return (bounds.x1 <= x <= bounds.x2) and (bounds.y1 <= y <= bounds.y2)

    def get_sparkling_coordinates(self):
        """ Get the coordinates where the sparkling should appear """
        return (self.x - 10, self.y - 10)
class Sparkling:
    """
    Handles all "sparkling"-stuff of the gold nugget.
    The sparkling animation consists of "sparkphases", the time when the spark can be seen,
    and pauses, the time when the spark is hidden.
    """

    # value to initialize the rotation delta with
    rot_delta_init = 6
    # the factor, the spark shrinks every animation step
    scale_factor = 0.90
    # minimum scale factor; if reached, the current spark-phase ends
    scale_min = 0.4
    # the scale factor to start a spark-phase with
    scale_max = 1.0
    # the time (in milliseconds) between two sparkling animation steps
    timer_milliseconds = 30
    # the number of timer-events, a pause lasts
    pause_ticks_total = 25
    # add some randomness to pause_ticks_total
    pause_tick_variation = 10
    # center of the spark in the svg file (used for rotation and positioning)
    pivot_x = 600
    pivot_y = 600

    def __init__(self, svghandle, parent):
        """
        Constructor:
        svghandle : handle to the svg file, holding the pictures
        parent : GooCanvas parent item of this spark
        """
        # initialize and document instance variables
        self.spark = goocanvas.Svg(
            parent = parent,
            svg_handle = svghandle,
            svg_id = "#SPARK",
            # start invisible, since x/y are not set properly yet
            visibility = goocanvas.ITEM_INVISIBLE
            )
        """ the spark image in the GooCanvas """
        self.x = 0.0
        """ x position of the spark (in the rockwall/gc_group) """
        self.y = 0.0
        """ y position of the spark (in the rockwall/gc_group) """
        self.angle = 0
        """ rotation (in degrees) """
        self.rot_delta = 0
        """ the amount to rotate at every animation step """
        self.scale = 0
        """ the sparks current scale factor """
        self.timer = None
        """ the timer object, firing timeout-events for our animation """
        self.pause_ticks_current = 0
        """ counts the number of elapsed pause ticks between two spark-phases """

    def end(self):
        """ Our "destructor" """
        # make sure the gobject timer does not keep firing after cleanup
        if self.__is_animation_playing():
            self.animation_stop()

    def reset(self, x, y):
        """ Prepare a new sparkling animation. """
        self.x = x
        self.y = y
        self.pause_ticks_current = 0
        # we turn rotation direction at every sparkphase
        self.rot_delta = self.rot_delta_init
        self.__sparkphase_start()

    def __update_transformation(self):
        """ Updates the transformation matrix of the spark (= calculate new picture of animation). """
        # we need those values more than once, so lets remember them
        a = math.radians(self.angle)
        cos_a = math.cos(a)
        sin_a = math.sin(a)

        # create the transformation matrices:
        # move pivot to origin, scale, rotate, then move to the spark position
        m_center_to_origin = cairo.Matrix(1, 0, 0, 1, -self.pivot_x, -self.pivot_y)
        m_scale = cairo.Matrix(self.scale, 0, 0, self.scale, 0, 0)
        m_rotate = cairo.Matrix(cos_a, sin_a, -sin_a, cos_a, 0, 0)
        m_to_destination = cairo.Matrix(1, 0, 0, 1, self.x, self.y)

        # combine all transformation matrices to one
        matrix = m_center_to_origin * m_scale * m_rotate * m_to_destination

        self.spark.set_transform(matrix)

    def __show_spark(self):
        """ Display the spark """
        self.spark.props.visibility = goocanvas.ITEM_VISIBLE

    def __hide_spark(self):
        """ Hide the spark """
        self.spark.props.visibility = goocanvas.ITEM_INVISIBLE

    def __sparkphase_start(self):
        """ Start a new sparkphase, showing the spark """
        # setup new values for this sparkphase: random orientation, inverted
        # rotation direction, full size
        self.angle += random.randrange(0, 359)
        self.rot_delta *= -1
        self.scale = self.scale_max

        # show the spark at the new state
        self.__update_transformation()
        self.__show_spark()

    def __calculate_new_state(self):
        """ Calculate next animation step """
        self.scale *= self.scale_factor
        self.angle += self.rot_delta

    def __animate(self):
        """ Called every x milliseconds, to animate the sparkling """
        if self.pause_ticks_current > 0:
            # pause this tick
            self.pause_ticks_current -= 1
            if self.pause_ticks_current <= 0:
                # pause ends, start the next sparkphase
                self.__sparkphase_start()
            return True

        # no pause
        if self.scale < self.scale_min:
            # spark shrank below the minimum: start a pause of random length
            self.pause_ticks_current = self.pause_ticks_total + random.randrange(-self.pause_tick_variation, self.pause_tick_variation)
            self.__hide_spark()
            return True

        # normal sparkle animation
        self.__calculate_new_state()
        self.__update_transformation()

        # call timeout again
        return True

    def animation_start(self):
        """ Starts the sparkling animation """
        assert(not self.__is_animation_playing())
        self.timer = gobject.timeout_add(self.timer_milliseconds, self.__animate)

    def animation_stop(self):
        """ Stops the sparkling animation """
        assert(self.__is_animation_playing())
        gobject.source_remove(self.timer)
        self.timer = None
        self.__hide_spark()

    def __is_animation_playing(self):
        """ Tells us, if there is an animation running at the moment """
        return self.timer != None
| keshashah/GCompris | src/mining-activity/mining.py | Python | gpl-2.0 | 38,746 |
import os
from celery import Celery
from django.conf import settings
# Set the default Django settings module for the 'celery' program
# (must be done before the Celery app touches any Django configuration).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dojo.settings.settings')

app = Celery('dojo')

# Using a string here means the worker will not have to
# pickle the object when using Windows.
# namespace='CELERY' scopes which Django settings Celery reads.
app.config_from_object('django.conf:settings', namespace='CELERY')

# Discover task modules in all installed Django apps.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
    """Print this task's own request info (handy to verify workers are alive)."""
    print('Request: {0!r}'.format(self.request))
| OWASP/django-DefectDojo | dojo/celery.py | Python | bsd-3-clause | 551 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import MDAnalysis
import MDAnalysis.analysis.diffusionmap as diffusionmap
import numpy as np
import pytest
from MDAnalysisTests.datafiles import PDB, XTC
from numpy.testing import assert_array_almost_equal
@pytest.fixture(scope='module')
def u():
    # shared Universe built from the PDB/XTC test files, reused by all tests here
    return MDAnalysis.Universe(PDB, XTC)
@pytest.fixture(scope='module')
def dist(u):
    # distance matrix over the backbone selection of the shared Universe
    return diffusionmap.DistanceMatrix(u, select='backbone')
@pytest.fixture(scope='module')
def dmap(dist):
    # DiffusionMap computed once from the shared distance matrix
    d_map = diffusionmap.DiffusionMap(dist)
    d_map.run()
    return d_map
def test_eg(dist, dmap):
    # the eigenvalue vector must have one entry per trajectory frame
    eigvals = dmap.eigenvalues
    # number of frames in the trajectory is now 10 vs. 98
    assert eigvals.shape == (dist.n_frames,)
    # makes no sense to test values here, no physical meaning
def test_dist_weights(u):
    # uniform per-atom weights with step=3 (4 frames kept) must give the
    # trivial 4x4 eigendecomposition asserted below
    backbone = u.select_atoms('backbone')
    weights_atoms = np.ones(len(backbone.atoms))
    dist = diffusionmap.DistanceMatrix(u,
                                       select='backbone',
                                       weights=weights_atoms)
    dist.run(step=3)

    dmap = diffusionmap.DiffusionMap(dist)
    dmap.run()
    assert_array_almost_equal(dmap.eigenvalues, [1, 1, 1, 1], 4)
    assert_array_almost_equal(dmap._eigenvectors,
                              ([[0, 0, 1, 0],
                                [0, 0, 0, 1],
                                [-.707, -.707, 0, 0],
                                [.707, -.707, 0, 0]]), 2)
def test_different_steps(u):
    # running with step=3 keeps 4 frames -> 4x4 eigenvector matrix
    dmap = diffusionmap.DiffusionMap(u, select='backbone')
    dmap.run(step=3)
    assert dmap._eigenvectors.shape == (4, 4)
def test_transform(u, dmap):
    # projection into diffusion space must be shaped (n_frames, n_eigenvectors)
    eigvects = dmap._eigenvectors
    n_eigenvectors = 4
    dmap = diffusionmap.DiffusionMap(u)
    dmap.run()
    diffusion_space = dmap.transform(n_eigenvectors, 1)
    assert diffusion_space.shape == (eigvects.shape[0], n_eigenvectors)
def test_long_traj(u):
    # faking n_frames > 5000 must trigger the large-distance-matrix warning
    with pytest.warns(UserWarning, match='The distance matrix is very large'):
        dmap = diffusionmap.DiffusionMap(u)
        dmap._dist_matrix.run(stop=1)
        # pretend the trajectory is huge without actually computing it
        dmap._dist_matrix.n_frames = 5001
        dmap.run()
def test_not_universe_error(u):
    # passing a bare trajectory (not a Universe) must raise ValueError
    trj_only = u.trajectory
    with pytest.raises(ValueError, match='U is not a Universe'):
        diffusionmap.DiffusionMap(trj_only)
def test_DistanceMatrix_attr_warning(u):
    """Deprecated ``dist_matrix`` attribute warns and aliases ``results.dist_matrix``."""
    dist = diffusionmap.DistanceMatrix(u, select='backbone').run(step=3)
    # Plain string: this was an f-string with no placeholders (flake8 F541).
    wmsg = "The `dist_matrix` attribute was deprecated in MDAnalysis 2.0.0"
    with pytest.warns(DeprecationWarning, match=wmsg):
        assert getattr(dist, "dist_matrix") is dist.results.dist_matrix
| MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/analysis/test_diffusionmap.py | Python | gpl-2.0 | 3,663 |
#!/usr/bin/env python
# coding=utf-8
"""559. Permuted Matrices
https://projecteuler.net/problem=559
An **ascent** of a column j in a matrix occurs if the value of column j is
smaller than the value of column j+1 in all rows.
Let P(k, r, n) be the number of r x n matrices with the following properties:
* The rows are permutations of {1, 2, 3, ... , n}.
* Numbering the first column as 1, a column ascent occurs at column j<n **if and only if** j is not a multiple of k.
For example, P(1, 2, 3) = 19, P(2, 4, 6) = 65508751 and P(7, 5, 30) mod
1000000123 = 161858102.
Let Q(n) = $\, \displaystyle \sum_{k=1}^n\,$ P(k, n, n).
For example, Q(5) = 21879393751 and Q(50) mod 1000000123 = 819573537.
Find Q(50000) mod 1000000123.
"""
| openqt/algorithms | projecteuler/pe559-permuted-matrices.py | Python | gpl-3.0 | 754 |
#!/usr/bin/python
import sys
import urllib2
import csv
import re
import time
import traceback
from bs4 import BeautifulSoup
from difflib import SequenceMatcher
#################### Importing Liraries ########################
if ( len(sys.argv) != 3 ) :
print "Only 2 arguments are allowed"
quit()
if ( sys.argv[1] > sys.argv[2] ) :
print "Invalid Arguments"
quit()
#################### Checking Arguments ########################
MEASUREMENT_REGEX = r"[0-9\.]{4}"
REQUEST_URL = "http://www.igiworldwide.com/searchreport_postreq.php?r="
PARAM_1 = int(sys.argv[1])
PARAM_2 = int(sys.argv[2])
REPORT_NO_LIST = list(range(PARAM_1 , PARAM_2 + 1))
REQ_FEILDS = ["report_no","shape_cut","carat_weight","color_grade","clarity_grade","polish","symmetry","table_size","total_depth","fluorescence", "width", "length", "depth", "measurement", "shape", "cut", "cut_grade"]
LOG_FILENAME = "error.log"
#CSV_FILENAME = "csv_" + time.strftime("%d-%m-%Y_%H-%M-%S") + ".csv"
CSV_FILENAME = "test.csv" ## development only
##################### Global Constants #########################
def getValueFromDict(dict_param, str):
    """Return dict_param[str] if the key exists, else None (i.e. dict.get).

    NOTE(review): the parameter name shadows the builtin `str`; it is kept
    unchanged for backward compatibility, and the builtin is not needed here.
    """
    return dict_param.get(str)
def populateRowsList(raw_rows):
    """Turn <tr> soup rows (header row skipped) into [key, value] pairs.

    Keys and values are UTF-8 encoded, stripped and lower-cased; the degree
    sign (UTF-8 bytes \xc2\xb0) is removed from values.
    """
    rows_list = []
    for row in raw_rows[1:]:
        cells = row.findAll('td')
        key = cells[0].getText().encode("utf-8").strip().lower()
        value = cells[1].getText().encode("utf-8").strip().lower().replace('\xc2\xb0', '')
        rows_list.append([key, value])
    return rows_list
def reqListGenerater(dict_param):
    """Order dict_param's values per REQ_FEILDS, stripping unit suffixes.

    'carat' is removed from carat_weight and '%' from table_size/total_depth;
    missing (None) values pass through untouched.
    """
    ordered = []
    for field in REQ_FEILDS:
        value = dict_param[field]
        if value is not None:
            if field == "carat_weight":
                value = value.replace('carat', '').strip()
            if field in ("table_size", "total_depth"):
                value = value.replace('%', '').strip()
        ordered.append(value)
    return ordered
################### User Defined Functions ######################
csv_file_handler = csv.writer(open(CSV_FILENAME, "w"))
csv_file_handler.writerow(REQ_FEILDS)
for report_no in REPORT_NO_LIST :
raw_data = urllib2.urlopen(REQUEST_URL + str(report_no)).read()
html_soup = BeautifulSoup(raw_data, "html.parser")
table_data_soup = html_soup.find('table')
if table_data_soup is None:
print str(report_no) + " Failure"
continue
try:
rows_soup = table_data_soup.findAll('tr')
rows_list = populateRowsList(rows_soup)
rows_dict = dict(rows_list)
measurements_list = re.findall(MEASUREMENT_REGEX, rows_dict['measurements'])
shape_cut_list = rows_dict['shape and cut'].split()
req_rows_list = []
req_rows_dict = {
'report_no': getValueFromDict( rows_dict ,'report number' ),
'shape_cut': getValueFromDict( rows_dict , 'shape and cut' ),
'carat_weight': getValueFromDict( rows_dict , 'carat weight' ),
'color_grade': getValueFromDict( rows_dict , 'color grade' ),
'clarity_grade': getValueFromDict( rows_dict , 'clarity grade' ),
'polish': getValueFromDict( rows_dict , 'polish' ),
'symmetry': getValueFromDict( rows_dict , 'symmetry' ),
'table_size': getValueFromDict( rows_dict , 'table size' ),
'total_depth': getValueFromDict( rows_dict , 'total depth' ),
'fluorescence': getValueFromDict( rows_dict , 'fluorescence' ),
'width': measurements_list[0],
'length': measurements_list[1],
'depth': measurements_list[2],
'shape': shape_cut_list[0],
'cut': shape_cut_list[1],
'cut_grade': getValueFromDict( rows_dict , 'cut grade' ),
'measurement': getValueFromDict( rows_dict , 'measurements' )
}
req_rows_list = reqListGenerater(req_rows_dict)
csv_file_handler.writerow(req_rows_list)
print str(report_no) + " Success"
except Exception as ex:
try:
EX_FILE = "ex/" + str(report_no) + ".html"
file_handler = open(EX_FILE,"w")
file_handler.write(str(table_data_soup))
file_handler.close()
print str(report_no) + " Failure With Exception"
file_handler = open(LOG_FILENAME,"w")
file_handler.write(str(report_no) + " :--\n" + str(ex) + "\n\n" + str(traceback.format_exc()) + \
"-----------------------------------------------------------------------------------\n\n\n\n\n")
file_handler.close()
print traceback.format_exc()
except:
print "Unknown Error on " + str(report_no)
#################################################################
# print SequenceMatcher(None, "shailendra".lower(), "Shailendra".lower()).ratio()
| charlie1404/gopalhtmlparser | igiworld.py | Python | unlicense | 4,906 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class csvserver_cspolicy_binding(base_resource) :
""" Binding class showing the cspolicy that can be bound to csvserver.
"""
def __init__(self) :
self._policyname = ""
self._targetlbvserver = ""
self._priority = 0
self._gotopriorityexpression = ""
self._bindpoint = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self._hits = 0
self._pipolicyhits = 0
self._rule = ""
self._name = ""
self.___count = 0
@property
def priority(self) :
ur"""Priority for the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""Priority for the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def bindpoint(self) :
ur"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
ur"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Policies bound to this vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""Policies bound to this vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
ur"""Name of the label invoked.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
ur"""Name of the label invoked.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def targetlbvserver(self) :
ur"""target vserver name.
"""
try :
return self._targetlbvserver
except Exception as e:
raise e
@targetlbvserver.setter
def targetlbvserver(self, targetlbvserver) :
ur"""target vserver name.
"""
try :
self._targetlbvserver = targetlbvserver
except Exception as e:
raise e
@property
def invoke(self) :
ur"""Invoke flag.
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
ur"""Invoke flag.
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def labeltype(self) :
ur"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
ur"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def hits(self) :
ur"""Number of hits.
"""
try :
return self._hits
except Exception as e:
raise e
@property
def rule(self) :
ur"""Rule.
"""
try :
return self._rule
except Exception as e:
raise e
@property
def pipolicyhits(self) :
ur"""Number of hits.
"""
try :
return self._pipolicyhits
except Exception as e:
raise e
	def _get_nitro_response(self, service, response) :
		ur""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(csvserver_cspolicy_binding_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# 444 means the session expired: drop it before reporting.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.csvserver_cspolicy_binding
		except Exception as e :
			raise e
	def _get_object_name(self) :
		ur""" Returns the value of object identifier argument
		"""
		try :
			# The CS vserver name identifies the parent of this binding.
			if self.name is not None :
				return str(self.name)
			return None
		except Exception as e :
			raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = csvserver_cspolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.targetlbvserver = resource.targetlbvserver
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [csvserver_cspolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].targetlbvserver = resource[i].targetlbvserver
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = csvserver_cspolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [csvserver_cspolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch csvserver_cspolicy_binding resources.
"""
try :
obj = csvserver_cspolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of csvserver_cspolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_cspolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count csvserver_cspolicy_binding resources configued on NetScaler.
"""
try :
obj = csvserver_cspolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of csvserver_cspolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_cspolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class csvserver_cspolicy_binding_response(base_response) :
	""" Response payload wrapper: carries the bound-policy list plus the
	standard NITRO status fields (errorcode/message/severity/sessionid).
	"""
	def __init__(self, length=1) :
		self.csvserver_cspolicy_binding = []
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-populate with `length` empty binding objects for the parser.
		self.csvserver_cspolicy_binding = [csvserver_cspolicy_binding() for _ in range(length)]
| benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/cs/csvserver_cspolicy_binding.py | Python | apache-2.0 | 10,784 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import throw, _
import frappe.defaults
from frappe.utils import cint, flt, get_fullname, cstr
from erpnext.utilities.doctype.address.address import get_address_display
from erpnext.shopping_cart.doctype.shopping_cart_settings.shopping_cart_settings import get_shopping_cart_settings
from frappe.utils.nestedset import get_root_of
class WebsitePriceListMissingError(frappe.ValidationError): pass
def set_cart_count(quotation=None):
	"""Store the cart item count in a cookie (when the cart is enabled) so
	the website header can show it without another server round-trip."""
	if cint(frappe.db.get_singles_value("Shopping Cart Settings", "enabled")):
		if not quotation:
			quotation = _get_cart_quotation()
		cart_count = cstr(len(quotation.get("items")))
		if hasattr(frappe.local, "cookie_manager"):
			frappe.local.cookie_manager.set_cookie("cart_count", cart_count)
@frappe.whitelist()
def get_cart_quotation(doc=None):
party = get_customer()
if not doc:
quotation = _get_cart_quotation(party)
doc = quotation
set_cart_count(quotation)
return {
"doc": decorate_quotation_doc(doc),
"addresses": [{"name": address.name, "display": address.display}
for address in get_address_docs(party=party)],
"shipping_rules": get_applicable_shipping_rules(party)
}
@frappe.whitelist()
def place_order():
quotation = _get_cart_quotation()
quotation.company = frappe.db.get_value("Shopping Cart Settings", None, "company")
for fieldname in ["customer_address", "shipping_address_name"]:
if not quotation.get(fieldname):
throw(_("{0} is required").format(quotation.meta.get_label(fieldname)))
quotation.flags.ignore_permissions = True
quotation.submit()
if quotation.lead:
# company used to create customer accounts
frappe.defaults.set_user_default("company", quotation.company)
from erpnext.selling.doctype.quotation.quotation import _make_sales_order
sales_order = frappe.get_doc(_make_sales_order(quotation.name, ignore_permissions=True))
for item in sales_order.get("items"):
item.reserved_warehouse = frappe.db.get_value("Item", item.item_code, "website_warehouse") or None
sales_order.flags.ignore_permissions = True
sales_order.insert()
sales_order.submit()
if hasattr(frappe.local, "cookie_manager"):
frappe.local.cookie_manager.delete_cookie("cart_count")
return sales_order.name
@frappe.whitelist()
def update_cart(item_code, qty, with_items=False):
quotation = _get_cart_quotation()
qty = flt(qty)
if qty == 0:
quotation.set("items", quotation.get("items", {"item_code": ["!=", item_code]}))
else:
quotation_items = quotation.get("items", {"item_code": item_code})
if not quotation_items:
quotation.append("items", {
"doctype": "Quotation Item",
"item_code": item_code,
"qty": qty
})
else:
quotation_items[0].qty = qty
apply_cart_settings(quotation=quotation)
quotation.flags.ignore_permissions = True
quotation.save()
set_cart_count(quotation)
if with_items:
context = get_cart_quotation(quotation)
return {
"items": frappe.render_template("templates/includes/cart/cart_items.html",
context),
"taxes": frappe.render_template("templates/includes/order/order_taxes.html",
context),
}
else:
return quotation.name
@frappe.whitelist()
def update_cart_address(address_fieldname, address_name):
	"""Set the billing or shipping address on the cart quotation and
	return re-rendered tax HTML.

	address_fieldname: "customer_address" or "shipping_address_name"
	address_name: name of an Address document
	"""
	quotation = _get_cart_quotation()
	address_display = get_address_display(frappe.get_doc("Address", address_name).as_dict())
	if address_fieldname == "shipping_address_name":
		quotation.shipping_address_name = address_name
		quotation.shipping_address = address_display
		if not quotation.customer_address:
			# BUG FIX: this was `address_fieldname == "customer_address"`,
			# a no-op comparison. The intent is to fall through and also
			# set the billing address when none has been chosen yet.
			address_fieldname = "customer_address"
	if address_fieldname == "customer_address":
		quotation.customer_address = address_name
		quotation.address_display = address_display
	apply_cart_settings(quotation=quotation)
	quotation.flags.ignore_permissions = True
	quotation.save()
	context = get_cart_quotation(quotation)
	return {
		"taxes": frappe.render_template("templates/includes/order/order_taxes.html",
			context),
	}
def guess_territory():
	"""Best-effort territory: GeoIP country, else the cart settings default,
	else the root of the Territory tree."""
	territory = None
	geoip_country = frappe.session.get("session_country")
	if geoip_country:
		territory = frappe.db.get_value("Territory", geoip_country)
	return territory or \
		frappe.db.get_value("Shopping Cart Settings", None, "territory") or \
		get_root_of("Territory")
def decorate_quotation_doc(doc):
	"""Attach website display fields (images, description, page) to each
	quotation item, in place, and return the doc."""
	for d in doc.get("items", []):
		d.update(frappe.db.get_value("Item", d.item_code,
			["thumbnail", "website_image", "description", "page_name"], as_dict=True))
	return doc
def _get_cart_quotation(party=None):
if not party:
party = get_customer()
quotation = frappe.get_all("Quotation", fields=["name"], filters=
{party.doctype.lower(): party.name, "order_type": "Shopping Cart", "docstatus": 0},
order_by="modified desc", limit_page_length=1)
if quotation:
qdoc = frappe.get_doc("Quotation", quotation[0].name)
else:
qdoc = frappe.get_doc({
"doctype": "Quotation",
"naming_series": get_shopping_cart_settings().quotation_series or "QTN-CART-",
"quotation_to": party.doctype,
"company": frappe.db.get_value("Shopping Cart Settings", None, "company"),
"order_type": "Shopping Cart",
"status": "Draft",
"docstatus": 0,
"__islocal": 1,
(party.doctype.lower()): party.name
})
qdoc.contact_person = frappe.db.get_value("Contact", {"email_id": frappe.session.user,
"customer": party.name})
qdoc.contact_email = frappe.session.user
qdoc.flags.ignore_permissions = True
qdoc.run_method("set_missing_values")
apply_cart_settings(party, qdoc)
return qdoc
def update_party(fullname, company_name=None, mobile_no=None, phone=None):
	"""Update the cart customer's name/type and primary contact details.

	A non-empty company_name switches the customer to type "Company";
	otherwise "Individual". Also refreshes the open cart quotation.
	"""
	party = get_customer()
	party.customer_name = company_name or fullname
	# BUG FIX: this was `party.customer_type == ...` — a no-op comparison.
	# The intent is clearly to assign the customer type.
	party.customer_type = "Company" if company_name else "Individual"
	contact_name = frappe.db.get_value("Contact", {"email_id": frappe.session.user,
		"customer": party.name})
	contact = frappe.get_doc("Contact", contact_name)
	contact.first_name = fullname
	contact.last_name = None
	contact.customer_name = party.customer_name
	contact.mobile_no = mobile_no
	contact.phone = phone
	contact.flags.ignore_permissions = True
	contact.save()
	party_doc = frappe.get_doc(party.as_dict())
	party_doc.flags.ignore_permissions = True
	party_doc.save()
	qdoc = _get_cart_quotation(party)
	if not qdoc.get("__islocal"):
		qdoc.customer_name = company_name or fullname
		qdoc.run_method("set_missing_lead_customer_details")
		qdoc.flags.ignore_permissions = True
		qdoc.save()
def apply_cart_settings(party=None, quotation=None):
if not party:
party = get_customer()
if not quotation:
quotation = _get_cart_quotation(party)
cart_settings = frappe.get_doc("Shopping Cart Settings")
set_price_list_and_rate(quotation, cart_settings)
quotation.run_method("calculate_taxes_and_totals")
set_taxes(quotation, cart_settings)
_apply_shipping_rule(party, quotation, cart_settings)
def set_price_list_and_rate(quotation, cart_settings):
"""set price list based on billing territory"""
_set_price_list(quotation, cart_settings)
# reset values
quotation.price_list_currency = quotation.currency = \
quotation.plc_conversion_rate = quotation.conversion_rate = None
for item in quotation.get("items"):
item.price_list_rate = item.discount_percentage = item.rate = item.amount = None
# refetch values
quotation.run_method("set_price_list_and_item_details")
if hasattr(frappe.local, "cookie_manager"):
# set it in cookies for using in product page
frappe.local.cookie_manager.set_cookie("selling_price_list", quotation.selling_price_list)
def _set_price_list(quotation, cart_settings):
"""Set price list based on customer or shopping cart default"""
if quotation.selling_price_list:
return
# check if customer price list exists
selling_price_list = None
if quotation.customer:
from erpnext.accounts.party import get_default_price_list
selling_price_list = get_default_price_list(frappe.get_doc("Customer", quotation.customer))
# else check for territory based price list
if not selling_price_list:
selling_price_list = cart_settings.price_list
quotation.selling_price_list = selling_price_list
def set_taxes(quotation, cart_settings):
"""set taxes based on billing territory"""
from erpnext.accounts.party import set_taxes
customer_group = frappe.db.get_value("Customer", quotation.customer, "customer_group")
quotation.taxes_and_charges = set_taxes(quotation.customer, "Customer", \
quotation.transaction_date, quotation.company, customer_group, None, \
quotation.customer_address, quotation.shipping_address_name, 1)
#
# # clear table
quotation.set("taxes", [])
#
# # append taxes
quotation.append_taxes_from_master()
def get_customer(user=None):
if not user:
user = frappe.session.user
customer = frappe.db.get_value("Contact", {"email_id": user}, "customer")
if customer:
return frappe.get_doc("Customer", customer)
else:
customer = frappe.new_doc("Customer")
fullname = get_fullname(user)
customer.update({
"customer_name": fullname,
"customer_type": "Individual",
"customer_group": get_shopping_cart_settings().default_customer_group,
"territory": get_root_of("Territory")
})
customer.flags.ignore_mandatory = True
customer.insert(ignore_permissions=True)
contact = frappe.new_doc("Contact")
contact.update({
"customer": customer.name,
"first_name": fullname,
"email_id": user
})
contact.flags.ignore_mandatory = True
contact.insert(ignore_permissions=True)
return customer
def get_address_docs(doctype=None, txt=None, filters=None, limit_start=0, limit_page_length=20, party=None):
if not party:
party = get_customer()
address_docs = frappe.db.sql("""select * from `tabAddress`
where `{0}`=%s order by name limit {1}, {2}""".format(party.doctype.lower(),
limit_start, limit_page_length), party.name,
as_dict=True, update={"doctype": "Address"})
for address in address_docs:
address.display = get_address_display(address)
return address_docs
@frappe.whitelist()
def apply_shipping_rule(shipping_rule):
quotation = _get_cart_quotation()
quotation.shipping_rule = shipping_rule
apply_cart_settings(quotation=quotation)
quotation.flags.ignore_permissions = True
quotation.save()
return get_cart_quotation(quotation)
def _apply_shipping_rule(party=None, quotation=None, cart_settings=None):
if not quotation.shipping_rule:
shipping_rules = get_shipping_rules(quotation, cart_settings)
if not shipping_rules:
return
elif quotation.shipping_rule not in shipping_rules:
quotation.shipping_rule = shipping_rules[0]
if quotation.shipping_rule:
quotation.run_method("apply_shipping_rule")
quotation.run_method("calculate_taxes_and_totals")
def get_applicable_shipping_rules(party=None, quotation=None):
shipping_rules = get_shipping_rules(quotation)
if shipping_rules:
rule_label_map = frappe.db.get_values("Shipping Rule", shipping_rules, "label")
# we need this in sorted order as per the position of the rule in the settings page
return [[rule, rule_label_map.get(rule)] for rule in shipping_rules]
def get_shipping_rules(quotation=None, cart_settings=None):
if not quotation:
quotation = _get_cart_quotation()
shipping_rules = []
if quotation.shipping_address_name:
country = frappe.db.get_value("Address", quotation.shipping_address_name, "country")
if country:
shipping_rules = frappe.db.sql_list("""select distinct sr.name
from `tabShipping Rule Country` src, `tabShipping Rule` sr
where src.country = %s and
sr.disabled != 1 and sr.name = src.parent""", country)
return shipping_rules
def get_address_territory(address_name):
	"""Tries to match city, state and country of address to existing territory"""
	# Checked in order of increasing scope (city, state, country); the first
	# value that names an existing Territory wins.
	territory = None
	if address_name:
		address_fields = frappe.db.get_value("Address", address_name,
			["city", "state", "country"])
		for value in address_fields:
			territory = frappe.db.get_value("Territory", value)
			if territory:
				break
	return territory
| hatwar/buyback-erpnext | erpnext/shopping_cart/cart.py | Python | agpl-3.0 | 12,077 |
#!/usr/bin/env python
#
# Copyright (c) 2017, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
import abc
import logging
import serial
import time
ABC = abc.ABC if sys.version_info >= (3, 4) else abc.ABCMeta('ABC', (), {})
logger = logging.getLogger(__name__)
class RfShieldController(ABC):
    """Abstract context-manager interface for RF shield hardware."""
    @abc.abstractmethod
    def shield(self):
        """Enable shielding of the device under test."""
        pass
    @abc.abstractmethod
    def unshield(self):
        """Disable shielding of the device under test."""
        pass
    @abc.abstractmethod
    def __enter__(self):
        """Open the connection to the shield hardware."""
        pass
    @abc.abstractmethod
    def __exit__(self, *args, **kwargs):
        """Close the connection to the shield hardware."""
        pass
class RfSwitchController(RfShieldController):
    """RF shield controller driving a serial-attached RF switch.

    Shielding closes the configured switch channel; unshielding opens it.
    The command syntax (``CLOSE (@n)`` / ``OPEN (@n)``) is SCPI-style —
    presumably a Keysight/Agilent-type switch unit; confirm against the
    actual hardware manual.
    """
    def __init__(self, channel, port):
        # channel: switch channel number; port: serial device (9600 baud).
        self._channel = channel
        self._port = port
        self._conn = None
    def shield(self):
        self._display_string('CLOSE {}'.format(self._channel))
        self._write('CLOSE (@{})'.format(self._channel))
    def unshield(self):
        self._display_string('OPEN {}'.format(self._channel))
        self._write('OPEN (@{})'.format(self._channel))
    def _write(self, data):
        # Commands are CRLF-terminated.
        return self._conn.write('{}\r\n'.format(data))
    def _display_string(self, string):
        # Echo the action on the instrument's front-panel display.
        self._write('DIAGNOSTIC:DISPLAY "{}"'.format(string))
    def __enter__(self):
        self._conn = serial.Serial(self._port, 9600)
        if not self._conn.isOpen():
            self._conn.open()
        # Blank write plus settle delay wakes the device before first use.
        self._write('')
        time.sleep(1)
        return self
    def __exit__(self, *args, **kwargs):
        if self._conn:
            self._conn.close()
            self._conn = None
CONTROLLERS = {'RF_SWITCH': RfSwitchController}
def get_rf_shield_controller(shield_type, params):
    """Instantiate the controller registered for *shield_type*.

    params is passed to the controller constructor as keyword arguments.
    For an unknown type an error is logged and None is returned (preserving
    the original contract).
    """
    if shield_type in CONTROLLERS:
        return CONTROLLERS[shield_type](**params)
    # BUG FIX: logger.exception() is only meaningful inside an `except`
    # block; called here it logs a bogus "NoneType: None" traceback.
    logger.error('Unknown RF shield controller type: %s', shield_type)
| librasungirl/openthread | tools/harness-automation/autothreadharness/rf_shield_controller.py | Python | bsd-3-clause | 3,315 |
"""
This file is about errors in Python files and not about exception handling in
Jedi.
"""
def parso_to_jedi_errors(grammar, module_node):
    """Wrap every parso error found in *module_node* in a SyntaxError."""
    return list(map(SyntaxError, grammar.iter_errors(module_node)))
class SyntaxError:
    """
    Syntax errors are generated by :meth:`.Script.get_syntax_errors`.
    """
    def __init__(self, parso_error):
        self._parso_error = parso_error

    @property
    def line(self):
        """The line where the error starts (starting with 1)."""
        row, _ = self._parso_error.start_pos
        return row

    @property
    def column(self):
        """The column where the error starts (starting with 0)."""
        _, col = self._parso_error.start_pos
        return col

    @property
    def until_line(self):
        """The line where the error ends (starting with 1)."""
        row, _ = self._parso_error.end_pos
        return row

    @property
    def until_column(self):
        """The column where the error ends (starting with 0)."""
        _, col = self._parso_error.end_pos
        return col

    def get_message(self):
        """Return the parser's human-readable error message."""
        return self._parso_error.message

    def __repr__(self):
        return f'<{type(self).__name__} from={self._parso_error.start_pos} to={self._parso_error.end_pos}>'
| snakeleon/YouCompleteMe-x64 | third_party/ycmd/third_party/jedi_deps/jedi/jedi/api/errors.py | Python | gpl-3.0 | 1,253 |
# python library for gridding, degridding, autocalibrated sensitivity map, and CG SENSE in 2D
#
# Code modified based on code from Nick Zwart at BNI
# author: Mike Schar
import numpy as np
def window2(shape, windowpct=100.0, widthpct=100.0, stopVal=0, passVal=1):
    """Circularly symmetric 2D Hanning window (float32).

    windowpct: transition-band width as a percentage of the radius.
    widthpct:  window width as a percentage of the array extent.
    stopVal / passVal: output values outside / inside the window.
    """
    import numpy as np
    # Scale coordinates so the window edge sits at radius 1.
    bnd = 100.0 / widthpct
    ny, nx = shape[-2], shape[-1]
    # Endpoint inclusion mirrors the even/odd symmetry of each axis.
    x = np.linspace(-bnd, bnd, nx, endpoint=(nx % 2 != 0))
    y = np.linspace(-bnd, bnd, ny, endpoint=(ny % 2 != 0))
    yy, xx = np.meshgrid(y, x, indexing='ij')
    radius = np.sqrt(xx * xx + yy * yy)
    # Raised-cosine ramp inside the transition band, passVal in the core.
    wind = radius <= 1.0
    core = radius <= (1.0 - (windowpct / 100.0))
    ramp = 0.5 * (1.0 - np.cos(np.pi * (1.0 - radius[wind]) / (windowpct / 100.0)))
    out = np.zeros(shape, dtype=np.float32)
    out[wind] = stopVal + ramp * (passVal - stopVal)
    out[core] = passVal
    return out
def rolloff2D(mtx_xy, kernel, clamp_min_percent=5):
    """Return the 2D rolloff (apodization) correction for the gridding kernel.

    Grids a single unit-weight sample at the k-space center, transforms it
    to image space, and inverts the magnitude image.  Multiplying a gridded
    image by this map divides out the kernel's apodization.

    Parameters:
        mtx_xy: matrix size; the correction map is mtx_xy x mtx_xy.
        kernel: 1D Kaiser-Bessel kernel table (see kaiserbessel_kernel).
        clamp_min_percent: values below this percentage of the maximum are
            clamped before inversion to avoid blow-up near the image edge.
    """
    # mtx_xy: int
    import numpy as np
    import bni.gridding.grid_kaiser as gd
    # grid one point at k_0
    dx = dy = 0.0
    coords = np.array([0,0], dtype='float32')
    data = np.array([1.0], dtype='complex64')
    weights = np.array([1.0], dtype='float32')
    outdim = np.array([mtx_xy, mtx_xy],dtype=np.int64)
    # grid -> fft -> |x|
    out = np.abs(fft2D(gd.grid(coords, data, weights, kernel, outdim, dx, dy)))
    # clamp the lowest values to a percentage of the max
    clamp = out.max() * clamp_min_percent/100.0
    out[out < clamp] = clamp
    # invert
    return 1.0/out
def kaiserbessel_kernel(kernel_table_size, oversampling_ratio):
    """Generate a 1D Kaiser-Bessel gridding kernel lookup table.

    Parameters:
        kernel_table_size: number of entries in the lookup table.
        oversampling_ratio: grid oversampling factor shaping the kernel.

    Returns:
        1D kernel table sampled as a function of radius squared.
    """
    # Generate a Kaiser-Bessel kernel function
    # OUTPUT: 1D kernel table for radius squared
    import bni.gridding.grid_kaiser as dg
    kernel_dim = np.array([kernel_table_size],dtype=np.int64)
    return dg.kaiserbessel_kernel(kernel_dim, np.float64(oversampling_ratio))
def fft2D(data, dir=0, out_dims_fft=None):
    """2D FFT via the core fftw wrapper, transforming the two innermost dims.

    Parameters:
        data: np.complex64 array to transform.
        dir: transform direction flag passed to fftw (0 or 1).
        out_dims_fft: optional explicit output dimensions in fortran
            (reversed) ordering; when omitted or empty, the reversed shape
            of ``data`` is used.

    Returns:
        The transformed array produced by ``corefft.fftw``.
    """
    # data: np.complex64
    # dir: int (0 or 1)
    # outdims = [nr_coils, extra_dim2, extra_dim1, mtx, mtx]
    import core.math.fft as corefft
    # NOTE: the former default ``out_dims_fft=[]`` was a mutable default
    # argument; ``None`` is the safe idiom and behaves identically for
    # callers (both empty list and None select data's own shape).
    if out_dims_fft is not None and len(out_dims_fft):
        outdims = list(out_dims_fft)
    else:
        # fortran dimension ordering: reverse of the C-ordered shape
        outdims = list(data.shape)
        outdims.reverse()
    outdims = np.array(outdims, dtype=np.int64)
    # transform only the two innermost (image) dimensions
    kwargs = {'dir': dir,
              'dim1': 1, 'dim2': 1,
              'dim3': 0, 'dim4': 0, 'dim5': 0}
    return corefft.fftw(data, outdims, **kwargs)
def grid2D(data, coords, weights, kernel, out_dims):
    """Grid non-Cartesian k-space samples onto Cartesian matrices.

    Parameters:
        data: samples [nr_coils, extra_dim2, extra_dim1, nr_arms, nr_points].
        coords: k-space trajectory; a leading dimension of 1 means the same
            trajectory is reused for every extra_dim1 index.
        weights: density compensation weights matching ``coords``.
        kernel: 1D Kaiser-Bessel kernel table.
        out_dims: [nr_coils, extra_dim2, extra_dim1, mtx_xy, nr_arms,
            nr_points].

    Returns:
        Gridded k-space [nr_coils, extra_dim2, extra_dim1, mtx_xy, mtx_xy].
    """
    # data: np.float32
    # coords: np.complex64
    # weights: np.float32
    # kernel: np.float64
    # outdims = [nr_coils, extra_dim2, extra_dim1, mtx_xy, mtx_xy]: int
    import bni.gridding.grid_kaiser as bni_grid
    # NOTE(review): nr_arms and nr_points are unpacked but unused below
    [nr_coils, extra_dim2, extra_dim1, mtx_xy, nr_arms, nr_points] = out_dims
    # off-center in pixels.
    dx = dy = 0.
    # gridded kspace
    gridded_kspace = np.zeros([nr_coils, extra_dim2, extra_dim1, mtx_xy, mtx_xy], dtype=data.dtype)
    # tell the grid routine what shape to produce
    outdim = np.array([mtx_xy,mtx_xy], dtype=np.int64)
    # coordinate dimensions
    if coords.shape[0] == 1:
        same_coords_for_all_slices_and_dynamics = True
    else:
        same_coords_for_all_slices_and_dynamics = False
    # grid all slices
    # (dx/dy re-initialized here; duplicate of the assignment above)
    dx = dy = 0.
    for extra1 in range(extra_dim1):
        if same_coords_for_all_slices_and_dynamics:
            extra1_coords = 0
        else:
            extra1_coords = extra1
        for extra2 in range(extra_dim2):
            for coil in range(nr_coils):
                gridded_kspace[coil,extra2,extra1,:,:] = bni_grid.grid(coords[extra1_coords,:,:,:], data[coil,extra2,extra1,:,:], weights[extra1_coords,:,:], kernel, outdim, dx, dy)
    return gridded_kspace
def autocalibrationB1Maps2D(images, taper=50, width=10, mask_floor=1, average_csm=0):
    """Estimate coil sensitivity maps (CSM) from the coil images themselves.

    Low-pass filters the images in k-space with a Hann taper, normalizes by
    the root-sum-of-squares over coils, and zeroes low-signal regions.

    Parameters:
        images: coil images, assumed [nr_coils, extra_dim2, extra_dim1,
            mtx, mtx] (inferred from the shape indexing below).
        taper: Hann taper percentage forwarded to :func:`window2`.
        width: window width percentage forwarded to :func:`window2`.
        mask_floor: percentage of the max RSS below which maps are zeroed.
        average_csm: if nonzero and several dynamics exist, average all
            dynamics for the estimate, then reuse it for every dynamic.

    Returns:
        Complex sensitivity maps with the same shape as ``images``.
    """
    # dimensions
    mtx = images.shape[-1]
    extra_dim1 = images.shape[-3]
    extra_dim2 = images.shape[-4]
    nr_coils = images.shape[-5]
    # Dynamic data - average all dynamics for csm
    if ( (extra_dim1 > 1) and (average_csm) ):
        images_for_csm = images.sum(axis=-3)
        images_for_csm.shape = [nr_coils,extra_dim2,1,mtx,mtx]
    else:
        images_for_csm = images
    # generate window function for blurring image data
    win = window2(images_for_csm.shape[-2:], windowpct=taper, widthpct=width)
    # apply kspace filter
    kspace = fft2D(images_for_csm, dir=1)
    kspace *= win
    # transform back into image space and normalize
    csm = fft2D(kspace, dir=0)
    # root-sum-of-squares across axis 0 (the coil axis)
    rms = np.sqrt(np.sum(np.abs(csm)**2, axis=0))
    csm = csm / rms
    # zero out points that are below the mask threshold
    thresh = mask_floor/100.0 * rms.max()
    csm *= rms > thresh
    # Dynamic data - average all dynamics for csm - asign the average to all dynamics
    if ( (extra_dim1 > 1) and (average_csm) ):
        out = np.zeros(images.shape, np.complex64)
        for coil in range(nr_coils):
            for extra2 in range(extra_dim2):
                for extra1 in range(extra_dim1):
                    out[coil,extra2,extra1,:,:] = csm[coil,extra2,0,:,:]
    else:
        out=csm
    return out
def degrid2D(data, coords, kernel, outdims):
    """Sample Cartesian k-space back onto a non-Cartesian trajectory.

    Inverse operation of :func:`grid2D` (degridding / interpolation).

    Parameters:
        data: gridded k-space [nr_coils, extra_dim2, extra_dim1, mtx_xy,
            mtx_xy].
        coords: trajectory; a leading dimension of 1 means the same
            trajectory is shared by all extra_dim1 indices.
        kernel: 1D Kaiser-Bessel kernel table.
        outdims: [nr_coils, extra_dim2, extra_dim1, nr_arms, nr_points].

    Returns:
        Degridded samples [nr_coils, extra_dim2, extra_dim1, nr_arms,
        nr_points].
    """
    # data: np.float32
    # coords: np.complex64
    # weights: np.float32
    # kernel: np.float64
    # outdims = [nr_coils, extra_dim2, extra_dim1, mtx_xy, mtx_xy]: int
    import bni.gridding.grid_kaiser as bni_grid
    [nr_coils, extra_dim2, extra_dim1, nr_arms, nr_points] = outdims
    # coordinate dimensions
    if coords.shape[0] == 1:
        same_coords_for_all_slices_and_dynamics = True
    else:
        same_coords_for_all_slices_and_dynamics = False
    # gridded kspace
    degridded_kspace = np.zeros([nr_coils, extra_dim2, extra_dim1, nr_arms, nr_points], dtype=data.dtype)
    # degrid all slices
    for extra1 in range(extra_dim1):
        if same_coords_for_all_slices_and_dynamics:
            extra1_coords = 0
        else:
            extra1_coords = extra1
        for extra2 in range(extra_dim2):
            for coil in range(nr_coils):
                degridded_kspace[coil,extra2,extra1,:,:] = bni_grid.degrid(coords[extra1_coords,:,:,:], data[coil,extra2,extra1,:,:], kernel)
    return degridded_kspace
| gpilab/bni-nodes | gridding/Kaiser2D_utils.py | Python | bsd-3-clause | 6,648 |
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import cvxpy.atoms as at
from cvxpy.expressions.constants import Constant
from cvxpy.expressions.variables import Variable
from cvxpy.problems.objective import *
from cvxpy.problems.problem import Problem
import cvxpy.interface.matrix_utilities as intf
from cvxopt import matrix
import scipy.sparse as sp
import unittest
class TestProblem(unittest.TestCase):
    """ Unit tests for the expression/expression module.

    Robustness/performance tests: large sums, large squares, and SDPs.
    """
    def setUp(self):
        # Scalar, vector, and matrix variables shared by all tests.
        self.a = Variable(name='a')
        self.b = Variable(name='b')
        self.c = Variable(name='c')
        self.x = Variable(2, name='x')
        self.y = Variable(3, name='y')
        self.z = Variable(2, name='z')
        self.A = Variable(2,2,name='A')
        self.B = Variable(2,2,name='B')
        self.C = Variable(3,2,name='C')
    # Overriden method to handle lists and lower accuracy.
    # NOTE(review): ``interface`` is accepted but never used in the body;
    # confirm whether it can be dropped.
    def assertAlmostEqual(self, a, b, interface=intf.DEFAULT_INTF):
        try:
            # element-wise comparison when both arguments are iterable
            a = list(a)
            b = list(b)
            for i in range(len(a)):
                self.assertAlmostEqual(a[i], b[i])
        except Exception:
            # non-iterable: scalar comparison with reduced precision
            super(TestProblem, self).assertAlmostEqual(a,b,places=3)
    def test_large_sum(self):
        """Test large number of variables summed.
        """
        for n in [10, 20, 30, 40, 50]:
            A = matrix(range(n*n), (n,n))
            x = Variable(n,n)
            p = Problem(Minimize(at.sum_entries(x)), [x >= A])
            result = p.solve()
            # optimum equals the sum of A's entries 0..n*n-1
            answer = n*n*(n*n+1)/2 - n*n
            print(result - answer)
            self.assertAlmostEqual(result, answer)
    def test_large_square(self):
        """Test large number of variables squared.
        """
        for n in [10, 20, 30, 40, 50]:
            A = matrix(range(n*n), (n,n))
            x = Variable(n,n)
            p = Problem(Minimize(at.square(x[0, 0])),
                        [x >= A])
            result = p.solve()
            # only x[0,0] is penalized and A[0,0] == 0, so the optimum is 0
            self.assertAlmostEqual(result, 0)
    def test_sdp(self):
        """Test a problem with semidefinite cones.
        """
        # nuclear-norm denoising of a sparse random matrix
        a = sp.rand(100,100,.1, random_state=1)
        a = a.todense()
        X = Variable(100,100)
        obj = at.norm(X, "nuc") + at.norm(X-a,'fro')
        p = Problem(Minimize(obj))
        p.solve(solver="SCS")
    def test_large_sdp(self):
        """Test for bug where large SDP caused integer overflow in CVXcanon.
        """
        SHAPE = (256, 256)
        rows = SHAPE[0]
        cols = SHAPE[1]
        X = Variable(*SHAPE)
        Z = Variable(rows+cols, rows+cols)
        prob = Problem(Minimize(0.5*at.trace(Z)),
                       [X[0,0] >= 1, Z[0:rows,rows:rows+cols] == X, Z >> 0, Z == Z.T])
        prob.solve(solver="SCS")
        self.assertAlmostEqual(prob.value, 1.0)
| mwytock/cvxpy | cvxpy/performance_tests/test_robustness.py | Python | gpl-3.0 | 3,392 |
# The contents of this file are subject to the Mozilla Public License
# (MPL) Version 1.1 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License
# at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and
# limitations under the License.
#
# The Original Code is LEPL (http://www.acooke.org/lepl)
# The Initial Developer of the Original Code is Andrew Cooke.
# Portions created by the Initial Developer are Copyright (C) 2009-2010
# Andrew Cooke (andrew@acooke.org). All Rights Reserved.
#
# Alternatively, the contents of this file may be used under the terms
# of the LGPL license (the GNU Lesser General Public License,
# http://www.gnu.org/licenses/lgpl.html), in which case the provisions
# of the LGPL License are applicable instead of those above.
#
# If you wish to allow use of your version of this file only under the
# terms of the LGPL License and not to allow others to use your version
# of this file under the MPL, indicate your decision by deleting the
# provisions above and replace them with the notice and other provisions
# required by the LGPL License. If you do not delete the provisions
# above, a recipient may use your version of this file under either the
# MPL or the LGPL License.
# pylint: disable-msg=W0401,C0111,W0614,W0622,C0301,C0321,C0324,C0103,W0621,R0903
# (the code style is for documentation, not "real")
#@PydevCodeAnalysisIgnore
'''
Examples from the documentation.
'''
from logging import basicConfig, DEBUG, ERROR
from gc import collect
from random import random
from timeit import timeit
from lepl import *
from lepl._example.support import Example
from lepl.support.lib import fmt
NUMBER = 10
REPEAT = 3
def matcher():
    '''A simple parser we'll use as an example.

    Builds a float arithmetic grammar (+, -, *, / and parentheses) whose
    productions collect their results into Term/Factor/Expression lists.
    '''
    # AST node containers produced by the ``> Name`` production operators
    class Term(List): pass
    class Factor(List): pass
    class Expression(List): pass
    # forward declaration so ``expr`` can be referenced recursively below
    expr = Delayed()
    number = Float() >> float
    with DroppedSpace():
        term = number | '(' & expr & ')' > Term
        muldiv = Any('*/')
        factor = term & (muldiv & term)[:] > Factor
        addsub = Any('+-')
        expr += factor & (addsub & factor)[:] > Expression
        line = expr & Eos()
    return line
if __name__ == '__main__':
    basicConfig(level=ERROR)
    m = matcher()
    # NOTE(review): ``print_timing`` is not defined or explicitly imported in
    # the visible module scope; presumably it is expected to come from
    # ``from lepl import *`` -- verify it resolves before running this script.
    print_timing(lambda: fmt('{0:4.2f} + {1:4.2f} * ({2:4.2f} + {3:4.2f} - {4:4.2f})',
                             random(), random(), random(), random(), random()),
                 {'default': m.clone(),
                  'clear': m.clone().config.clear().matcher,
                  'no memo': m.clone().config.no_memoize().matcher,
                  'low memory': m.clone().config.low_memory().matcher,
                  'nfa': m.clone().config.clear().compile_to_nfa().matcher,
                  'dfa': m.clone().config.clear().compile_to_dfa().matcher,
                  're': m.clone().config.clear().compile_to_re().matcher})
# pylint: disable-msg=E0601
# (pylint parsing bug?)
class PerformanceExample(Example):
    """Regression check that several parser configurations produce the same
    parse tree for the example arithmetic expression."""
    def test_parse(self):
        # run this to make sure nothing changes
        m = matcher()
        # the same grammar under different memoization configurations;
        # every one must yield an identical tree
        parsers = [m.clone(),
                   m.clone().config.clear().matcher,
                   m.clone().config.no_memoize().matcher,
                   m.clone().config.auto_memoize().matcher,
                   m.clone().config.low_memory().matcher]
        # BUG FIX: the lambdas previously closed over the loop variable
        # ``parser`` (late binding), so every example exercised only the
        # LAST parser in the list; binding it as a default argument
        # captures each parser at definition time.
        examples = [(lambda parser=parser: parser.parse('1.23e4 + 2.34e5 * (3.45e6 + 4.56e7 - 5.67e8)')[0],
"""Expression
 +- Factor
 | `- Term
 | `- 12300.0
 +- '+'
 `- Factor
 +- Term
 | `- 234000.0
 +- '*'
 `- Term
 +- '('
 +- Expression
 | +- Factor
 | | `- Term
 | | `- 3450000.0
 | +- '+'
 | +- Factor
 | | `- Term
 | | `- 45600000.0
 | +- '-'
 | `- Factor
 | `- Term
 | `- 567000000.0
 `- ')'""") for parser in parsers]
        self.examples(examples)
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/lepl/_example/performance.py | Python | agpl-3.0 | 4,295 |
# -*- coding: utf-8 -*-
#
# aBRI documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 30 13:24:06 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, inspect
# Add root of the tree --> go to place before docs
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
if not root_dir in sys.path:
sys.path.append(root_dir)
import pySPACE
try:
pySPACE.load_configuration("config.yaml")
except:
pass
import pySPACE.missions.nodes
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# autodoc is an extension to extract documentation automatically
# viewcode is an extension to link the corresponding sourcecode
# as a link automatically with syntax highlighting
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.graphviz',
'sphinx.ext.todo',
'sphinx.ext.pngmath',
'sphinx.ext.autosummary',
# 'numpy_ext.numpydoc',
# 'matplotlib.sphinxext.plot_directive',
# 'matplotlib.sphinxext.only_directives',
]
autosummary_generate = True
# switches the showing of todos on or of
#todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pySPACE'
copyright = u'2012, pySPACE Developer Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5 alpha'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['.build','templates','includes']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Variable for the settings of autodoc
# 'members' of the module or class used with autodoc will we shown
# So the classes don't have to be listed individually when using autodoc with a file module
# 'undoc-members' makes sure, even undocumented members are listed
# 'show-inheritance' adds a short line where you get to know what the mother class is.
# 'private-members' is only available since version 1.1 of sphinx
# Now also 'members' beginning with "_" will be included in documentation.
# 'inherited-members' would also include inherited members in the documentation generation.
# Normally they are ommited because the class doesn't change these functions.
# If you set one of these flags in this configuration value,
# you can use a negated form, 'no-flag', in an autodoc directive, to disable it once.
# .. automodule:: foo
# :no-undoc-members:
# undoc-members','inherited-members','members','show-inheritance', 'private-members'
# Python “special” members (that is, those named like __special__) will be included if the special-members flag option is given
autodoc_default_flags = ['members','show-inheritance','undoc-members','private-members', 'special-members']
# 'private-members' is only available since version 1.1 of sphinx
# Now also 'members' beginning with "_" will be included in documentation.
# # Activate this parameter to say where its documentation comes from
# # The default 'should be' to concatenate the doc-strings of the class and its
# # __init__ function.
# autoclass_content = 'class' #'both', 'class', 'init'
autoclass_content = 'class' #'both'
# # This value selects if automatically documented members
# # are sorted alphabetical (value 'alphabetical'),
# # by member type (value 'groupwise') or by source order (value 'bysource').
autodoc_member_order = 'bysource'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_theme = "sphinxdoc"
#html_theme_options = {
# "rightsidebar": "false",
# "relbarbgcolor": "black"
#}
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'pySPACE.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "graphics/pyspace-logo_small.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "graphics/pyspace-logo.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'docs'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'pySPACE.tex', ur'pySPACE Documentation',
ur'pySPACE Developer Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "graphics/pyspace-logo_small.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
def fix_sig(app, what, name, obj, options, signature,
            return_annotation):
    """ Underline class name and separate it from parameters

    **Deprecated** -- kept for reference; not connected in :func:`setup`.

    Sphinx ``autodoc-process-signature`` handlers must always return a
    ``(signature, return_annotation)`` tuple.  The previous version fell
    through and implicitly returned ``None`` for a class without a
    signature; that path is fixed here.
    """
    if 'class' == what:
        # underline class name manually
        # (the +7 accounts for the 'class ' prefix Sphinx renders)
        new_signature = "\n"
        new_signature += "-" * (len(str(name)) + 7)
        new_signature += "\n"
        if signature:
            # drop the generic catch-all parameters from the display
            parameters = signature
            parameters = parameters.replace(", **kwargs", "")
            parameters = parameters.replace(", *args", "")
            if len(parameters) > 0:
                # add the parameters seperately; the extra dot forces a
                # blank line in the rendered output
                new_signature += ".\n"
                new_signature += "Parameters:"
                new_signature += "\n\n"
                # the parameters should be indented but this doesn't work
                new_signature += "   "
                new_signature += parameters
        # BUG FIX: previously nothing was returned when ``signature`` was
        # falsy; always return the tuple Sphinx expects.
        return (new_signature, return_annotation)
    else:
        return (signature, return_annotation)
def missing_docstring(app, what, name, obj, options, lines):
    """ Construct a list of components having no docstring

    Sphinx ``autodoc-process-docstring`` handler.  Appends undocumented
    component names to ``.build/html/undocumented.txt`` and, for node
    classes, extends the docstring with the registered node names and a
    summary of the class components.

    .. todo:: Discover where the 'type ERROR' comes from in CCS
    """
    undocumented_path = os.path.join(
        os.path.dirname(__file__), ".build", "html", "undocumented.txt")
    if len(lines) == 0 and not str(name).endswith("__init__"):
        # record components without any docstring
        # (file handles are now closed deterministically via ``with``;
        # previously they were opened -- even inside the loop below --
        # and never closed)
        with open(undocumented_path, "a") as f:
            f.write(str(name) + "\n")
    else:
        for line in lines:
            # docstrings that merely say "todo: document" count as missing
            if "document" in line and "todo" in line:
                with open(undocumented_path, "a") as f:
                    f.write("\n" + str(name) + "\n" + line + "\n \n")
    if 'class' == what and str(name).endswith("Node"):
        # e.g. pySPACE.missions.nodes.spatial_filtering.spatial_filtering.SpatialFilteringNode
        lines.append("")
        lines.append("")
        lines.append(":POSSIBLE NODE NAMES:")
        lines.append("")
        for key, value in pySPACE.missions.nodes.NODE_MAPPING.items():
            if value.__module__ + "." + value.__name__ == name:
                lines.append(" - **" + key + "**")
        lines.append("")
        lines.append("")
    # Add class summary
    # For unknown reasons, this part produces warnings and errors
    # referring to except and types, but the reason is unclear
    if 'class' == what and not len(lines) == 0 and not "Metaclass" in name and \
            not name.endswith("SklearnNode"):
        new_lines = []
        new_lines.append("")
        new_lines.append("**Class Components Summary**")
        new_lines.append("")
        new_lines.append(".. autosummary::")
        new_lines.append("")
        method_list = inspect.getmembers(obj)  # includes non-methods, too
        for method, value in method_list:
            # only members defined directly on the class itself
            if not method in ["__doc__", "__module__", "__metaclass__",
                              "__dict__", "__init__", "__weakref__"] \
                    and method in obj.__dict__.keys():
                new_lines.append("    " + method)
        # only one member found -> the summary adds no value, drop it
        if len(new_lines) <= 5:
            new_lines = []
        lines.extend(new_lines)
        lines.append("")
        lines.append("")
def setup(app):
    """ Activate fix_sig and missing_docstring and delete old 'undocumented.txt'

    Sphinx extension entry point: registers the docstring-processing
    handler and removes the stale undocumented-components report so the
    new build starts from an empty file.

    .. todo:: Fix file handling. Only works with 'make html_complete'
    """
    # app.connect('autodoc-process-signature', fix_sig)
    app.connect('autodoc-process-docstring', missing_docstring)
    # clean up auto-un-documentation files
    fname=os.path.join(os.path.dirname(__file__),".build","html","undocumented.txt")
    if os.access(fname,os.F_OK):
        os.remove(fname)
######################### preparation #########################################
# delete old list of nodes
fname=os.path.join(os.path.dirname(__file__),"nodes.rst")
if os.access(fname,os.F_OK):
os.remove(fname)
location = "pySPACE.missions.nodes"
offset = len(location)+1
node_list=[]
for key,value in pySPACE.missions.nodes.DEFAULT_NODE_MAPPING.items():
node_list.append(value.__module__+"."+value.__name__)
node_list.sort()
######################### header ###############################################
f=open(fname,"a")
f.write(".. AUTO-GENERATED FILE -- DO NOT EDIT! (conf.py)\n")
f.write(".. _node_list: \n")
f.write("\n")
f.write("List of all Nodes \n")
f.write("======================= \n")
f.write("\n")
f.write("pySPACE comes along with a big choice of processing nodes.\n")
f.write("They can be accessed via :class:`~pySPACE.missions.operations.node_chain.NodeChainOperation`.\n")
f.write("In the following you can get an overview on their functionality, \n")
f.write("the mapping from node names in specification files to the node class \n")
f.write("and vice versa.\n")
f.write("\n")
f.write("For details on the usage of the nodes and for getting usage examples, "
"have a look at their documentation.\n")
######################### node summary #########################################
f.write("\n")
f.write("Mapping of Class Names to Functionality \n")
f.write("--------------------------------------- \n")
f.write("\n")
#f.write("\n.. currentmodule:: %s\n\n"%location)
#f.write(".. autosummary:: \n")
#f.write(" :nosignatures:\n")
#f.write(" :toctree: nodes\n")
f.write("\n")
current_location=""
for node in node_list:
if not node == "pySPACE.missions.nodes.base_node.BaseNode" and \
not "template" in node:
new_location=node[offset:].split(".")[0]
if not new_location==current_location:
current_module=location+"."+new_location
f.write("\n")
f.write("%s\n"%new_location)
f.write("+"*(len(new_location))+"\n")
f.write(" \n|\n\n")
f.write(".. currentmodule:: %s\n"%location)
f.write(".. autosummary:: \n\n %s\n\n|\n\n"%current_module[offset:])
# if not current_module=="pySPACE.missions.nodes.splitter":
# f.write(".. automodule:: %s\n :no-members:\n\n"%current_module)
# else:
# f.write("Control how data is split into training and testing data\n\n")
f.write(".. currentmodule:: %s\n"%current_module)
f.write(".. autosummary:: \n")
f.write(" :nosignatures:\n")
f.write("\n")
current_location=new_location
current_offset = len(current_module)+1
f.write(" "+node[current_offset:]+"\n")
f.write("\n")
######################### node name --> class name ############################
node_name_dict = pySPACE.missions.nodes.NODE_MAPPING
name_list = [(name,value.__module__[offset:]+"."+value.__name__) for name,value in node_name_dict.items()]
f.write(".. currentmodule:: %s\n\n"%location)
f.write("Mapping of Node Names to Class Names \n")
f.write("------------------------------------ \n")
f.write("\n")
name_list.sort(key=lambda x: x[0].lower())
for name,class_name in name_list:
f.write(" - "+name+": "+":class:`"+class_name+"`"+"\n")
######################### class name --> node name ############################
f.write("\n")
f.write("Mapping of Class Names to Node Names \n")
f.write("------------------------------------ \n")
f.write("\n")
name_list.sort(key=lambda x: (x[1].lower(),x[0]))
for name,class_name in name_list:
f.write(" - "+":class:`"+class_name+"`"+": "+name+"\n")
f.close()
######################### operation example list #############################
#examples operations
fname=os.path.join(os.path.dirname(__file__),"examples","operations.rst")
if os.access(fname,os.F_OK):
os.remove(fname)
specs_path=os.path.join(os.path.dirname(__file__),"examples","specs")
examples=os.path.join(specs_path,"operations","examples")
f=open(fname,"a")
f.write(".. _operation_examples: \n")
f.write("\n")
f.write("Operation Examples \n")
f.write("=========================== \n")
f.write("\n")
f.write("These are examples of yaml files you can use as a template\n")
f.write("for your own operations. For details on operations have a look at the respective documentation.\n")
f.write("\n")
# adding example files
for folder, _, files in os.walk(examples):
for fname in files:
f.write(fname + "\n")
f.write("------------------------------------------\n")
f.write("\n")
f.write(".. literalinclude:: " + os.path.join("specs","operations","examples",fname) + "\n")
f.write("\t" + ":language: yaml" + "\n")
f.write("\n")
f.close()
######################### operation chain example list ########################
#examples operation_chains
examples=os.path.join(specs_path,"operation_chains","examples")
fname=os.path.join(os.path.dirname(__file__),"examples","operation_chains.rst")
if os.access(fname,os.F_OK):
os.remove(fname)
f=open(fname,"a")
f.write(".. _operation_chain_examples: \n")
f.write("\n")
f.write("Operation Chain Examples \n")
f.write("============================ \n")
f.write("\n")
f.write("These are examples of yaml files you can use as a template\n")
f.write("for your own operation chains. For details on operation chains have a look at the respective documentation.\n")
f.write("\n")
# adding example files
for folder, _, files in os.walk(examples):
for fname in files:
f.write(fname + "\n")
f.write("------------------------------------------\n")
f.write("\n")
f.write(".. literalinclude:: " + os.path.join("specs","operation_chains","examples",fname) + "\n")
f.write("\t" + ":language: yaml" + "\n")
f.write("\n")
f.close()
######################### preparation of external node documentation ##########
# delete old list of nodes
fname=os.path.join(os.path.dirname(__file__),"external_nodes.rst")
if os.access(fname,os.F_OK):
os.remove(fname)
location = "pySPACE.missions.nodes"
offset = len(location)+1
f=open(fname,"a")
######################### header ###############################################
f.write(".. AUTO-GENERATED FILE -- DO NOT EDIT! (conf.py)\n")
f.write(".. _external_nodes: \n")
f.write("\n")
f.write("Documentation of External and Wrapped Nodes \n")
f.write("=========================================== \n")
f.write("\n")
f.write("pySPACE comes along with wrappers to external algorithms.\n")
f.write("\n")
f.write("For details on the usage of the nodes and for getting usage examples, \n"
"have a look at their documentation.\n")
node_list = []
for key, value in pySPACE.missions.nodes.DEFAULT_NODE_MAPPING.items():
if value.__module__ == "pySPACE.missions.nodes.external":
node_list.append(value.__module__+"."+value.__name__)
node_list.sort()
if len(node_list) > 0:
f.write("\n")
f.write(".. _external_folder: \n")
f.write("\n")
f.write("External Nodes \n")
f.write("-------------- \n")
f.write("\n")
f.write("Nodes from :mod:`external folder <pySPACE.missions.nodes.external>`\n\n")
cl = ""
for node in node_list:
cl += "\n:class:`" + node + "`\n"
cl += "~"*(len(node)+9)+"\n\n"
cl += ".. autoclass:: %s\n" % node
cl += " :noindex:\n\n"
f.write(cl)
else:
f.write("Module for external node wrapping: :mod:`pySPACE.missions.nodes.external`\n")
######################### scikit nodes #########################################
node_list = []
for key, value in pySPACE.missions.nodes.DEFAULT_NODE_MAPPING.items():
if value.__name__.endswith("SklearnNode"):
node_list.append(value.__module__+"."+value.__name__)
node_list.sort()
if len(node_list) > 0:
f.write("\n")
f.write(".. _scikit_nodes: \n")
f.write("\n")
f.write("Scikit Nodes \n")
f.write("------------ \n")
f.write("\n")
f.write("Nodes from :mod:`scikits wrapper <pySPACE.missions.nodes.scikits_nodes>`\n\n")
cl = ""
for node in node_list:
cl += "\n:class:`" + node + "`\n"
cl += "~"*(len(node)+9)+"\n\n"
cl += ".. autoclass:: %s\n :no-members:\n\n" % node
f.write(cl)
f.close()
inheritance_graph_attrs = dict(rankdir="TB",fontsize=5,ratio='compress',nodesep=0.1,sep=0.1, pad=0.001,size= '"10.0, 25.0"') #, size='""'
graphviz_output_format = 'png' #'svg' svg is good for scaling but linking seems to work only with png
#inheritance_node_attrs = dict(shape='rectangle', fontsize=8, height=0.7,
# color='grey', style='filled')
inheritance_node_attrs = dict(shape='rectangle', fontsize=10, height=0.02,width=0.02,margin=0.005)
| pyspace/test | docs/conf.py | Python | gpl-3.0 | 22,175 |
from base import *
# Virtual-server nickname; also used as the Host header in the request.
SERVER = "redir_paths_1"
# Sentinel body the test expects to receive back through the redirect.
MAGIC = 'Reproducing bug report #432'
# Cherokee vserver config template: rule 10 rewrites any 'file.*' request
# to internal.txt with show = 0 (presumably an internal, non-visible
# redirect -- the test name suggests so; confirm against cherokee docs).
CONF = """
vserver!2270!nick = %s
vserver!2270!document_root = %s
vserver!2270!rule!1!match = default
vserver!2270!rule!1!handler = file
vserver!2270!rule!10!match = request
vserver!2270!rule!10!match!request = file.*
vserver!2270!rule!10!handler = redir
vserver!2270!rule!10!handler!rewrite!1!show = 0
vserver!2270!rule!10!handler!rewrite!1!regex = (.*)
vserver!2270!rule!10!handler!rewrite!1!substring = internal.txt
"""
class Test (TestBase):
def __init__ (self):
TestBase.__init__ (self, __file__)
self.name = "Internal Redir: Paths"
self.request = "GET /in/file.foo HTTP/1.1\r\n" + \
"Host: %s\r\n" % (SERVER) + \
"Connection: Close\r\n"
self.expected_error = 200
self.expected_content = MAGIC
def Prepare (self, www):
d = self.Mkdir (www, "%s_droot/in"%(SERVER))
self.WriteFile (d, "internal.txt", 0444, MAGIC)
self.conf = CONF % (SERVER, d)
| lmcro/webserver | qa/227-Redir-Paths.py | Python | gpl-2.0 | 1,107 |
from unittest import TestCase
from nose.tools import assert_equal, assert_raises
from pycassa.logging.pool_stats_logger import StatsLogger
from pycassa.pool import ConnectionPool, NoConnectionAvailable, InvalidRequestError
__author__ = 'gilles'
_credentials = {'username': 'jsmith', 'password': 'havebadpass'}
class TestStatsLogger(TestCase):
    """Unit tests for each StatsLogger callback and its counters."""

    def __init__(self, methodName='runTest'):
        super(TestStatsLogger, self).__init__(methodName)

    def setUp(self):
        super(TestStatsLogger, self).setUp()
        self.logger = StatsLogger()

    def test_empty(self):
        # A fresh logger exposes its raw counter dict unchanged.
        assert_equal(self.logger.stats, self.logger._stats)

    def test_connection_created(self):
        self.logger.connection_created({'level': 'info'})
        self.logger.connection_created({'level': 'error'})
        created = self.logger.stats['created']
        assert_equal(created['success'], 1)
        assert_equal(created['failure'], 1)

    def test_connection_checked(self):
        # Two check-outs followed by a single check-in.
        self.logger.connection_checked_out({})
        self.logger.connection_checked_out({})
        self.logger.connection_checked_in({})
        current = self.logger.stats
        assert_equal(current['checked_out'], 2)
        assert_equal(current['checked_in'], 1)
        assert_equal(current['opened'], {'current': 1, 'max': 2})

    def test_connection_disposed(self):
        self.logger.connection_disposed({'level': 'info'})
        self.logger.connection_disposed({'level': 'error'})
        disposed = self.logger.stats['disposed']
        assert_equal(disposed['success'], 1)
        assert_equal(disposed['failure'], 1)

    def test_connection_recycled(self):
        self.logger.connection_recycled({})
        assert_equal(self.logger.stats['recycled'], 1)

    def test_connection_failed(self):
        self.logger.connection_failed({})
        assert_equal(self.logger.stats['failed'], 1)

    def test_obtained_server_list(self):
        self.logger.obtained_server_list({})
        assert_equal(self.logger.stats['list'], 1)

    def test_pool_at_max(self):
        self.logger.pool_at_max({})
        assert_equal(self.logger.stats['at_max'], 1)
class TestInPool(TestCase):
"""Integration test: StatsLogger attached to a live ConnectionPool.

The assertions depend on the exact sequence of pool operations below;
do not reorder.
"""
def __init__(self, methodName='runTest'):
super(TestInPool, self).__init__(methodName)
def test_pool(self):
listener = StatsLogger()
# 5 pooled + 5 overflow connections, prefilled, with short timeouts.
pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
prefill=True, pool_timeout=0.1, timeout=1,
keyspace='PycassaTestKeyspace', credentials=_credentials,
listeners=[listener], use_threadlocal=False)
conns = []
for i in range(10):
conns.append(pool.get())
assert_equal(listener.stats['created']['success'], 10)
assert_equal(listener.stats['created']['failure'], 0)
assert_equal(listener.stats['checked_out'], 10)
assert_equal(listener.stats['opened'], {'current': 10, 'max': 10})
# Pool is maxed out now
assert_raises(NoConnectionAvailable, pool.get)
assert_equal(listener.stats['created']['success'], 10)
assert_equal(listener.stats['checked_out'], 10)
assert_equal(listener.stats['opened'], {'current': 10, 'max': 10})
assert_equal(listener.stats['at_max'], 1)
# Returning the first five shrinks 'current' without disposing any.
for i in range(0, 5):
pool.return_conn(conns[i])
assert_equal(listener.stats['disposed']['success'], 0)
assert_equal(listener.stats['checked_in'], 5)
assert_equal(listener.stats['opened'], {'current': 5, 'max': 10})
# Overflow connections beyond pool_size get disposed on return.
for i in range(5, 10):
pool.return_conn(conns[i])
assert_equal(listener.stats['disposed']['success'], 5)
assert_equal(listener.stats['checked_in'], 10)
conns = []
# These connections should come from the pool
for i in range(5):
conns.append(pool.get())
assert_equal(listener.stats['created']['success'], 10)
assert_equal(listener.stats['checked_out'], 15)
# But these will need to be made
for i in range(5):
conns.append(pool.get())
assert_equal(listener.stats['created']['success'], 15)
assert_equal(listener.stats['checked_out'], 20)
assert_equal(listener.stats['disposed']['success'], 5)
for i in range(10):
conns[i].return_to_pool()
assert_equal(listener.stats['checked_in'], 20)
assert_equal(listener.stats['disposed']['success'], 10)
# A double return must raise and leave every counter untouched.
assert_raises(InvalidRequestError, conns[0].return_to_pool)
assert_equal(listener.stats['checked_in'], 20)
assert_equal(listener.stats['disposed']['success'], 10)
print "in test:", id(conns[-1])
conns[-1].return_to_pool()
assert_equal(listener.stats['checked_in'], 20)
assert_equal(listener.stats['disposed']['success'], 10)
pool.dispose()
| pycassa/pycassa | tests/test_pool_logger.py | Python | mit | 4,992 |
# Eloipool - Python Bitcoin pool server
# Copyright (C) 2011-2012 Luke Dashjr <luke-jr+eloipool@utopios.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def exit():
    """Forcefully terminate this process by delivering SIGTERM to itself."""
    import os
    import signal
    os.kill(os.getpid(), signal.SIGTERM)
def _RunCLI():
"""Start an interactive console that shares its caller's global namespace."""
import code, sys, threading
# 'raise None' always raises a TypeError; it exists only to capture a
# traceback whose frame chain points back at the calling code.
try:
raise None
except:
# tb_frame is this function's frame; f_back.f_back climbs two levels up
# -- presumably to the module importing this one (confirm at runtime).
namespace = sys.exc_info()[2].tb_frame.f_back.f_back.f_globals
namespace.setdefault('exit', exit)
def CLI():
# Restart the interpreter loop whenever the user leaves it with EOF.
while True:
code.interact(local=namespace, banner='')
print("Not exiting implicitly. Use exit() if you really want to.")
dt = ndt = 0
for thread in threading.enumerate():
if thread.daemon:
dt += 1
else:
ndt += 1
print("(%d threads: %d primary, %d daemon)" % (dt + ndt, ndt, dt))
# Run the console on a timer thread so that importing this module returns.
threading.Timer(0, CLI).start()
_RunCLI()
| FinalHashLLC/eloipool | lib/interactivemode.py | Python | agpl-3.0 | 1,385 |
#!/usr/bin/python
# This program will find the coefficients in an energy expansion of a
# probability vector
import sys # Mainly for file IO
from math import *
from array import * #For fast arrays
def readPVector (filename):
""" reads a probability vector from the file given in filename"""
# Each line is whitespace-split; only the second field (x[1]) is parsed
# as a float, the first field is ignored (presumably an index/label).
try:
f = open(filename,'r')
except IOError:
print 'File could not be opened'
exit(1);
lines = f.readlines()
splitlines = []
for line in lines:
splitlines += [line.split()]
# NOTE(review): the file handle is never closed; harmless for this short
# script, but a with-block would be cleaner.
return array ('d', [float(x[1]) for x in splitlines])
def bitString(s):
    """Return the binary digits of ``s`` as a string (no '0b' prefix).

    Matches the original recursive definition: values <= 1 are rendered
    directly with str(), so bitString(0) == '0' and bitString(1) == '1'.
    """
    if s <= 1:
        return str(s)
    digits = []
    while s > 1:
        digits.append(str(s & 1))
        s >>= 1
    digits.append(str(s))
    return ''.join(reversed(digits))
def parity (s):
    """ Returns the parity of a bitstring"""
    # str.count performs the '1'-tallying the original loop did by hand.
    return s.count('1') % 2
def bitWeight (n):
    """ Returns the number of ones in a bitstring"""
    # Delegate the digit expansion to bitString and tally '1' characters.
    return sum(1 for b in bitString(n) if b == '1')
def E(A, x):
    """ Returns E(A,x) = (-1) ** |A & x|, with A and x given as integers """
    # Even overlap weight gives +1, odd gives -1.
    return 1 if bitWeight(A & x) % 2 == 0 else -1
def project(A, H):
    """Return the projection of the vector H onto the character E(A, .).

    Computes sum_x E(A, x) * H[x]. enumerate() replaces the original
    hand-maintained counter variable, keeping the same iteration order.
    """
    return sum(E(A, i) * h for i, h in enumerate(H))
def bitStringtoSet (s):
    """Convert a bitstring into the list of 1-based positions, counted
    from the rightmost character, whose bit is '1'."""
    # enumerate over the reversed string replaces the manual counter.
    return [pos for pos, bit in enumerate(s[::-1], 1) if bit == '1']
def isBelow(i, j):
    """True when every bit set in ``i`` is also set in ``j``.

    i and j are integers; i & j reproduces i exactly iff i's one-bits are
    a subset of j's, so the comparison alone answers the question.
    """
    return (i & j) == i
######## Main program starts here ##############
# Reads a probability vector, takes logs (the "energies"), and performs a
# Moebius-style inversion over the subset lattice to recover expansion
# coefficients.
try:
Pfile = sys.argv[1]
except IndexError:
print 'Please give filename as first argument';
exit(1);
P = readPVector(Pfile)
# Every entry must lie strictly inside (0, 1) for the log to be defined.
for p in P:
if p<=0 or p>=1:
print 'Error, support of P is no probability vector'
exit(1)
n2 = len(P)
# Uncomment to see the read in P
#for i in range (n2):
# print bitString(i) + " : " + str(P[i])
# Take the logarithm of the probabilities
P = [log (p) for p in P]
#Uncomment to see the energy
# print "This is the energy"
# for i in range (n2):
# print bitString(i) + str(P[i])
# Iterating over the number of elements in P.
# In this decomposition, for each element we subtract from all the
# elements above it the respective coefficient.
# We start by reading off the coefficient of the empty set from P(0..0)
result = []
# We need to sort by bitweight so subsets are processed before supersets.
indices = range (n2)
indices.sort(key = lambda t:bitWeight(t))
for i in indices:
coeff = P[i]
result.append([i, coeff])
for j in indices:
if isBelow(i,j):
P[j] = P[j]-coeff;
for c in result:
print ' : '.join( [str(bitStringtoSet(bitString(c[0]))),str( c[1])])
| tom111/cipi | src/product_coefficients.py | Python | gpl-3.0 | 2,798 |
import os
import errno
import itertools
from datetime import datetime
from django.conf import settings
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import locks, File
from django.core.files.move import file_move_safe
from django.utils.encoding import force_text, filepath_to_uri
from django.utils.functional import LazyObject
from django.utils.module_loading import import_by_path
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.text import get_valid_filename
from django.utils._os import safe_join, abspathu
__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage')
class Storage(object):
"""
A base storage class, providing some default behaviors that all other
storage systems can inherit or override, as necessary.
"""
# The following methods represent a public interface to private methods.
# These shouldn't be overridden by subclasses unless absolutely necessary.
def open(self, name, mode='rb'):
"""
Retrieves the specified file from storage.
"""
return self._open(name, mode)
def save(self, name, content):
"""
Saves new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
# Wrap bare file-like objects so the backend can rely on .chunks().
if not hasattr(content, 'chunks'):
content = File(content)
name = self.get_available_name(name)
name = self._save(name, content)
# Store filenames with forward slashes, even on Windows
return force_text(name.replace('\\', '/'))
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Returns a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_available_name(self, name):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a number (before
# the file extension, if one exists) to the filename until the generated
# filename doesn't exist.
count = itertools.count(1)
while self.exists(name):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" % (file_root, next(count), file_ext))
return name
def path(self, name):
"""
Returns a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError("This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
raise NotImplementedError()
def exists(self, name):
"""
Returns True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
raise NotImplementedError()
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.
"""
raise NotImplementedError()
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError()
def url(self, name):
"""
Returns an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError()
def accessed_time(self, name):
"""
Returns the last accessed time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
def created_time(self, name):
"""
Returns the creation time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
def modified_time(self, name):
"""
Returns the last modified time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
class FileSystemStorage(Storage):
"""
Standard filesystem storage
"""
def __init__(self, location=None, base_url=None):
# Both arguments fall back to the project settings when omitted.
if location is None:
location = settings.MEDIA_ROOT
self.base_location = location
self.location = abspathu(self.base_location)
if base_url is None:
base_url = settings.MEDIA_URL
self.base_url = base_url
def _open(self, name, mode='rb'):
# path() validates the name before the file is opened.
return File(open(self.path(name), mode))
def _save(self, name, content):
full_path = self.path(name)
# Create any intermediate directories that do not exist.
# Note that there is a race between os.path.exists and os.makedirs:
# if os.makedirs fails with EEXIST, the directory was created
# concurrently, and we can continue normally. Refs #16082.
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
try:
if settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS is not None:
# os.makedirs applies the global umask, so we reset it,
# for consistency with FILE_UPLOAD_PERMISSIONS behavior.
old_umask = os.umask(0)
try:
os.makedirs(directory, settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS)
finally:
os.umask(old_umask)
else:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, 'temporary_file_path'):
file_move_safe(content.temporary_file_path(), full_path)
content.close()
# This is a normal uploadedfile that we can stream.
else:
# This fun binary flag incantation makes os.open throw an
# OSError if the file already exists before we open it.
flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL |
getattr(os, 'O_BINARY', 0))
# The current umask value is masked out by os.open!
fd = os.open(full_path, flags, 0o666)
_file = None
try:
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
if _file is None:
# Text vs binary mode is decided by the first chunk.
mode = 'wb' if isinstance(chunk, bytes) else 'wt'
_file = os.fdopen(fd, mode)
_file.write(chunk)
finally:
locks.unlock(fd)
if _file is not None:
_file.close()
else:
os.close(fd)
except OSError as e:
if e.errno == errno.EEXIST:
# Ooops, the file exists. We need a new file name.
name = self.get_available_name(name)
full_path = self.path(name)
else:
raise
else:
# OK, the file save worked. Break out of the loop.
break
if settings.FILE_UPLOAD_PERMISSIONS is not None:
os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS)
return name
def delete(self, name):
assert name, "The name argument is not allowed to be empty."
name = self.path(name)
# If the file exists, delete it from the filesystem.
# Note that there is a race between os.path.exists and os.remove:
# if os.remove fails with ENOENT, the file was removed
# concurrently, and we can continue normally.
if os.path.exists(name):
try:
os.remove(name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def exists(self, name):
return os.path.exists(self.path(name))
def listdir(self, path):
path = self.path(path)
directories, files = [], []
for entry in os.listdir(path):
if os.path.isdir(os.path.join(path, entry)):
directories.append(entry)
else:
files.append(entry)
return directories, files
def path(self, name):
# safe_join rejects names escaping self.location (e.g. '../' attacks).
try:
path = safe_join(self.location, name)
except ValueError:
raise SuspiciousFileOperation("Attempted access to '%s' denied." % name)
return os.path.normpath(path)
def size(self, name):
return os.path.getsize(self.path(name))
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
return urljoin(self.base_url, filepath_to_uri(name))
def accessed_time(self, name):
return datetime.fromtimestamp(os.path.getatime(self.path(name)))
def created_time(self, name):
return datetime.fromtimestamp(os.path.getctime(self.path(name)))
def modified_time(self, name):
return datetime.fromtimestamp(os.path.getmtime(self.path(name)))
def get_storage_class(import_path=None):
    """Resolve and return the storage class named by ``import_path``.

    Falls back to settings.DEFAULT_FILE_STORAGE when the argument is
    missing or empty.
    """
    if not import_path:
        import_path = settings.DEFAULT_FILE_STORAGE
    return import_by_path(import_path)
class DefaultStorage(LazyObject):
# Lazily instantiate the configured storage backend on first attribute
# access, so settings need not be ready at import time.
def _setup(self):
self._wrapped = get_storage_class()()
# Shared lazy singleton used as the project-wide default storage.
default_storage = DefaultStorage()
| adambrenecki/django | django/core/files/storage.py | Python | bsd-3-clause | 10,943 |
import pygration
@pygration.step_class
class AccountTable(object):
    """Migration step that creates the account table."""

    def add(self, db):
        # Executed on upgrade; pygration supplies the database handle.
        db.sql("CREATE TABLE account (id integer, balance integer);")
| mdg/pygrate | test/test1/v002.py | Python | apache-2.0 | 202 |
# Prefix -> namespace-URI map for CSW catalogue API responses (Dublin Core
# plus GeoNetwork extensions).
API_XML_NSMAP = {
"csw": "http://www.opengis.net/cat/csw/2.0.2",
"dc": "http://purl.org/dc/elements/1.1/",
"dct": "http://purl.org/dc/terms/",
"geonet": "http://www.fao.org/geonetwork",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
# Namespaces for the linked ISO 19115/19139 metadata documents
# (gmd/gco/srv families).
LINKED_XML_NSMAP = {
"csw": "http://www.opengis.net/cat/csw/2.0.2",
"gco": "http://www.isotc211.org/2005/gco",
"gmd": "http://www.isotc211.org/2005/gmd",
"gml": "http://www.opengis.net/gml/3.2",
"gmx": "http://www.isotc211.org/2005/gmx",
"srv": "http://www.isotc211.org/2005/srv",
"xlink": "http://www.w3.org/1999/xlink",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
| opendatatrentino/opendata-harvester | harvester_odt/pat_geocatalogo/constants.py | Python | bsd-2-clause | 669 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import unittest
from ....util import Stub, StubProxy
from testing_tools.adapter.errors import UnsupportedCommandError
from testing_tools.adapter.pytest._cli import add_subparser
class StubSubparsers(StubProxy):
    """Records add_parser() calls and returns a pre-installed parser."""

    def __init__(self, stub=None, name="subparsers"):
        super(StubSubparsers, self).__init__(stub, name)

    def add_parser(self, name):
        # Log the call, then hand back whatever the test configured.
        self.add_call("add_parser", None, {"name": name})
        return self.return_add_parser
class StubArgParser(StubProxy):
    """Argument-parser stand-in that merely records add_argument() calls."""

    def __init__(self, stub=None):
        super(StubArgParser, self).__init__(stub, "argparser")

    def add_argument(self, *args, **kwargs):
        self.add_call("add_argument", args, kwargs)
class AddCLISubparserTests(unittest.TestCase):
"""Tests for add_subparser(): the recorded stub-call sequences are the
assertion subject, so the exact call order matters."""
def test_discover(self):
stub = Stub()
subparsers = StubSubparsers(stub)
parser = StubArgParser(stub)
subparsers.return_add_parser = parser
add_subparser("discover", "pytest", subparsers)
# 'discover' is supported: exactly one parser gets registered.
self.assertEqual(
stub.calls,
[
("subparsers.add_parser", None, {"name": "pytest"}),
],
)
def test_unsupported_command(self):
subparsers = StubSubparsers(name=None)
subparsers.return_add_parser = None
# Anything other than 'discover' must raise UnsupportedCommandError...
with self.assertRaises(UnsupportedCommandError):
add_subparser("run", "pytest", subparsers)
with self.assertRaises(UnsupportedCommandError):
add_subparser("debug", "pytest", subparsers)
with self.assertRaises(UnsupportedCommandError):
add_subparser("???", "pytest", subparsers)
# ...but only after each attempt has registered the parser name.
self.assertEqual(
subparsers.calls,
[
("add_parser", None, {"name": "pytest"}),
("add_parser", None, {"name": "pytest"}),
("add_parser", None, {"name": "pytest"}),
],
)
| DonJayamanne/pythonVSCode | pythonFiles/tests/testing_tools/adapter/pytest/test_cli.py | Python | mit | 1,960 |
#!/usr/bin/python
import os, sys
from optparse import OptionParser
from subprocess import *
import shutil
def parseArgs():
    """Parse command-line options for the mapper.

    Returns the (options, args) pair from optparse. Exits with a usage
    error unless both the ELF file and the objdump path are available
    (objdump normally comes from its default value).
    """
    opt_parser = OptionParser()
    opt_parser.add_option("-e", "--elf-file", dest="elf_file",
                          help="elf file to extract", metavar="ELF")
    opt_parser.add_option("-o", "--objdump", dest="objdump", default="/usr/bin/objdump",
                          help="objdump binary location", metavar="OBJDUMP")
    options, args = opt_parser.parse_args()
    if not (options.elf_file and options.objdump):
        opt_parser.error("elf and objdump are required")
    return options, args
def writeFile(name, start, end):
    """Write a single map entry '<start> <length>' to the file ``name``.

    length is computed as end - start. Uses a context manager so the
    handle is closed even if the write fails (the original leaked the
    file object on error).
    """
    with open(name, "w") as f:
        f.write("{} {}\n".format(start, end - start))
def main(options, args):
"""Run objdump -t on the ELF, extract marker symbol addresses, and emit
text_map/data_map/stack_map files with '<start> <length>' entries."""
command = [options.objdump, "-t", options.elf_file]
p = Popen(command, stdout=PIPE)
(stdout, stderr) = p.communicate(None)
lines = stdout.split('\n')
# Scan the symbol table for the begin/end marker symbols; the address is
# the first space-separated column, the symbol name the last.
for l in lines:
cols = l.split(' ')
name = cols[-1]
addr = cols[0]
if(cols[-1] == "_stext_fail"):
text_start = int(addr, 16)
if(cols[-1] == "_etext_fail"):
text_end = int(addr, 16)
if(cols[-1] == "_sdata_fail"):
data_start = int(addr, 16)
if(cols[-1] == "_edata_fail"):
data_end = int(addr, 16)
if(cols[-1] == "_sstack"):
stack_start = int(addr, 16)
if(cols[-1] == "_estack"):
stack_end = int(addr, 16)
# NOTE(review): if any marker symbol is missing from the ELF, the
# corresponding variable is unbound and the calls below raise NameError.
writeFile("text_map", text_start, text_end)
writeFile("data_map", data_start, data_end)
writeFile("stack_map", stack_start, stack_end)
# Propagate objdump's exit status.
sys.exit(p.returncode)
if __name__ == "__main__":
(options, pargs) = parseArgs()
main(options, pargs)
| danceos/dosek | fail/mapper.py | Python | lgpl-3.0 | 1,641 |
"""
Python bindings to odesk API
python-odesk version 0.1
(C) 2010 oDesk
"""
import odesk
# Fill these in to skip the interactive key prompts at the bottom.
PUBLIC_KEY = None
SECRET_KEY = None
#TODO: Desktop app example (check if it's working at all - wasn't last time)
def web_based_app(public_key, secret_key):
"""Walk through the oDesk OAuth-style web flow and call a few APIs.

Prompts the user to visit the auth URL, exchanges the returned frob for
a token, then exercises team/HR/finance endpoints and revokes the token.
"""
print "Emulating web-based app"
#Instantiating a client without an auth token
client = odesk.Client(public_key, secret_key)
print "Please to this URL (authorize the app if necessary):"
print client.auth.auth_url()
print "After that you should be redirected back to your app URL with " + \
"additional ?frob= parameter"
frob = raw_input('Enter frob: ')
auth_token, user = client.auth.get_token(frob)
print "Authenticated user:"
print user
#Instantiating a new client, now with a token.
#Not strictly necessary here (could just set `client.auth_token`), but
#typical for web apps, which wouldn't probably keep client instances
#between requests
client = odesk.Client(public_key, secret_key, auth_token)
try:
print "Team rooms:"
print client.team.get_teamrooms()
#HRv2 API
print "HR: companies"
print client.hr.get_companies()
print "HR: teams"
print client.hr.get_teams()
print "HR: offers"
print client.hr.get_offers()
print "HR: get_engagements"
print client.hr.get_engagements()
print "HR: userroles"
print client.hr.get_user_role()
print "Get jobs"
print client.provider.get_jobs({'q': 'python'})
print "Financial: withdrawal methods"
print client.finance.get_withdrawal_methods()
print "Revoke access"
print client.auth.revoke_token()
except Exception, e:
# Report which API call failed before re-raising.
print "Exception at %s %s" % (client.last_method, client.last_url)
raise e
if __name__ == '__main__':
# Prompt for keys unless the module-level constants were filled in.
public_key = PUBLIC_KEY or raw_input('Enter public key: ')
secret_key = SECRET_KEY or raw_input('Enter secret key: ')
web_based_app(public_key, secret_key)
| solex/python-odesk | examples/examples.py | Python | bsd-3-clause | 2,045 |
# Generated by Django 2.2.13 on 2021-03-29 13:29
from django.db import migrations, models
class Migration(migrations.Migration):
# Adds the allows_skipped_steps boolean flag (default False) to the
# BootcampRun model.
dependencies = [("klasses", "0024_add_bootcamp_run_id")]
operations = [
migrations.AddField(
model_name="bootcamprun",
name="allows_skipped_steps",
field=models.BooleanField(default=False),
)
]
| mitodl/bootcamp-ecommerce | klasses/migrations/0025_bootcamprun_allows_skipped_steps.py | Python | bsd-3-clause | 392 |
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
source("../../shared/qtcreator.py")
def createFile(folder, filename):
"""Drive Qt Creator's 'Empty File' wizard to create ``filename`` in
``folder`` (Squish GUI automation)."""
__createProjectOrFileSelectType__(" General", "Empty File", isProject = False)
# Fill in the file name field.
replaceEditorContent(waitForObject("{name='nameLineEdit' visible='1' "
"type='Utils::FileNameValidatingLineEdit'}"), filename)
# Fill in the destination folder field of the wizard.
replaceEditorContent(waitForObject("{type='Utils::FancyLineEdit' unnamed='1' visible='1' "
"window={type='ProjectExplorer::JsonWizard' unnamed='1' "
"visible='1'}}"), folder)
clickButton(waitForObject(":Next_QPushButton"))
__createProjectHandleLastPage__()
def clickTableGetPatternLineEdit(table, row):
"""Select ``row`` in the MIME-type table and return the patterns line edit."""
clickItem(table, row, 5, 5, 0, Qt.LeftButton)
return waitForObject("{name='patternsLineEdit' type='QLineEdit' visible='1'}")
def getOrModifyFilePatternsFor(mimeType, filter='', toBePresent=None):
"""Read (and optionally extend) the file patterns of a MIME type via the
Options dialog.

Returns the patterns as a list of suffixes (leading '*' stripped). With
an empty ``filter`` the table is scanned for ``mimeType``; otherwise the
filter should narrow the table to exactly one row. ``toBePresent`` lists
suffixes (e.g. ['.hs']) that get appended if missing.
"""
# Strip the '*' wildcard so '*.hs' becomes '.hs'.
toSuffixArray = lambda x : [pat.replace("*", "") for pat in x.split(";")]
result = []
invokeMenuItem("Tools", "Options...")
waitForObjectItem(":Options_QListView", "Environment")
clickItem(":Options_QListView", "Environment", 14, 15, 0, Qt.LeftButton)
waitForObject("{container=':Options.qt_tabwidget_tabbar_QTabBar' type='TabItem' "
"text='MIME Types'}")
clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "MIME Types")
replaceEditorContent(waitForObject("{name='filterLineEdit' type='QLineEdit' visible='1'}"),
filter)
mimeTypeTable = waitForObject("{name='mimeTypesTreeView' type='QTreeView' visible='1'}")
model = mimeTypeTable.model()
# Read-only path: no filter given, just look the MIME type up.
if filter == '':
for row in dumpItems(model):
if row == mimeType:
result = toSuffixArray(str(clickTableGetPatternLineEdit(mimeTypeTable, row).text))
break
clickButton(":Options.Cancel_QPushButton")
if result == ['']:
test.warning("MIME type '%s' seems to have no file patterns." % mimeType)
return result
waitFor('model.rowCount() == 1', 2000)
if model.rowCount() == 1:
patternsLineEd = clickTableGetPatternLineEdit(mimeTypeTable, dumpItems(model)[0])
patterns = str(patternsLineEd.text)
if toBePresent:
actualSuffixes = toSuffixArray(patterns)
toBeAddedSet = set(toBePresent).difference(set(actualSuffixes))
if toBeAddedSet:
# Append the missing suffixes as '*<suffix>' patterns.
patterns += ";*" + ";*".join(toBeAddedSet)
replaceEditorContent(patternsLineEd, patterns)
clickButton(":Options.OK_QPushButton")
# A conflict dialog appears if a pattern is already taken elsewhere.
try:
mBox = waitForObject("{type='QMessageBox' unnamed='1' visible='1' "
"text?='Conflicting pattern*'}", 2000)
conflictingSet = set(str(mBox.detailedText).replace("*", "").splitlines())
clickButton(waitForObject("{text='OK' type='QPushButton' unnamed='1' visible='1' "
"window={type='QMessageBox' unnamed='1' visible='1'}}"))
if toBeAddedSet.intersection(conflictingSet):
test.fatal("At least one of the patterns to be added is already in use "
"for another MIME type.",
"Conflicting patterns: %s" % str(conflictingSet))
if conflictingSet.difference(toBeAddedSet):
test.fail("MIME type handling failed. (QTCREATORBUG-12149?)",
"Conflicting patterns: %s" % str(conflictingSet))
# re-check the patterns
result = getOrModifyFilePatternsFor(mimeType)
except:
# No conflict dialog: the suffixes were accepted as-is.
result = toSuffixArray(patterns)
test.passes("Added suffixes")
return result
else:
result = toSuffixArray(patterns)
else:
result = toSuffixArray(patterns)
elif model.rowCount() > 1:
test.warning("MIME type '%s' has ambiguous results." % mimeType)
else:
test.log("MIME type '%s' seems to be unknown to the system." % mimeType)
clickButton(":Options.Cancel_QPushButton")
return result
def uncheckGenericHighlighterFallback():
"""Disable 'Use fallback location' on the Generic Highlighter options tab,
so definitions come only from the settings directory."""
invokeMenuItem("Tools", "Options...")
waitForObjectItem(":Options_QListView", "Text Editor")
clickItem(":Options_QListView", "Text Editor", 14, 15, 0, Qt.LeftButton)
waitForObject("{container=':Options.qt_tabwidget_tabbar_QTabBar' type='TabItem' "
"text='Generic Highlighter'}")
clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "Generic Highlighter")
ensureChecked("{name='useFallbackLocation' text='Use fallback location' type='QCheckBox' "
"visible='1'}", False)
clickButton(":Options.OK_QPushButton")
def addHighlighterDefinition(language):
"""Download the generic-highlighter definition for ``language`` through
the Options dialog; return True on success, False if the language is
not listed."""
global tmpSettingsDir
test.log("Adding highlighter definitions for '%s'." % language)
invokeMenuItem("Tools", "Options...")
waitForObjectItem(":Options_QListView", "Text Editor")
clickItem(":Options_QListView", "Text Editor", 14, 15, 0, Qt.LeftButton)
waitForObject("{container=':Options.qt_tabwidget_tabbar_QTabBar' type='TabItem' "
"text='Generic Highlighter'}")
clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "Generic Highlighter")
clickButton("{text='Download Definitions...' type='QPushButton' unnamed='1' visible='1'}")
table = waitForObject("{name='definitionsTable' type='QTableWidget' visible='1'}")
model = table.model()
# Scan column 0 for the requested language name.
for row in range(model.rowCount()):
if str(model.data(model.index(row, 0)).toString()) == language:
clickItem(table, "%d/0" % row, 5, 5, 0, Qt.LeftButton)
clickButton("{name='downloadButton' text='Download Selected Definitions' "
"type='QPushButton' visible='1'}")
# downloading happens asynchronously but may take a little time
progressBarWait(10000)
# The definition lands in the settings dir as e.g. 'literate-haskell.xml'.
languageFile = os.path.join(tmpSettingsDir, "QtProject", "qtcreator",
"generic-highlighter", "%s.xml"
% language.lower().replace(" ", "-"))
test.verify(waitFor("os.path.exists(languageFile)", 10000),
"Verifying whether highlight definition file for '%s' has been downloaded "
"and placed to settings." % language)
clickButton("{text='Download Definitions...' type='QPushButton' unnamed='1' "
"visible='1'}")
table = waitForObject("{name='definitionsTable' type='QTableWidget' visible='1'}")
model = table.model()
# Column 1 is non-empty once a definition is installed for that row.
test.verify(str(model.data(model.index(row, 1))) != "",
"Verifying a definition has been downloaded.")
clickButton("{text='Close' type='QPushButton' unnamed='1' visible='1'}")
clickButton(":Options.OK_QPushButton")
return True
test.fail("Could not find the specified language (%s) to download a highlighter definition"
% language)
clickButton("{text='Close' type='QPushButton' unnamed='1' visible='1'}")
clickButton(":Options.OK_QPushButton")
return False
def hasSuffix(fileName, suffixPatterns):
    """Return True if ``fileName`` ends with any suffix in ``suffixPatterns``.

    str.endswith accepts a tuple of candidates, replacing the original
    hand-written loop with one call; an empty sequence yields False, just
    like the original.
    """
    return fileName.endswith(tuple(suffixPatterns))
def displayHintForHighlighterDefinition(fileName, patterns, lPatterns, added, addedLiterate):
    """Whether Creator should still show the missing-definition hint for
    ``fileName``, given which highlighter definitions were installed."""
    if hasSuffix(fileName, patterns):
        # Plain Haskell file: hint expected only while no definition exists.
        return not added
    if hasSuffix(fileName, lPatterns):
        # Literate Haskell file: analogous check against its definition.
        return not addedLiterate
    # Unknown suffix: log a warning and expect no hint.
    test.warning("Got an unexpected suffix.", "Filename: %s, Patterns: %s"
                 % (fileName, str(patterns + lPatterns)))
    return False
def main():
    """Squish system test: Haskell highlighter definitions in Qt Creator.

    Flow: create .hs/.lhs files and check the "missing definition" hint is
    shown, download the Haskell / Literate Haskell definitions, reopen the
    files and check the hint is gone for covered suffixes.
    """
    miss = "A highlight definition was not found for this file. Would you like to try to find one?"
    startQC()
    if not startedWithoutPluginError():
        return
    uncheckGenericHighlighterFallback()
    # Suffix patterns currently registered for the two Haskell mime types.
    patterns = getOrModifyFilePatternsFor("text/x-haskell", "x-haskell")
    lPatterns = getOrModifyFilePatternsFor("text/x-literate-haskell", "literate-haskell")
    folder = tempDir()
    filesToTest = ["Main.lhs", "Main.hs"]
    code = ['module Main where', '', 'main :: IO ()', '', 'main = putStrLn "Hello World!"']
    for current in filesToTest:
        createFile(folder, current)
        editor = getEditorForFileSuffix(current)
        if editor == None:
            earlyExit("Something's really wrong! (did the UI change?)")
            return
        # Before any definition is downloaded, the hint should appear iff the
        # suffix is registered for one of the Haskell mime types.
        expectHint = hasSuffix(current, patterns) or hasSuffix(current, lPatterns)
        mssg = "Verifying whether hint for missing highlight definition is present. (expected: %s)"
        try:
            waitForObject("{text='%s' type='QLabel' unnamed='1' visible='1' "
                          "window=':Qt Creator_Core::Internal::MainWindow'}" % miss, 2000)
            test.verify(expectHint, mssg % str(expectHint))
        except:
            # waitForObject timed out -> hint label absent.
            test.verify(not expectHint, mssg % str(expectHint))
        # literate haskell: first character must be '>' otherwise it's a comment
        if current.endswith(".lhs"):
            typeLines(editor, [">" + line for line in code])
        else:
            typeLines(editor, code)
        invokeMenuItem("File", "Save All")
        invokeMenuItem("File", "Close All")
    # Download both definitions and make sure the suffixes are registered.
    addedHighlighterDefinition = addHighlighterDefinition("Haskell")
    addedLiterateHighlighterDefinition = addHighlighterDefinition("Literate Haskell")
    patterns = getOrModifyFilePatternsFor('text/x-haskell', 'x-haskell', ['.hs'])
    lPatterns = getOrModifyFilePatternsFor('text/x-literate-haskell', 'literate-haskell', ['.lhs'])
    home = os.path.expanduser("~")
    for current in filesToTest:
        recentFile = os.path.join(folder, current)
        # Recent Files menu abbreviates $HOME to '~' on Linux/macOS.
        if recentFile.startswith(home) and platform.system() in ('Linux', 'Darwin'):
            recentFile = recentFile.replace(home, "~", 1)
        invokeMenuItem("File", "Recent Files", "(&\\d \| )?%s" % recentFile)
        editor = getEditorForFileSuffix(current)
        display = displayHintForHighlighterDefinition(current, patterns, lPatterns,
                                                      addedHighlighterDefinition,
                                                      addedLiterateHighlighterDefinition)
        try:
            waitForObject("{text='%s' type='QLabel' unnamed='1' visible='1' "
                          "window=':Qt Creator_Core::Internal::MainWindow'}" % miss, 2000)
            test.verify(display, "Hint for missing highlight definition was present "
                        "- current file: %s" % current)
        except:
            test.verify(not display, "Hint for missing highlight definition is not shown "
                        "- current file: %s" % current)
        # Edit the file a little so the highlighter actually runs on new input.
        placeCursorToLine(editor, '.*%s' % code[-1], True)
        for _ in range(23):
            type(editor, "<Left>")
        type(editor, "<Return>")
        if current.endswith(".lhs"):
            type(editor, ">")
            type(editor, "<Tab>")
    invokeMenuItem("File", "Save All")
    invokeMenuItem("File", "Exit")
| sailfish-sdk/sailfish-qtcreator | tests/system/suite_editors/tst_generic_highlighter/test.py | Python | gpl-3.0 | 12,463 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides the is_empty() method."""
def get_member_count(my_sequence):
    """Returns the number of members of a list object.
    Args:
        my_sequence (sequence): The sequence object being measured.
    Returns:
        mixed: If the object can be measured it returns an integer. If not it
        returns ``False``
    Examples:
        >>> get_member_count(42)
        False
        >>> get_member_count('duck')
        4
        >>> get_member_count(['knights', 'who', 'say', 'ni'])
        4
    """
    # Objects without __len__() make len() raise TypeError; report False then.
    try:
        return len(my_sequence)
    except TypeError:
        return False
def is_empty(my_sequence):
    """Tests whether or not the passed sequence is empty.
    Args:
        my_sequence (sequence): The sequence object being measured.
    Returns:
        bool: If empty, returns True, otherwise, False.
    Raises:
        TypeError: If my_sequence is not a sequence object type.
    Examples:
        >>> is_empty('')
        True
        >>> is_empty('apple')
        False
        >>> is_empty([])
        True
        >>> is_empty(42)
        Traceback (most recent call last):
        File "<stdin>", line 1, in <module>
        TypeError: object has no len()
    """
    count = get_member_count(my_sequence)
    # get_member_count() returns the literal False for unmeasurable objects.
    # An identity check is required here: a length of 0 compares equal to
    # False (0 == False), so the previous ``count != False`` test wrongly
    # raised TypeError for empty sequences instead of returning True.
    if count is False:
        raise TypeError('Object has no len()')
    return count == 0
# Ad-hoc manual smoke check (note: Python 2 print statement below).
TEST = ''
print len(TEST)
#print is_empty(TEST)
| eliz79/is210-week-03-synthesizing | task_05.py | Python | mpl-2.0 | 1,491 |
#!/usr/bin/env python
'''
The entry point of this module is parse_args() method which calls
other methods to collect user supplied arguments, parses and
verifies them. Description of these methods are the following:
collect_args: This method collects the user supplied arguments and
returns them as an aprgparse ArgumentParser object.
extract_args: This method puts the user supplied arguments into an
ordered dictionary and returns it at the end.
check_args: This method verifies the correctness of the user supplied
arguments and puts them into an ordered dictionary which it returns
at the end.
parse_args: This method calls the above methods and returns the final
dictionary of the user supplied arguments to the calling point.
'''
import os
import sys
import argparse
import re
from collections import OrderedDict
def collect_args():
    """
    Builds the argparse parser that describes all user-facing options
    and returns it to the caller.
    """
    arg_parser = argparse.ArgumentParser(
        description=('Generate a set of training '
                     'sequences by filtering them out from a UniProt-SwissProt file.'))
    arg_parser.add_argument(
        '-I1', '--input1',
        help=(' Specifies path to a '
              'UniProt-SwissProt file. This opton is mandatory.'))
    arg_parser.add_argument(
        '-G', '--organism', default='',
        help=(' Specifies an organism '
              'id, for example, 559292 for Saccharomyces cerevisiae.'))
    arg_parser.add_argument(
        '-O', '--output', default='',
        help=('Provides user '
              'an option to specify an output filename prefix. When not '
              'specified, the program will create an output file name.'))
    return arg_parser
def extract_args(args):
    """
    Maps the parsed argparse namespace onto an ordered dictionary with the
    short keys used internally ('t1', 'outfile', 'g') and returns it.
    """
    return OrderedDict([
        ('t1', args.input1),
        ('outfile', args.output),
        ('g', args.organism),
    ])
def check_args(args_dict, parser):
    """
    Validates the extracted arguments, copying the accepted ones into a new
    ordered dictionary which is returned. A missing input file prints the
    help text (argparse then terminates the program).
    """
    user_dict = OrderedDict()
    for key, value in args_dict.items():
        if key == 't1':
            if value is None:
                print('Missing Uniprot-SwissProt file\n')
                print(parser.parse_args(['--help']))
            else:
                user_dict['t1'] = value
        elif key in ('outfile', 'g'):
            # Passed through unchanged.
            user_dict[key] = value
        elif key == 'Taxon_ID':
            # Historical option: normalise the taxon names and deduplicate.
            if 'all' in value or len(value) == 0:
                user_dict[key] = set([])
            else:
                args_dict[key] = [x.capitalize() for x in value]
                user_dict[key] = set(args_dict[key])
    return user_dict
def parse_args():
    """
    Entry point for this module: collects the command-line arguments,
    extracts them into an ordered dictionary, validates them and returns
    the resulting dictionary.
    """
    # Collect user arguments:
    parser = collect_args()
    args, unknown = parser.parse_known_args()
    if unknown:
        print('\n*********************************')
        print("Invalid Arguments")
        print('*********************************\n')
        print(parser.parse_args(['--help']))
    # Place the user arguments into a dictionary, then check consistency:
    return check_args(extract_args(args), parser)
# When executed directly this module only prints its own documentation;
# the real work happens through parse_args() imported by other scripts.
if __name__ == '__main__':
    print (sys.argv[0] + ':')
    print (__doc__)
    sys.exit(0)
| arkatebi/SwissProt-stats | ArgParser_xTract_trainingSet.py | Python | gpl-3.0 | 4,101 |
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
from pyxmpp import streamtls
from pyxmpp.all import JID, Message
from pyxmpp.jabber.client import JabberClient
from pyxmpp.interface import implements
from pyxmpp.interfaces import *
from module.plugins.hooks.IRCInterface import IRCInterface
class XMPPInterface(IRCInterface, JabberClient):
    """pyLoad hook bridging the IRC command interface onto an XMPP/Jabber
    connection (Python 2 / pyxmpp). Owners listed in the config may send
    the same text commands the IRC interface understands."""

    # Plugin metadata read by the pyLoad hook manager.
    __name__ = "XMPPInterface"
    __version__ = "0.11"
    __type__ = "hook"
    __config__ = [("activated", "bool", "Activated", False),
                  ("jid", "str", "Jabber ID", "user@exmaple-jabber-server.org"),
                  ("pw", "str", "Password", ""),
                  ("tls", "bool", "Use TLS", False),
                  ("owners", "str", "List of JIDs accepting commands from", "me@icq-gateway.org;some@msn-gateway.org"),
                  ("info_file", "bool", "Inform about every file finished", False),
                  ("info_pack", "bool", "Inform about every package finished", True),
                  ("captcha", "bool", "Send captcha requests", True)]
    __description__ = """Connect to jabber and let owner perform different tasks"""
    __author_name__ = "RaNaN"
    __author_mail__ = "RaNaN@pyload.org"

    implements(IMessageHandlersProvider)

    def __init__(self, core, manager):
        IRCInterface.__init__(self, core, manager)
        self.jid = JID(self.getConfig("jid"))
        password = self.getConfig("pw")
        # if bare JID is provided add a resource -- it is required
        if not self.jid.resource:
            self.jid = JID(self.jid.node, self.jid.domain, "pyLoad")
        if self.getConfig("tls"):
            tls_settings = streamtls.TLSSettings(require=True, verify_peer=False)
            auth = ("sasl:PLAIN", "sasl:DIGEST-MD5")
        else:
            tls_settings = None
            auth = ("sasl:DIGEST-MD5", "digest")
        # setup client with provided connection information
        # and identity data
        JabberClient.__init__(self, self.jid, password,
                              disco_name="pyLoad XMPP Client", disco_type="bot",
                              tls_settings=tls_settings, auth_methods=auth)
        # Objects queried by pyxmpp for stanza handlers (see get_message_handlers).
        self.interface_providers = [
            VersionHandler(self),
            self,
        ]

    def coreReady(self):
        # Hook callback: start the client thread once pyLoad is fully up.
        self.new_package = {}
        self.start()

    def packageFinished(self, pypack):
        # Hook callback: optionally notify owners when a package completes.
        try:
            if self.getConfig("info_pack"):
                self.announce(_("Package finished: %s") % pypack.name)
        except:
            pass

    def downloadFinished(self, pyfile):
        # Hook callback: optionally notify owners when a single file completes.
        try:
            if self.getConfig("info_file"):
                self.announce(
                    _("Download finished: %(name)s @ %(plugin)s") % {"name": pyfile.name, "plugin": pyfile.pluginname})
        except:
            pass

    def run(self):
        # connect to IRC etc.
        self.connect()
        try:
            # Blocking pyxmpp event loop; runs for the thread's lifetime.
            self.loop()
        except Exception, ex:
            self.logError("pyLoad XMPP: %s" % str(ex))

    def stream_state_changed(self, state, arg):
        """This one is called when the state of stream connecting the component
        to a server changes. This will usually be used to let the user
        know what is going on."""
        self.logDebug("pyLoad XMPP: *** State changed: %s %r ***" % (state, arg))

    def disconnected(self):
        # pyxmpp callback: connection dropped.
        self.logDebug("pyLoad XMPP: Client was disconnected")

    def stream_closed(self, stream):
        # pyxmpp callback: remote side closed the stream.
        self.logDebug("pyLoad XMPP: Stream was closed | %s" % stream)

    def stream_error(self, err):
        # pyxmpp callback: stream-level error.
        self.logDebug("pyLoad XMPP: Stream Error: %s" % err)

    def get_message_handlers(self):
        """Return list of (message_type, message_handler) tuples.
        The handlers returned will be called when matching message is received
        in a client session."""
        return [("normal", self.message)]

    def message(self, stanza):
        """Message handler for the component.

        Parses the incoming body as "<trigger> [args...]", dispatches to the
        matching IRCInterface ``event_<trigger>`` method (falling back to
        event_pass) and returns the reply stanzas — but only for senders
        listed in the 'owners' config."""
        subject = stanza.get_subject()
        body = stanza.get_body()
        t = stanza.get_type()
        self.logDebug(u'pyLoad XMPP: Message from %s received.' % (unicode(stanza.get_from(),)))
        self.logDebug(u'pyLoad XMPP: Body: %s Subject: %s Type: %s' % (body, subject, t))
        if t == "headline":
            # 'headline' messages should never be replied to
            return True
        if subject:
            subject = u"Re: " + subject
        # NOTE(review): the sender is authorized here, so 'to_jid' below is
        # the ORIGINATOR of the message (reply target) — confirm naming.
        to_jid = stanza.get_from()
        from_jid = stanza.get_to()
        #j = JID()
        to_name = to_jid.as_utf8()
        from_name = from_jid.as_utf8()
        names = self.getConfig("owners").split(";")
        if to_name in names or to_jid.node + "@" + to_jid.domain in names:
            messages = []
            trigger = "pass"
            args = None
            try:
                # First word selects the command, the rest are its arguments.
                temp = body.split()
                trigger = temp[0]
                if len(temp) > 1:
                    args = temp[1:]
            except:
                pass
            handler = getattr(self, "event_%s" % trigger, self.event_pass)
            try:
                res = handler(args)
                # One reply stanza per line returned by the command handler.
                for line in res:
                    m = Message(
                        to_jid=to_jid,
                        from_jid=from_jid,
                        stanza_type=stanza.get_type(),
                        subject=subject,
                        body=line)
                    messages.append(m)
            except Exception, e:
                self.logError("pyLoad XMPP: " + repr(e))
            return messages
        else:
            # Unknown sender: swallow the message without replying.
            return True

    def response(self, msg, origin=""):
        # IRCInterface override: route command responses to all owners.
        return self.announce(msg)

    def announce(self, message):
        """ send message to all owners"""
        for user in self.getConfig("owners").split(";"):
            self.logDebug("pyLoad XMPP: Send message to %s" % user)
            to_jid = JID(user)
            m = Message(from_jid=self.jid,
                        to_jid=to_jid,
                        stanza_type="chat",
                        body=message)
            stream = self.get_stream()
            # Reconnect lazily if the stream went away in the meantime.
            if not stream:
                self.connect()
                stream = self.get_stream()
            stream.send(m)

    def beforeReconnecting(self, ip):
        # Hook callback: drop the XMPP link before pyLoad changes IP.
        self.disconnect()

    def afterReconnecting(self, ip):
        # Hook callback: re-establish the XMPP link after the IP change.
        self.connect()
class VersionHandler(object):
    """Provides handler for a version query.
    This class will answer version query and announce 'jabber:iq:version' namespace
    in the client's disco#info results."""

    implements(IIqHandlersProvider, IFeaturesProvider)

    def __init__(self, client):
        """Just remember who created this."""
        self.client = client

    def get_features(self):
        """Return namespace which should the client include in its reply to a
        disco#info query."""
        return ["jabber:iq:version"]

    def get_iq_get_handlers(self):
        """Return list of tuples (element_name, namespace, handler) describing
        handlers of <iq type='get'/> stanzas"""
        return [("query", "jabber:iq:version", self.get_version)]

    def get_iq_set_handlers(self):
        """Return empty list, as this class provides no <iq type='set'/> stanza handler."""
        return []

    def get_version(self, iq):
        """Handler for jabber:iq:version queries.
        jabber:iq:version queries are not supported directly by PyXMPP, so the
        XML node is accessed directly through the libxml2 API. This should be
        used very carefully!"""
        iq = iq.make_result_response()
        q = iq.new_query("jabber:iq:version")
        # NOTE(review): advertised name/version look like pyxmpp example
        # leftovers rather than pyLoad's own identity — confirm intended.
        q.newTextChild(q.ns(), "name", "Echo component")
        q.newTextChild(q.ns(), "version", "1.0")
        return iq
| estaban/pyload | module/plugins/hooks/XMPPInterface.py | Python | gpl-3.0 | 8,389 |
# Definition for a binary tree node.
class TreeNode:
    """Minimal binary-tree node: a payload value plus left/right children."""
    def __init__(self, x):
        self.val = x       # node payload
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution:
    def isBalanced(self, root: TreeNode) -> bool:
        """Return True if, for every node, the heights of its two subtrees
        differ by at most one (LeetCode 110)."""
        def height(node):
            # Post-order height computation; -1 signals an imbalance and
            # short-circuits the remaining recursion.
            if node is None:
                return 0
            left = height(node.left)
            if left < 0:
                return -1
            right = height(node.right)
            if right < 0:
                return -1
            if abs(left - right) > 1:
                return -1
            return max(left, right) + 1

        return height(root) >= 0
| jiadaizhao/LeetCode | 0101-0200/0110-Balanced Binary Tree/0110-Balanced Binary Tree.py | Python | mit | 493 |
# -*- coding: utf-8 -*-
from __future__ import with_statement
from distutils.version import LooseVersion
from cms.admin.change_list import CMSChangeList
from cms.admin.forms import PageForm, AdvancedSettingsForm
from cms.admin.pageadmin import PageAdmin
from cms.admin.permissionadmin import PagePermissionInlineAdmin
from cms.api import create_page, create_title, add_plugin, assign_user_to_page
from cms.models.pagemodel import Page
from cms.models.permissionmodels import GlobalPagePermission, PagePermission
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.models.titlemodels import Title
from djangocms_text_ckeditor.models import Text
from cms.test_utils import testcases as base
from cms.test_utils.testcases import CMSTestCase, URL_CMS_PAGE_DELETE, URL_CMS_PAGE, URL_CMS_TRANSLATION_DELETE
from cms.test_utils.util.context_managers import SettingsOverride
from cms.utils import get_cms_setting
import django
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.sites import site
from django.contrib.auth.models import User, Permission, AnonymousUser
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.http import (Http404, HttpResponseBadRequest, HttpResponseForbidden, HttpResponse)
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.encoding import smart_str
# True when running under Django older than 1.5; used for version-specific
# admin behaviour in the tests below.
DJANGO_1_4 = LooseVersion(django.get_version()) < LooseVersion('1.5')
class AdminTestsBase(CMSTestCase):
    """Shared fixtures/helpers for the page-admin test cases."""

    @property
    def admin_class(self):
        """The PageAdmin instance registered for the Page model."""
        return site._registry[Page]

    def _get_guys(self, admin_only=False):
        """Return a superuser, or (superuser, staff_user) where the staff
        user holds page permissions but no advanced-settings rights."""
        superuser = self.get_superuser()
        if admin_only:
            return superuser
        username = 'test'
        staff = User.objects.create_user(username, 'test@test.com', username)
        staff.is_staff = True
        staff.is_active = True
        staff.save()
        staff.user_permissions = Permission.objects.filter(codename__in=[
            'change_page', 'change_title', 'add_page',
            'add_title', 'delete_page', 'delete_title',
        ])
        global_perm = GlobalPagePermission.objects.create(
            user=staff,
            can_change=True,
            can_delete=True,
            can_change_advanced_settings=False,
            can_publish=True,
            can_change_permissions=False,
            can_move_page=True)
        global_perm.sites = Site.objects.all()
        return superuser, staff
class AdminTestCase(AdminTestsBase):
    """Integration tests for the CMS page admin views (permissions,
    deletion, templates, change list rendering)."""

    def test_edit_does_not_reset_page_adv_fields(self):
        """
        Makes sure that if a non-superuser with no rights to edit advanced page
        fields edits a page, those advanced fields are not touched.
        """
        OLD_PAGE_NAME = 'Test Page'
        NEW_PAGE_NAME = 'Test page 2'
        REVERSE_ID = 'Test'
        OVERRIDE_URL = 'my/override/url'
        admin, normal_guy = self._get_guys()
        site = Site.objects.get(pk=1)
        # The admin creates the page
        page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en",
                           site=site, created_by=admin)
        page.reverse_id = REVERSE_ID
        page.save()
        title = page.get_title_obj()
        title.has_url_overwrite = True
        title.path = OVERRIDE_URL
        title.save()
        self.assertEqual(page.get_title(), OLD_PAGE_NAME)
        self.assertEqual(page.reverse_id, REVERSE_ID)
        self.assertEqual(title.overwrite_url, OVERRIDE_URL)
        # The user edits the page (change the page name for ex.)
        page_data = {
            'title': NEW_PAGE_NAME,
            'slug': page.get_slug(),
            'language': title.language,
            'site': page.site.pk,
            'template': page.template,
            'pagepermission_set-TOTAL_FORMS': 0,
            'pagepermission_set-INITIAL_FORMS': 0,
            'pagepermission_set-MAX_NUM_FORMS': 0,
            'pagepermission_set-2-TOTAL_FORMS': 0,
            'pagepermission_set-2-INITIAL_FORMS': 0,
            'pagepermission_set-2-MAX_NUM_FORMS': 0
        }
        # required only if user haves can_change_permission
        with self.login_user_context(normal_guy):
            resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
                                    follow=True)
            self.assertEqual(resp.status_code, 200)
            self.assertTemplateNotUsed(resp, 'admin/login.html')
            page = Page.objects.get(pk=page.pk)
            self.assertEqual(page.get_title(), NEW_PAGE_NAME)
            self.assertEqual(page.reverse_id, REVERSE_ID)
            title = page.get_title_obj()
            # Advanced field must survive the restricted user's edit.
            self.assertEqual(title.overwrite_url, OVERRIDE_URL)
        # The admin edits the page (change the page name for ex.)
        page_data = {
            'title': OLD_PAGE_NAME,
            'slug': page.get_slug(),
            'language': title.language,
            'site': page.site.pk,
            'template': page.template,
            'reverse_id': page.reverse_id,
            'pagepermission_set-TOTAL_FORMS': 0,  # required only if user haves can_change_permission
            'pagepermission_set-INITIAL_FORMS': 0,
            'pagepermission_set-MAX_NUM_FORMS': 0,
            'pagepermission_set-2-TOTAL_FORMS': 0,
            'pagepermission_set-2-INITIAL_FORMS': 0,
            'pagepermission_set-2-MAX_NUM_FORMS': 0
        }
        with self.login_user_context(admin):
            resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
                                    follow=True)
            self.assertEqual(resp.status_code, 200)
            self.assertTemplateNotUsed(resp, 'admin/login.html')
            page = Page.objects.get(pk=page.pk)
            self.assertEqual(page.get_title(), OLD_PAGE_NAME)
            self.assertEqual(page.reverse_id, REVERSE_ID)
            title = page.get_title_obj()
            # The admin form omits overwrite_url, so it is reset for admins.
            self.assertEqual(title.overwrite_url, None)

    def test_edit_does_not_reset_apphook(self):
        """
        Makes sure that if a non-superuser with no rights to edit advanced page
        fields edits a page, those advanced fields are not touched.
        """
        OLD_PAGE_NAME = 'Test Page'
        NEW_PAGE_NAME = 'Test page 2'
        REVERSE_ID = 'Test'
        APPLICATION_URLS = 'project.sampleapp.urls'
        admin, normal_guy = self._get_guys()
        site = Site.objects.get(pk=1)
        # The admin creates the page
        page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en",
                           site=site, created_by=admin)
        page.reverse_id = REVERSE_ID
        page.save()
        title = page.get_title_obj()
        title.has_url_overwrite = True
        title.save()
        page.application_urls = APPLICATION_URLS
        page.save()
        self.assertEqual(page.get_title(), OLD_PAGE_NAME)
        self.assertEqual(page.reverse_id, REVERSE_ID)
        self.assertEqual(page.application_urls, APPLICATION_URLS)
        # The user edits the page (change the page name for ex.)
        page_data = {
            'title': NEW_PAGE_NAME,
            'slug': page.get_slug(),
            'language': title.language,
            'site': page.site.pk,
            'template': page.template,
        }
        # required only if user haves can_change_permission
        page_data['pagepermission_set-TOTAL_FORMS'] = 0
        page_data['pagepermission_set-INITIAL_FORMS'] = 0
        page_data['pagepermission_set-MAX_NUM_FORMS'] = 0
        page_data['pagepermission_set-2-TOTAL_FORMS'] = 0
        page_data['pagepermission_set-2-INITIAL_FORMS'] = 0
        page_data['pagepermission_set-2-MAX_NUM_FORMS'] = 0
        with self.login_user_context(normal_guy):
            resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
                                    follow=True)
            self.assertEqual(resp.status_code, 200)
            self.assertTemplateNotUsed(resp, 'admin/login.html')
            page = Page.objects.get(pk=page.pk)
            self.assertEqual(page.get_title(), NEW_PAGE_NAME)
            self.assertEqual(page.reverse_id, REVERSE_ID)
            # The apphook (an advanced field) must be untouched.
            self.assertEqual(page.application_urls, APPLICATION_URLS)
            title = page.get_title_obj()
        # The admin edits the page (change the page name for ex.)
        page_data = {
            'title': OLD_PAGE_NAME,
            'slug': page.get_slug(),
            'language': title.language,
            'site': page.site.pk,
            'template': page.template,
            'reverse_id': page.reverse_id,
        }
        with self.login_user_context(admin):
            resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data,
                                    follow=True)
            self.assertEqual(resp.status_code, 200)
            self.assertTemplateNotUsed(resp, 'admin/login.html')
            resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
                                    follow=True)
            self.assertEqual(resp.status_code, 200)
            self.assertTemplateNotUsed(resp, 'admin/login.html')
            page = Page.objects.get(pk=page.pk)
            self.assertEqual(page.get_title(), OLD_PAGE_NAME)
            self.assertEqual(page.reverse_id, REVERSE_ID)
            # Advanced form without apphook data clears it for admins.
            self.assertEqual(page.application_urls, '')

    def test_delete(self):
        """Deleting a published page via the admin redirects to the list."""
        admin = self.get_superuser()
        page = create_page("delete-page", "nav_playground.html", "en",
                           created_by=admin, published=True)
        child = create_page('child-page', "nav_playground.html", "en",
                            created_by=admin, published=True, parent=page)
        with self.login_user_context(admin):
            data = {'post': 'yes'}
            response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data)
            self.assertRedirects(response, URL_CMS_PAGE)
            # TODO - The page should be marked for deletion, but nothing more
            # until publishing
            #self.assertRaises(Page.DoesNotExist, self.reload, page)
            #self.assertRaises(Page.DoesNotExist, self.reload, child)

    def test_search_fields(self):
        """Every cms admin with search_fields must answer a search query."""
        superuser = self.get_superuser()
        from django.contrib.admin import site
        with self.login_user_context(superuser):
            for model, admin in site._registry.items():
                if model._meta.app_label != 'cms':
                    continue
                if not admin.search_fields:
                    continue
                url = reverse('admin:cms_%s_changelist' % model._meta.module_name)
                response = self.client.get('%s?q=1' % url)
                errmsg = response.content
                self.assertEqual(response.status_code, 200, errmsg)

    def test_delete_translation(self):
        """Removing a single language translation of a page works."""
        admin = self.get_superuser()
        page = create_page("delete-page-translation", "nav_playground.html", "en",
                           created_by=admin, published=True)
        create_title("de", "delete-page-translation-2", page, slug="delete-page-translation-2")
        with self.login_user_context(admin):
            response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
            self.assertEqual(response.status_code, 200)
            response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
            self.assertRedirects(response, URL_CMS_PAGE)

    def test_change_template(self):
        """change_template: 404 for unknown page, 403 for staff without the
        right, 400 for invalid template, 200 for a valid one."""
        admin, staff = self._get_guys()
        request = self.get_request('/admin/cms/page/1/', 'en')
        request.method = "POST"
        pageadmin = site._registry[Page]
        with self.login_user_context(staff):
            self.assertRaises(Http404, pageadmin.change_template, request, 1)
            page = create_page('test-page', 'nav_playground.html', 'en')
            response = pageadmin.change_template(request, page.pk)
            self.assertEqual(response.status_code, 403)
        url = reverse('admin:cms_page_change_template', args=(page.pk,))
        with self.login_user_context(admin):
            response = self.client.post(url, {'template': 'doesntexist'})
            self.assertEqual(response.status_code, 400)
            response = self.client.post(url, {'template': get_cms_setting('TEMPLATES')[0][0]})
            self.assertEqual(response.status_code, 200)

    def test_get_permissions(self):
        """Permission view requires authentication, then renders for admins."""
        page = create_page('test-page', 'nav_playground.html', 'en')
        url = reverse('admin:cms_page_get_permissions', args=(page.pk,))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'admin/login.html')
        admin = self.get_superuser()
        with self.login_user_context(admin):
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateNotUsed(response, 'admin/login.html')

    def test_changelist_items(self):
        """CMSChangeList exposes the page tree in creation order."""
        admin = self.get_superuser()
        first_level_page = create_page('level1', 'nav_playground.html', 'en')
        second_level_page_top = create_page('level21', "nav_playground.html", "en",
                                            created_by=admin, published=True, parent=first_level_page)
        second_level_page_bottom = create_page('level22', "nav_playground.html", "en",
                                               created_by=admin, published=True, parent=self.reload(first_level_page))
        third_level_page = create_page('level3', "nav_playground.html", "en",
                                       created_by=admin, published=True, parent=second_level_page_top)
        self.assertEquals(Page.objects.all().count(), 4)
        url = reverse('admin:cms_%s_changelist' % Page._meta.module_name)
        request = self.get_request(url)
        request.session = {}
        request.user = admin
        page_admin = site._registry[Page]
        cl_params = [request, page_admin.model, page_admin.list_display,
                     page_admin.list_display_links, page_admin.list_filter,
                     page_admin.date_hierarchy, page_admin.search_fields,
                     page_admin.list_select_related, page_admin.list_per_page]
        if hasattr(page_admin, 'list_max_show_all'):  # django 1.4
            cl_params.append(page_admin.list_max_show_all)
        cl_params.extend([page_admin.list_editable, page_admin])
        cl = CMSChangeList(*tuple(cl_params))
        cl.set_items(request)
        root_page = cl.get_items()[0]
        self.assertEqual(root_page, first_level_page)
        self.assertEqual(root_page.get_children()[0], second_level_page_top)
        self.assertEqual(root_page.get_children()[1], second_level_page_bottom)
        self.assertEqual(root_page.get_children()[0].get_children()[0], third_level_page)

    def test_changelist_tree(self):
        """ This test checks for proper jstree cookie unquoting.
        It should be converted to a selenium test to actually test the jstree behaviour.
        Cookie set below is just a forged example (from live session)
        """
        admin = self.get_superuser()
        first_level_page = create_page('level1', 'nav_playground.html', 'en')
        second_level_page_top = create_page('level21', "nav_playground.html", "en",
                                            created_by=admin, published=True, parent=first_level_page)
        second_level_page_bottom = create_page('level22', "nav_playground.html", "en",
                                               created_by=admin, published=True, parent=self.reload(first_level_page))
        third_level_page = create_page('level3', "nav_playground.html", "en",
                                       created_by=admin, published=True, parent=second_level_page_top)
        url = reverse('admin:cms_%s_changelist' % Page._meta.module_name)
        self.client.login(username='admin', password='admin')
        self.client.cookies['djangocms_nodes_open'] = 'page_1%2Cpage_2'
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)
        self.assertEquals(response.context["open_menu_trees"], [1, 2])
        # tests descendants method for the lazy load ajax call
        url = "%s%d/descendants/" % (url, first_level_page.pk)
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)
        # should include both direct descendant pages
        self.assertContains(response, 'id="page_%s"' % second_level_page_top.pk)
        self.assertContains(response, 'id="page_%s"' % second_level_page_bottom.pk)
        # but not any further down the tree
        self.assertNotContains(response, 'id="page_%s"' % third_level_page.pk)

    def test_unihandecode_doesnt_break_404_in_admin(self):
        """A missing page id still yields a clean 404 in the admin."""
        admin = self.get_superuser()
        self.client.login(username='admin', password='admin')
        response = self.client.get('/en/admin/cms/page/1/?language=en')
        self.assertEqual(response.status_code, 404)
class AdminTests(AdminTestsBase):
# TODO: needs tests for actual permissions, not only superuser/normaluser
def setUp(self):
self.page = create_page("testpage", "nav_playground.html", "en")
def get_admin(self):
usr = User(username="admin", email="admin@django-cms.org", is_staff=True, is_superuser=True)
usr.set_password("admin")
usr.save()
return usr
def get_permless(self):
usr = User(username="permless", email="permless@django-cms.org", is_staff=True)
usr.set_password("permless")
usr.save()
return usr
def get_page(self):
return self.page
def test_get_moderation_state(self):
page = self.get_page()
permless = self.get_permless()
admin = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.get_moderation_states(request, page.pk)
self.assertEqual(response.status_code, 200)
with self.login_user_context(admin):
request = self.get_request()
response = self.admin_class.get_moderation_states(request, page.pk)
self.assertEqual(response.status_code, 200)
def test_change_status(self):
page = self.get_page()
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.change_status(request, page.pk)
self.assertEqual(response.status_code, 405)
page = self.reload(page)
self.assertFalse(page.published)
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_status(request, page.pk)
# Forbidden
self.assertEqual(response.status_code, 403)
self.assertFalse(page.published)
admin = self.get_admin()
with self.login_user_context(admin):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_status(request, page.pk)
self.assertEqual(response.status_code, 200)
page = self.reload(page)
self.assertTrue(page.published)
response = self.admin_class.change_status(request, page.pk)
self.assertEqual(response.status_code, 200)
page = self.reload(page)
self.assertFalse(page.published)
def test_change_status_adds_log_entry(self):
page = self.get_page()
admin = self.get_admin()
with self.login_user_context(admin):
request = self.get_request(post_data={'no': 'data'})
self.assertFalse(LogEntry.objects.count())
response = self.admin_class.change_status(request, page.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(1, LogEntry.objects.count())
self.assertEqual(page.pk, int(LogEntry.objects.all()[0].object_id))
def test_change_innavigation(self):
page = self.get_page()
permless = self.get_permless()
admin = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 405)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
self.assertRaises(Http404, self.admin_class.change_innavigation,
request, page.pk + 100)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(admin):
request = self.get_request(post_data={'no': 'data'})
old = page.in_navigation
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 200)
page = self.reload(page)
self.assertEqual(old, not page.in_navigation)
def test_publish_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.publish_page(request, Page.objects.all()[0].pk)
self.assertEqual(response.status_code, 403)
def test_revert_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.revert_page(request, Page.objects.all()[0].pk)
self.assertEqual(response.status_code, 403)
def test_revert_page_redirects(self):
admin = self.get_admin()
self.page.publish() # Ensure public copy exists before reverting
with self.login_user_context(admin):
response = self.client.get(reverse('admin:cms_page_revert_page', args=(self.page.pk,)))
self.assertEqual(response.status_code, 302)
url = response['Location']
self.assertTrue(url.endswith('?edit_off'))
    def test_remove_plugin_requires_post(self):
        """Issue a GET against delete_plugin as an admin.

        NOTE(review): despite the name, the GET is expected to return 200
        rather than 405 -- presumably the delete confirmation page; confirm
        the intent before renaming or tightening this test.
        """
        ph = Placeholder.objects.create(slot='test')
        plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
        admin = self.get_admin()
        with self.login_user_context(admin):
            request = self.get_request()
            response = self.admin_class.delete_plugin(request, plugin.pk)
            self.assertEqual(response.status_code, 200)
def test_move_plugin(self):
ph = Placeholder.objects.create(slot='test')
plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
page = self.get_page()
source, target = list(page.placeholders.all())[:2]
pageplugin = add_plugin(source, 'TextPlugin', 'en', body='test')
placeholder = Placeholder.objects.all()[0]
permless = self.get_permless()
admin = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 405)
request = self.get_request(post_data={'not_usable': '1'})
self.assertRaises(MultiValueDictKeyError, self.admin_class.move_plugin, request)
with self.login_user_context(admin):
request = self.get_request(post_data={'ids': plugin.pk})
self.assertRaises(MultiValueDictKeyError, self.admin_class.move_plugin, request)
with self.login_user_context(admin):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': 'invalid-placeholder'})
self.assertRaises(ValueError, self.admin_class.move_plugin, request)
with self.login_user_context(permless):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.pk, 'plugin_parent': ''})
self.assertEquals(self.admin_class.move_plugin(request).status_code, HttpResponseForbidden.status_code)
with self.login_user_context(admin):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.pk, 'plugin_parent': ''})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"ok")
with self.login_user_context(permless):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.id, 'plugin_parent': ''})
self.assertEquals(self.admin_class.move_plugin(request).status_code, HttpResponseForbidden.status_code)
with self.login_user_context(admin):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.id, 'plugin_parent': ''})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"ok")
    def test_preview_page(self):
        """preview_page 404s for unknown pages and otherwise redirects to the
        page URL with ``?edit`` -- absolute (with domain) once the page's
        site differs from the current one.  The later steps mutate the
        page's site, so statement order matters.
        """
        permless = self.get_permless()
        with self.login_user_context(permless):
            # 404 is an unused pk here, not a status code.
            request = self.get_request()
            self.assertRaises(Http404, self.admin_class.preview_page, request,
                              404)
        page = self.get_page()
        page.publish()
        base_url = page.get_absolute_url()
        with self.login_user_context(permless):
            # Both the public and the default preview redirect to ?edit.
            request = self.get_request('/?public=true')
            response = self.admin_class.preview_page(request, page.pk)
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response['Location'], '%s?edit' % base_url)
            request = self.get_request()
            response = self.admin_class.preview_page(request, page.pk)
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response['Location'], '%s?edit' % base_url)
            # Move the page to another site: the redirect becomes absolute.
            site = Site.objects.create(domain='django-cms.org', name='django-cms')
            page.site = site
            page.save()
            page.publish()
            self.assertTrue(page.is_home())
            response = self.admin_class.preview_page(request, page.pk)
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response['Location'],
                             'http://django-cms.org%s?edit' % base_url)
def test_too_many_plugins_global(self):
conf = {
'body': {
'limits': {
'global': 1,
},
},
}
admin = self.get_admin()
url = reverse('admin:cms_page_add_plugin')
with SettingsOverride(CMS_PERMISSION=False,
CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_too_many_plugins_type(self):
conf = {
'body': {
'limits': {
'TextPlugin': 1,
},
},
}
admin = self.get_admin()
url = reverse('admin:cms_page_add_plugin')
with SettingsOverride(CMS_PERMISSION=False,
CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_edit_title_dirty_bit(self):
language = "en"
admin = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish()
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin):
response = self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty())
    def test_edit_title_languages(self):
        """NOTE(review): this is a verbatim copy of test_edit_title_dirty_bit
        and never exercises more than one language despite its name --
        presumably a copy-paste stub; confirm the intended multi-language
        scenario before extending or deleting it.
        """
        language = "en"
        admin = self.get_admin()
        page = create_page('A', 'nav_playground.html', language)
        page_admin = PageAdmin(Page, None)
        page_admin._current_page = page
        page.publish()
        draft_page = page.get_draft_object()
        admin_url = reverse("admin:cms_page_edit_title", args=(
            draft_page.pk, language
        ))
        post_data = {
            'title': "A Title"
        }
        with self.login_user_context(admin):
            response = self.client.post(admin_url, post_data)
            draft_page = Page.objects.get(pk=page.pk).get_draft_object()
            self.assertTrue(draft_page.is_dirty())
class NoDBAdminTests(CMSTestCase):
    """Checks on the registered PageAdmin that need no database fixtures."""
    @property
    def admin_class(self):
        """The live admin instance registered for ``Page``."""
        return site._registry[Page]

    def test_lookup_allowed_site__exact(self):
        # Filtering the changelist by exact site id must be permitted.
        self.assertTrue(self.admin_class.lookup_allowed('site__exact', '1'))

    def test_lookup_allowed_published(self):
        # Filtering on the published flag must be permitted as well.
        self.assertTrue(self.admin_class.lookup_allowed('published', value='1'))
class PluginPermissionTests(AdminTestsBase):
    """Permission checks for the plugin admin endpoints (add / edit / move /
    copy / delete) and for permission-related admin UI behaviour.
    """
    def setUp(self):
        # One page with at least one placeholder; every test works on it.
        self._page = create_page('test page', 'nav_playground.html', 'en')
        self._placeholder = self._page.placeholders.all()[0]
    def _get_admin(self):
        """Create and persist a staff user named 'admin' (no permissions yet)."""
        admin = User(
            username='admin',
            email='admin@admin.com',
            is_active=True,
            is_staff=True,
        )
        admin.set_password('admin')
        admin.save()
        return admin
    def _get_page_admin(self):
        # The PageAdmin instance registered with the default admin site.
        return admin.site._registry[Page]
    def _give_permission(self, user, model, permission_type, save=True):
        """Grant the ``<permission_type>_<model>`` permission to *user*.

        NOTE(review): the ``save`` argument is accepted but never used;
        ``user_permissions.add`` persists immediately anyway.
        """
        codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
        user.user_permissions.add(Permission.objects.get(codename=codename))
    def _give_page_permssion_rights(self, user):
        # (sic: "permssion") grants add/change/delete on PagePermission.
        self._give_permission(user, PagePermission, 'add')
        self._give_permission(user, PagePermission, 'change')
        self._give_permission(user, PagePermission, 'delete')
    def _get_change_page_request(self, user, page):
        # Minimal request stand-in: only .user and .path are read by the
        # code under test.
        return type('Request', (object,), {
            'user': user,
            'path': base.URL_CMS_PAGE_CHANGE % page.pk
        })
    def _give_cms_permissions(self, user, save=True):
        """Give *user* model permissions on Page/Title plus a global page
        permission covering all sites.
        """
        for perm_type in ['add', 'change', 'delete']:
            for model in [Page, Title]:
                self._give_permission(user, model, perm_type, False)
        gpp = GlobalPagePermission.objects.create(
            user=user,
            can_change=True,
            can_delete=True,
            can_change_advanced_settings=False,
            can_publish=True,
            can_change_permissions=False,
            can_move_page=True,
        )
        gpp.sites = Site.objects.all()
        if save:
            user.save()
    def _create_plugin(self):
        # A TextPlugin in the page's first placeholder.
        plugin = add_plugin(self._placeholder, 'TextPlugin', 'en')
        return plugin
    def test_plugin_add_requires_permissions(self):
        """User tries to add a plugin but has no permissions. He can add the plugin after he got the permissions"""
        admin = self._get_admin()
        self._give_cms_permissions(admin)
        self.client.login(username='admin', password='admin')
        url = reverse('admin:cms_page_add_plugin')
        data = {
            'plugin_type': 'TextPlugin',
            'placeholder_id': self._placeholder.pk,
            'plugin_language': 'en',
            'plugin_parent': '',
        }
        # Without the Text add-permission the endpoint is forbidden.
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        self._give_permission(admin, Text, 'add')
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponse.status_code)
    def test_plugin_edit_requires_permissions(self):
        """User tries to edit a plugin but has no permissions. He can edit the plugin after he got the permissions"""
        plugin = self._create_plugin()
        _, normal_guy = self._get_guys()
        self.client.login(username='test', password='test')
        url = reverse('admin:cms_page_edit_plugin', args=[plugin.id])
        response = self.client.post(url, dict())
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # After he got the permissions, he can edit the plugin
        self._give_permission(normal_guy, Text, 'change')
        response = self.client.post(url, dict())
        self.assertEqual(response.status_code, HttpResponse.status_code)
    def test_plugin_remove_requires_permissions(self):
        """User tries to remove a plugin but has no permissions. He can remove the plugin after he got the permissions"""
        plugin = self._create_plugin()
        _, normal_guy = self._get_guys()
        self.client.login(username='test', password='test')
        url = reverse('admin:cms_page_delete_plugin', args=[plugin.pk])
        data = dict(plugin_id=plugin.id)
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # After he got the permissions, he can edit the plugin
        self._give_permission(normal_guy, Text, 'delete')
        response = self.client.post(url, data)
        # A successful delete redirects.
        self.assertEqual(response.status_code, 302)
    def test_plugin_move_requires_permissions(self):
        """User tries to move a plugin but has no permissions. He can move the plugin after he got the permissions"""
        plugin = self._create_plugin()
        _, normal_guy = self._get_guys()
        self.client.login(username='test', password='test')
        url = reverse('admin:cms_page_move_plugin')
        data = dict(plugin_id=plugin.id,
                    placeholder_id=self._placeholder.pk,
                    plugin_parent='',
                    )
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # After he got the permissions, he can edit the plugin
        self._give_permission(normal_guy, Text, 'change')
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponse.status_code)
    def test_plugins_copy_requires_permissions(self):
        """User tries to copy plugin but has no permissions. He can copy plugins after he got the permissions"""
        plugin = self._create_plugin()
        _, normal_guy = self._get_guys()
        self.client.login(username='test', password='test')
        url = reverse('admin:cms_page_copy_plugins')
        data = dict(source_plugin_id=plugin.id,
                    source_placeholder_id=self._placeholder.pk,
                    source_language='en',
                    target_language='fr',
                    target_placeholder_id=self._placeholder.pk,
                    )
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # After he got the permissions, he can edit the plugin
        self._give_permission(normal_guy, Text, 'add')
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponse.status_code)
    def test_page_permission_inline_visibility(self):
        """The PagePermissionInline is only offered to users who hold
        can_change_permissions on the page."""
        user = User(username='user', email='user@domain.com', password='user',
                    is_staff=True)
        user.save()
        self._give_page_permssion_rights(user)
        page = create_page('A', 'nav_playground.html', 'en')
        page_permission = PagePermission.objects.create(
            can_change_permissions=True, user=user, page=page)
        request = self._get_change_page_request(user, page)
        page_admin = PageAdmin(Page, None)
        page_admin._current_page = page
        # user has can_change_permission
        # => must see the PagePermissionInline
        self.assertTrue(
            any(type(inline) is PagePermissionInlineAdmin
                for inline in page_admin.get_inline_instances(request,
                                                              page if not DJANGO_1_4 else None)))
        page = Page.objects.get(pk=page.pk)
        # remove can_change_permission
        page_permission.can_change_permissions = False
        page_permission.save()
        request = self._get_change_page_request(user, page)
        page_admin = PageAdmin(Page, None)
        page_admin._current_page = page
        # => PagePermissionInline is no longer visible
        self.assertFalse(
            any(type(inline) is PagePermissionInlineAdmin
                for inline in page_admin.get_inline_instances(request,
                                                              page if not DJANGO_1_4 else None)))
    def test_edit_title_is_allowed_for_staff_user(self):
        """
        We check here both the permission on a single page, and the global permissions
        """
        user = self._create_user('user', is_staff=True)
        another_user = self._create_user('another_user', is_staff=True)
        page = create_page('A', 'nav_playground.html', 'en')
        admin_url = reverse("admin:cms_page_edit_title", args=(
            page.pk, 'en'
        ))
        page_admin = PageAdmin(Page, None)
        page_admin._current_page = page
        # Staff alone is not enough: forbidden.
        self.client.login(username=user.username, password=user.username)
        response = self.client.get(admin_url)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # Per-page grant suffices.
        assign_user_to_page(page, user, grant_all=True)
        self.client.login(username=user.username, password=user.username)
        response = self.client.get(admin_url)
        self.assertEqual(response.status_code, HttpResponse.status_code)
        # Global CMS permissions suffice as well.
        self._give_cms_permissions(another_user)
        self.client.login(username=another_user.username, password=another_user.username)
        response = self.client.get(admin_url)
        self.assertEqual(response.status_code, HttpResponse.status_code)
    def test_plugin_add_returns_valid_pk_for_plugin(self):
        """The add_plugin response body contains an edit URL whose embedded
        pk refers to a real CMSPlugin row."""
        admin = self._get_admin()
        self._give_cms_permissions(admin)
        self._give_permission(admin, Text, 'add')
        self.client.login(username='admin', password='admin')
        url = reverse('admin:cms_page_add_plugin')
        data = {
            'plugin_type': 'TextPlugin',
            'placeholder_id': self._placeholder.pk,
            'plugin_language': 'en',
            'plugin_parent': '',
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponse.status_code)
        self.assertEqual(response['content-type'], 'application/json')
        # Extract the pk from ".../edit-plugin/<pk>/" in the JSON payload.
        pk = response.content.decode('utf8').split("edit-plugin/")[1].split("/")[0]
        self.assertTrue(CMSPlugin.objects.filter(pk=int(pk)).exists())
class AdminFormsTests(AdminTestsBase):
    """Validation behaviour of PageForm and AdvancedSettingsForm."""
    def test_clean_overwrite_url(self):
        """A page with an overwrite URL validates on create and on re-edit."""
        user = AnonymousUser()
        user.is_superuser = True
        user.pk = 1
        request = type('Request', (object,), {'user': user})
        with SettingsOverride():
            data = {
                'title': 'TestPage',
                'slug': 'test-page',
                'language': 'en',
                'overwrite_url': '/overwrite/url/',
                'site': Site.objects.get_current().pk,
                'template': get_cms_setting('TEMPLATES')[0][0],
                'published': True
            }
            form = PageForm(data)
            self.assertTrue(form.is_valid(), form.errors.as_text())
            # NOTE(review): form.save() does not populate the Title or the
            # permission caches itself, so they are wired up manually here.
            instance = form.save()
            instance.permission_user_cache = user
            instance.permission_advanced_settings_cache = True
            Title.objects.set_or_create(request, instance, form, 'en')
            # The same data must still validate when bound to the instance.
            form = PageForm(data, instance=instance)
            self.assertTrue(form.is_valid(), form.errors.as_text())
    def test_missmatching_site_parent_dotsite(self):
        """A child page on a different site than its parent must not validate."""
        site0 = Site.objects.create(domain='foo.com', name='foo.com')
        site1 = Site.objects.create(domain='foo.com', name='foo.com')
        parent_page = Page.objects.create(
            template='nav_playground.html',
            site=site0)
        new_page_data = {
            'title': 'Title',
            'slug': 'slug',
            'language': 'en',
            'site': site1.pk,
            'template': get_cms_setting('TEMPLATES')[0][0],
            'reverse_id': '',
            'parent': parent_page.pk,
        }
        form = PageForm(data=new_page_data, files=None)
        self.assertFalse(form.is_valid())
        self.assertIn(u"Site doesn't match the parent's page site",
                      form.errors['__all__'])
    def test_reverse_id_error_location(self):
        ''' Test moving the reverse_id validation error to a field specific one '''
        # this is the Reverse ID we'll re-use to break things.
        dupe_id = 'p1'
        site = Site.objects.get_current()
        page1 = create_page('Page 1', 'nav_playground.html', 'en', reverse_id=dupe_id)
        page2 = create_page('Page 2', 'nav_playground.html', 'en')
        # Assemble a bunch of data to test the page form
        page2_data = {
            'language': 'en',
            'site': site.pk,
            'reverse_id': dupe_id,
        }
        form = AdvancedSettingsForm(data=page2_data, files=None)
        self.assertFalse(form.is_valid())
        # reverse_id is the only item that is in __all__ as every other field
        # has it's own clean method. Moving it to be a field error means
        # __all__ is now not available.
        self.assertNotIn('__all__', form.errors)
        # In moving it to it's own field, it should be in form.errors, and
        # the values contained therein should match these.
        self.assertIn('reverse_id', form.errors)
        self.assertEqual(1, len(form.errors['reverse_id']))
        self.assertEqual([u'A page with this reverse URL id exists already.'],
                         form.errors['reverse_id'])
        # An empty reverse_id is always acceptable.
        page2_data['reverse_id'] = ""
        form = AdvancedSettingsForm(data=page2_data, files=None)
        self.assertTrue(form.is_valid())
        admin = self._get_guys(admin_only=True)
        # reset some of page2_data so we can use cms.api.create_page
        page2 = page2.reload()
        page2.site = site
        page2.save()
        with self.login_user_context(admin):
            # re-reset the page2_data for the admin form instance.
            page2_data['reverse_id'] = dupe_id
            page2_data['site'] = site.pk
            # post to the admin change form for page 2, and test that the
            # reverse_id form row has an errors class. Django's admin avoids
            # collapsing these, so that the error is visible.
            resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page2_data)
            self.assertContains(resp, '<div class="form-row errors reverse_id">')
class AdminPageEditContentSizeTests(AdminTestsBase):
    """
    System user count influences the size of the page edit page,
    but the users are only 2 times present on the page
    The test relates to extra=0
    at PagePermissionInlineAdminForm and ViewRestrictionInlineAdmin
    """
    def test_editpage_contentsize(self):
        """
        Expected a username only 2 times in the content, but a relationship
        between usercount and pagesize
        """
        with SettingsOverride(CMS_PERMISSION=True):
            admin = self.get_superuser()
            PAGE_NAME = 'TestPage'
            USER_NAME = 'test_size_user_0'
            site = Site.objects.get(pk=1)
            page = create_page(PAGE_NAME, "nav_playground.html", "en", site=site, created_by=admin)
            page.save()
            self._page = page
            with self.login_user_context(admin):
                url = base.URL_CMS_PAGE_PERMISSION_CHANGE % self._page.pk
                response = self.client.get(url)
                self.assertEqual(response.status_code, 200)
                # Baseline: page size and user count before adding a user.
                old_response_size = len(response.content)
                old_user_count = User.objects.count()
                # create an additional user and reload the page
                User.objects.create(username=USER_NAME, is_active=True)
                user_count = User.objects.count()
                more_users_in_db = old_user_count < user_count
                # we have more users
                self.assertTrue(more_users_in_db, "New users got NOT created")
                response = self.client.get(url)
                new_response_size = len(response.content)
                page_size_grown = old_response_size < new_response_size
                # expect that the pagesize gets influenced by the useramount of the system
                self.assertTrue(page_size_grown, "Page size has not grown after user creation")
                # usernames are only 2 times in content
                # NOTE(review): response._charset is a private Django attribute.
                text = smart_str(response.content, response._charset)
                foundcount = text.count(USER_NAME)
                # 2 forms contain usernames as options
                self.assertEqual(foundcount, 2,
                                 "Username %s appeared %s times in response.content, expected 2 times" % (
                                     USER_NAME, foundcount))
| 11craft/django-cms | cms/tests/admin.py | Python | bsd-3-clause | 47,111 |
# CodeQL query-test fixture: __version__ only comes into scope if the exec'd
# code assigns it, so the undefined-variable query must not flag it below.
from other import setup
# NOTE(review): exec() receives the literal string 'x/_version.py' -- a file
# *path*, not file contents; this is deliberate fixture input for the query.
exec('x/_version.py')
setup(
    name='x',
    version=__version__
)
| github/codeql | python/ql/test/query-tests/Variables/undefined/uses_exec.py | Python | mit | 99 |
#!/usr/bin/python
import sys, argparse, re, gzip
from subprocess import Popen, PIPE
from Bio.Format.Sam import SAM
def main():
  """Emit one BED row per exon of each alignment's target transcript.

  Reads SAM from stdin when the input argument is '-', otherwise pipes the
  given BAM through ``samtools view``.  Output goes to stdout or to the -o
  file (gzip-compressed when the name ends in .gz).
  """
  parser = argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument('input',help="Use BAM or - for SAM STDIN")
  parser.add_argument('-o','--output',help="output file")
  args = parser.parse_args()
  of = sys.stdout
  if args.output:
    # Pick plain or gzip output based on the file extension.
    if re.search('\.gz$',args.output):
      of = gzip.open(args.output,'w')
    else:
      of = open(args.output,'w')
  inf = sys.stdin
  if args.input != '-':
    # Delegate BAM decoding to samtools; read its SAM text output.
    cmd = "samtools view "+args.input
    p = Popen(cmd.split(),stdout=PIPE)
    inf = p.stdout
  for line in inf:
    v = SAM(line)
    # 68 is the threshold passed to get_target_transcript -- presumably a
    # minimum intron size in bases; confirm against Bio.Format.Sam.
    for rng in [x.get_range() for x in v.get_target_transcript(68).exons]:
      of.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
  if args.input != '-':
    # Wait for samtools to finish so its exit is not left pending.
    p.communicate()
  of.close()
if __name__=="__main__":
main()
| jason-weirather/Au-public | iron/utilities/bam_to_bed_exons.py | Python | apache-2.0 | 964 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityGroupListResult(Model):
    """Response for list effective network security groups API service call.

    :param value: A list of effective network security groups.
    :type value:
     list[~azure.mgmt.network.v2017_03_01.models.EffectiveNetworkSecurityGroup]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    # Maps constructor arguments to wire names and msrest serializer types
    # (file is auto-generated; regeneration overwrites manual edits).
    _attribute_map = {
        'value': {'key': 'value', 'type': '[EffectiveNetworkSecurityGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value=None, next_link: str=None, **kwargs) -> None:
        super(EffectiveNetworkSecurityGroupListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/effective_network_security_group_list_result_py3.py | Python | mit | 1,288 |
#!/usr/bin/env python
"""This script solves the Project Euler problem "Even Fibonacci numbers". The
problem is: By considering the terms in the Fibonacci sequence whose values do
not exceed four million, find the sum of the even-valued terms.
"""
def main():
"""Even Fibonacci numbers"""
# Constants
MAX_FIB = 4000000
fib_even1 = 0
fib_even2 = 2
sum_even = 0
while fib_even2 < MAX_FIB:
fib_even1, fib_even2 = fib_even2, 4 * fib_even2 + fib_even1
sum_even += fib_even1
print(sum_even)
if __name__ == '__main__':
main()
| iansealy/projecteuler | optimal/2.py | Python | gpl-3.0 | 578 |
#! /usr/bin/env python3
"""
This file is used to setup and maintain a connection with an IRC server.
"""
import traceback
import socket
import random
import time
import sys
# as the file is loaded separately, the imports have to be adjusted.
sys.path.append('./PlebNet')
from plebnet.agent.qtable import QTable
from plebnet.agent.core import vpn_is_running
from plebnet.communication import git_issuer
from plebnet.controllers import wallet_controller, market_controller, tribler_controller
from plebnet.utilities import logger
from plebnet.settings import plebnet_settings
class Create(object):
    """
    The object which maintains the server connection.

    Construction loads the IRC settings, registers all chat command
    handlers, opens the socket and enters the blocking receive loop.

    Fixes: the KeyboardInterrupt handler in ``keep_running`` previously
    called ``self.irc.send(st)`` with a ``str`` -- on Python 3 the socket
    requires bytes (and IRC requires a CRLF terminator), so the QUIT was
    never delivered; it now goes through ``self.send``.  Bare ``except:``
    clauses were narrowed to ``except Exception:`` so SystemExit and
    KeyboardInterrupt are no longer swallowed.
    """
    def __init__(self):
        logger.log("preparing an IRC connection")
        # load required settings once
        settings = plebnet_settings.get_instance()
        self.server = settings.irc_server()
        self.timeout = settings.irc_timeout()
        self.channel = settings.irc_channel()
        self.port = settings.irc_port()
        self.nick = settings.irc_nick()
        # The numeric suffix after the default nick prefix, used to give
        # this bot its own !qtables<N> command.
        nick_number = self.nick[len(settings.irc_nick_def()):]
        self.ident = "plebber"
        self.gecos = "Plebbot version 2.15"
        self.irc = None
        self.init_time = time.time()
        self.last_beat = time.time()
        # prep reply functions
        self.responses = {}
        self.add_response("alive", self.msg_alive)
        self.add_response("error", self.msg_error)
        self.add_response("init", self.msg_init)
        self.add_response("joke", self.msg_joke)
        self.add_response("MB_wallet", self.msg_MB_wallet)
        self.add_response("BTC_wallet", self.msg_BTC_wallet)
        self.add_response("TBTC_wallet", self.msg_TBTC_wallet)
        self.add_response("MB_balance", self.msg_MB_balance)
        self.add_response("BTC_balance", self.msg_BTC_balance)
        self.add_response("TBTC_balance", self.msg_TBTC_balance)
        self.add_response("matchmakers", self.msg_match_makers)
        self.add_response("uploaded", self.msg_uploaded)
        self.add_response("downloaded", self.msg_downloaded)
        self.add_response("general", self.msg_general)
        self.add_response("helped", self.msg_helped)
        self.add_response("helped_by", self.msg_helped_by)
        self.add_response("qtables" + str(nick_number), self.msg_qtable)
        # start running the IRC server
        self.init_irc()
        self.run()

    def add_response(self, command, response):
        """
        This method is used to add new commands to the IRC-bot.
        :param command: the command (after !) which should trigger the provided method
        :type command: String
        :param response: The method to call as the command is received
        :type response: a method
        """
        self.responses[":!" + command] = response

    def init_irc(self):
        """Open the TCP connection to the configured IRC server."""
        try:
            logger.log("start running an IRC connection on " + self.server + " " + self.channel)
            self.irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.irc.connect((self.server, self.port))
        except Exception:
            title = "A error occurred in IRCbot init_irc %s" % sys.exc_info()[0]
            body = traceback.format_exc()
            logger.error(title)
            logger.error(body)
            git_issuer.handle_error(title, body)
            git_issuer.handle_error("A error occurred in IRCbot", sys.exc_info()[0], ['crash'])

    def run(self):
        """
        This method keeps listening to the server for incoming messages and processes them.
        """
        self.send("NICK %s" % self.nick)
        self.send("USER %s %s %s : %s" % (self.nick, self.nick, self.nick, self.gecos))
        self.heartbeat()
        buffer = ""
        while True:
            buffer = self.keep_running(buffer)

    def keep_running(self, buffer):
        """Receive once from the socket, split complete lines off *buffer*
        and handle them; returns the remaining partial line."""
        try:
            buffer += self.irc.recv(2048).decode()
            lines = str.split(buffer, "\r\n")
            # The last element is an (possibly empty) incomplete line.
            buffer = lines.pop()
            for line in lines:
                logger.log("Received IRC message: " + line)
            for line in lines:
                self.handle_line(line)
        except KeyboardInterrupt:
            st = "QUIT :I have to go for now!"
            # Use self.send: it encodes to bytes and appends CRLF; passing
            # the raw str to socket.send raised TypeError on Python 3.
            self.send(st)
            raise
        except Exception:
            title = "A error occurred in IRCbot %s" % sys.exc_info()[0]
            body = traceback.format_exc()
            logger.error(title)
            logger.error(body)
            git_issuer.handle_error(title, body)
            self.send_msg(title)
        return buffer

    def heartbeat(self):
        """
        This method sends a heartbeat to the IRC server when it is called.
        """
        timer = time.time()
        elapsed_time = timer - self.last_beat
        if elapsed_time > self.timeout:
            self.last_beat = timer
            time_str = time.strftime("%H:%M:%S", time.gmtime(timer - self.init_time))
            logger.log("IRC is still running - alive for " + time_str)
            self.send_msg("IRC is still running - alive for %s" % time_str)

    def handle_line(self, line):
        """
        This method handles a line received from the IRC server.
        :param line: The line to process
        :type line: String
        """
        line = str.rstrip(line)
        words = str.split(line)
        # playing ping-pong with a key (words[1])
        if words[0] == "PING":
            st = "PONG %s" % words[1]
            self.send(st)
        # server status 433 --> nickname is already in use, so we chose a new one
        elif line.find("433 * " + self.nick) != -1:
            settings = plebnet_settings.get_instance()
            settings.irc_nick(settings.irc_nick_def() + str(random.randint(1000, 10000)))
            self.nick = settings.irc_nick()
            self.send("NICK %s" % self.nick)
            self.send("USER %s %s %s : %s" % (self.nick, self.nick, self.nick, self.gecos))
        # server status 376 and 422 means ready to join a channel
        elif line.find("376 " + self.nick) != -1 or line.find("422 " + self.nick) != -1:
            st = "JOIN " + self.channel
            self.send(st)
        # handle incoming messages
        elif len(words) > 3 and words[3] in self.responses:
            self.responses[words[3]]()

    """
    THE SENDER METHODS
    These handle the outgoing messages
    """

    def send(self, msg):
        """Encode *msg*, append the IRC CRLF terminator and transmit it."""
        logger.log("Sending IRC message: %s" % msg)
        msg2 = "%s\r\n" % msg
        self.irc.send(msg2.encode())

    def send_msg(self, msg):
        """Send *msg* as a PRIVMSG to the configured channel."""
        self.send("PRIVMSG %s :%s" % (self.channel, msg))

    """
    THE RESPONSES (don't forget to add them to the self.responses in the init method)
    These methods are used to determine the response to received commands
    """

    def msg_alive(self):
        time_str = time.strftime("%j days + %H:%M:%S", time.gmtime(time.time() - self.init_time))
        self.send_msg("I am alive, for %s" % time_str)

    def msg_error(self):
        # Deliberately raises so the error path can be exercised via chat.
        self.send_msg("Let me create an error ...")
        raise Exception('This is an error for testing purposes')

    def msg_init(self): self.send_msg("My init date is: %s" % plebnet_settings.get_instance().vps_life())

    def msg_joke(self): self.send_msg("Q: Why did the hipster burn his tongue? A: He ate the pizza before it was cool.")

    def msg_MB_wallet(self): self.send_msg("My MB wallet is: %s" % wallet_controller.get_MB_wallet())

    def msg_BTC_wallet(self): self.send_msg("My BTC wallet is: %s" % wallet_controller.get_BTC_wallet())

    def msg_TBTC_wallet(self): self.send_msg("My TBTC wallet is: %s" % wallet_controller.get_TBTC_wallet())

    def msg_MB_balance(self): self.send_msg("My MB balance is: %s" % wallet_controller.get_MB_balance())

    def msg_BTC_balance(self): self.send_msg("My BTC balance is: %s" % wallet_controller.get_BTC_balance())

    def msg_TBTC_balance(self): self.send_msg("My TBTC balance is: %s" % wallet_controller.get_TBTC_balance())

    def msg_match_makers(self): self.send_msg("I currently have: %s matchmakers" % market_controller.match_makers())

    def msg_uploaded(self): self.send_msg("I currently have uploaded: %s MB" % tribler_controller.get_uploaded())

    def msg_downloaded(self): self.send_msg("I currently have downloaded: %s MB" % tribler_controller.get_downloaded())

    def msg_helped(self): self.send_msg("I currently have helped: %s peers" % tribler_controller.get_helped())

    def msg_helped_by(self): self.send_msg("I am currently helped by: %s peers" % tribler_controller.get_helped_by())

    def msg_general(self):
        """Report provider/option/vpn/tree/exitnode status in one message."""
        qtable = QTable()
        qtable.read_dictionary()
        data = {
            'host': qtable.self_state.provider,
            'option': qtable.self_state.option,
            'vpn': vpn_is_running(),
            'tree': qtable.tree,
            'exitnode': plebnet_settings.get_instance().tribler_exitnode()
        }
        self.send_msg("general: %s" % data)

    def msg_qtable(self):
        """Render the agent's QTable as a fixed-width table, one IRC message
        per row (IRC caps messages at 512 characters)."""
        qtable = QTable()
        qtable.read_dictionary()
        headers = ["-"]
        table = []
        header_dict = {}
        # get all the available vps options
        for k, v in qtable.qtable.items():
            shorter_item = k.split(" ")[0].split("_")[0] + "1"
            num = 1
            while shorter_item in headers:
                num += 1
                # NOTE(review): replaces every occurrence of the last char,
                # not only the trailing digit -- confirm for names that end
                # in a repeated character.
                shorter_item = shorter_item.replace(shorter_item[-1], str(num))
            headers.append(shorter_item)
            header_dict[k] = shorter_item
        # get the rest of the table
        index = 0
        for k, v in qtable.qtable.items():
            table.append([header_dict[k]])
            for k2, v2 in v.items():
                table[index].append(str(v2))
            index += 1
        # get the format string used for each line in the table
        formatt = "{:<%i} "
        max_len = 20
        for vps_service in headers[1:]:
            max_len = max(len(vps_service) + 2, max_len)
            formatt += "{:<%i} " % max(20, (len(vps_service) + 2))
        formatt = formatt % max_len
        formatt = formatt[:-1]
        headers[0] *= (max_len - 2)
        # send individual messages for each line of the qtable
        # , because IRC only supports up until 512 characters per message
        self.send_msg(formatt.format(*headers))
        time.sleep(3)
        for line in table:
            time.sleep(3)
            self.send_msg(formatt.format(*line))
if __name__ == '__main__':
Create()
| rjwvandenberg/PlebNet | plebnet/communication/irc/ircbot.py | Python | lgpl-3.0 | 10,976 |
"""
The GeoDjango GEOS module. Please consult the GeoDjango documentation
for more details: https://docs.djangoproject.com/en/dev/ref/contrib/gis/geos/
"""
from .collections import ( # NOQA
GeometryCollection, MultiLineString, MultiPoint, MultiPolygon,
)
from .error import GEOSException # NOQA
from .factory import fromfile, fromstr # NOQA
from .geometry import GEOSGeometry, hex_regex, wkt_regex # NOQA
from .io import WKBReader, WKBWriter, WKTReader, WKTWriter # NOQA
from .libgeos import geos_version, geos_version_info # NOQA
from .linestring import LinearRing, LineString # NOQA
from .point import Point # NOQA
from .polygon import Polygon # NOQA
try:
    # Compare numeric version components instead of raw strings: plain string
    # comparison mis-orders double-digit components (e.g. '3.10.0' < '3.3.0').
    # geos_version_info() guarantees 'version' is a dotted numeric 'X.Y.Z'
    # string, so int() conversion is safe here.
    HAS_GEOS = tuple(
        int(part) for part in geos_version_info()['version'].split('.')
    ) >= (3, 3, 0)
except ImportError:
    HAS_GEOS = False
| yephper/django | django/contrib/gis/geos/__init__.py | Python | bsd-3-clause | 791 |
'''
Harvester for the eScholarship@UMMS for the SHARE project
Example API call: http://escholarship.umassmed.edu/do/oai/?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class UmassmedHarvester(OAIHarvester):
    """OAI-PMH harvester configuration for eScholarship@UMMS.

    All harvesting logic lives in OAIHarvester; this subclass only supplies
    the source-specific settings below.
    """

    # Identifier scrAPI uses to refer to this source.
    short_name = 'umassmed'
    long_name = 'eScholarship@UMMS'
    url = 'http://escholarship.umassmed.edu'
    # OAI-PMH endpoint queried for records.
    base_url = 'http://escholarship.umassmed.edu/do/oai/'
    # Extra Dublin Core fields to keep from each harvested record.
    property_list = ['rights', 'source', 'relation', 'date', 'identifier', 'type', 'setSpec']
    # This endpoint's datestamps include time-of-day granularity.
    timezone_granularity = True
| erinspace/scrapi | scrapi/harvesters/umassmed.py | Python | apache-2.0 | 578 |
# Copyright (C) 2014 Andreas M. Weller <andreas.m.weller@gmail.com>
import os
def unite(output, target_folder = "./"):
    """Concatenate every ``*_coverage.csv`` file in *target_folder* into one file.

    Each row of an input file named ``<sample>_coverage.csv`` is written to the
    output prefixed with ``<sample>`` and a tab.

    Args:
        output: file name of the combined file, created inside *target_folder*.
        target_folder: directory to scan; defaults to the current directory.

    Returns:
        The path of the written output file.
    """
    suffix = "_coverage.csv"
    files = [name for name in os.listdir(target_folder) if name.endswith(suffix)]
    # os.path.join is correct whether or not target_folder ends with a slash;
    # the previous plain concatenation silently produced wrong paths without one.
    out_path = os.path.join(target_folder, output)
    with open(out_path, "w") as out:
        for filename in files:
            # Strip the "_coverage.csv" suffix to recover the sample name.
            sample = filename[:-len(suffix)]
            with open(os.path.join(target_folder, filename)) as handle:
                for row in handle:
                    out.write(sample + "\t" + row)
    return out_path
####################################################################################
# Ad-hoc check when run directly (Python 2 script): print every sample/row pair
# from the coverage files in the current directory, without writing any output
# file.
if __name__ == '__main__':
    files = [x for x in os.listdir(".") if x.endswith("_coverage.csv")]
    for filename in files:
        sample = filename[:-13]  # strip the 13-character "_coverage.csv" suffix
        with open(filename) as handle:
            for row in handle:
                # Python 2 print statement; the trailing comma suppresses the
                # extra newline because each row already ends with one.
                print sample +"\t"+ row,
| aweller/CoverageCheck | unite_coverage_files.py | Python | bsd-3-clause | 791 |
# This file is part of Medieer.
#
# Medieer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medieer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
import re
from apibase import *
from data.models import NSCommon, Media
class TVDB(APIBase):
    """Minimal thetvdb.com API wrapper built on top of APIBase."""

    # Credentials / endpoint configuration consumed by APIBase.
    apikey = '1B9D1199533C99BB'
    path_format = '/api/%(api)s?%(method)s=%(term)s'
    protocol = 'http'
    host = 'www.thetvdb.com'

    def lookup(self, title_string):
        """Parse a series title out of *title_string*, then fetch its data."""
        self.parseTitleFromFilename(title_string)
        self.getSeries()

    def getSeries(self):
        """Fetch series information from TVDB (not implemented yet)."""
        pass
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss operations for use in neural networks.
Note: All the losses are added to the `GraphKeys.LOSSES` collection.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.deprecation import deprecated
# Public API of this module; helpers prefixed with "_" are internal.
__all__ = ["absolute_difference",
           "add_loss",
           "cosine_distance",
           "compute_weighted_loss",
           "get_losses",
           "get_regularization_losses",
           "get_total_loss",
           "hinge_loss",
           "log_loss",
           "mean_pairwise_squared_error",
           "mean_squared_error",
           "sigmoid_cross_entropy",
           "softmax_cross_entropy",
           "sparse_softmax_cross_entropy"]
# TODO(b/32171727): Remove when deprecated `targets` is removed.
def _labels(labels, targets):
if labels is None:
labels = targets
elif targets is not None:
raise ValueError("Can not specify both `labels` and `targets`.")
if labels is None:
raise ValueError("Must provide 1 of `labels` and `targets`.")
return labels
# TODO(b/32171727): Remove when deprecated `weight` is removed.
_WEIGHT_SENTINEL = object()
# TODO(b/32171727): Remove when deprecated `weight` is removed. Also, restore
# weights=1.0 as default in all calling fns.
def _weights(weights, weight):
if weights is _WEIGHT_SENTINEL:
weights = weight
elif weight is not _WEIGHT_SENTINEL:
raise ValueError("Can not specify both `weights` and `weight`.")
if weights is None:
raise ValueError("`weights` cannot be None.")
if weights is _WEIGHT_SENTINEL:
weights = 1.0
return weights
def _scale_losses(losses, weights):
  """Computes the scaled loss.
  Args:
    losses: A `Tensor` of size [batch_size, d1, ... dN].
    weights: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].
      The `losses` are reduced (tf.reduce_sum) until its dimension matches
      that of `weights` at which point the reduced `losses` are element-wise
      multiplied by `weights` and a final reduce_sum is computed on the result.
      Conceptually, this operation is equivalent to broadcasting (tiling)
      `weights` to be the same size as `losses`, performing an element-wise
      multiplication, and summing the result.
  Returns:
    A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
      `losses`.
  """
  # First, compute the sum of the losses over all elements:
  # Sum away the trailing dimensions of `losses` that `weights` lacks, so the
  # two line up rank-wise for the element-wise multiply below.
  start_index = max(0, weights.get_shape().ndims)
  reduction_indices = list(range(start_index, losses.get_shape().ndims))
  reduced_losses = math_ops.reduce_sum(losses,
                                       reduction_indices=reduction_indices)
  reduced_losses = math_ops.mul(reduced_losses, weights)
  return math_ops.reduce_sum(reduced_losses)
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.
  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.
  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.
  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  # Where denominator > 0, divide; elsewhere emit 0. The inner where() swaps
  # zeros in the denominator for ones so the unselected division branch never
  # evaluates x/0, which would leak Inf/NaN into the gradient computation.
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
def _safe_mean(losses, num_present):
  """Computes a safe mean of the losses.
  Args:
    losses: A tensor whose elements contain individual loss measurements.
    num_present: The number of measurable losses in the tensor.
  Returns:
    A scalar representing the mean of the losses. If `num_present` is zero,
      then zero is returned.
  """
  # Sum first, then divide by the element count; _safe_div yields 0 (not NaN)
  # when num_present is 0.
  total_loss = math_ops.reduce_sum(losses)
  return _safe_div(total_loss, num_present)
@deprecated("2016-12-30", "Use tf.losses.compute_weighted_loss instead.")
@deprecated_args(
"2016-11-25", "`weight` is being deprecated, use `weights`.", "weight")
def compute_weighted_loss(
losses, weights=_WEIGHT_SENTINEL, scope=None, weight=_WEIGHT_SENTINEL):
"""Computes the weighted loss.
Args:
losses: A tensor of size [batch_size, d1, ... dN].
weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
scope: the scope for the operations performed in computing the loss.
weight: Deprecated alias for `weights`.
Returns:
A scalar `Tensor` that returns the weighted loss.
Raises:
ValueError: If `weights` is `None` or the shape is not compatible with
`losses`, or if the number of dimensions (rank) of either `losses` or
`weights` is missing.
"""
weights = _weights(weights, weight)
with ops.name_scope(scope, "weighted_loss", [losses, weights]):
losses = ops.convert_to_tensor(losses)
input_dtype = losses.dtype
losses = math_ops.to_float(losses)
weights = math_ops.to_float(ops.convert_to_tensor(weights))
if losses.get_shape().ndims is None:
raise ValueError("losses.get_shape().ndims cannot be None")
weights_shape = weights.get_shape()
if weights_shape.ndims is None:
raise ValueError("weight.get_shape().ndims cannot be None")
if weights_shape.ndims > 1 and weights_shape.dims[-1].is_compatible_with(1):
weights = array_ops.squeeze(weights, [-1])
total_loss = _scale_losses(losses, weights)
num_present = _num_present(losses, weights)
mean_loss = _safe_mean(total_loss, num_present)
# convert the result back to the input type
mean_loss = math_ops.cast(mean_loss, input_dtype)
add_loss(mean_loss)
return mean_loss
def _num_present(losses, weights, per_batch=False):
  """Computes the number of elements in the loss function induced by `weights`.
  A given weights tensor induces different numbers of usable elements in the
  `losses` tensor. The `weights` tensor is broadcast across `losses` for all
  possible dimensions. For example, if `losses` is a tensor of dimension
  [4, 5, 6, 3] and `weights` is a tensor of size [4, 5], then `weights` is, in
  effect, tiled to match the size of `losses`. Following this effective tile,
  the total number of present elements is the number of non-zero weights.
  Args:
    losses: A tensor of size [batch_size, d1, ... dN].
    weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
    per_batch: Whether to return the number of elements per batch or as a sum
      total.
  Returns:
    The number of present (non-zero) elements in the losses tensor. If
      `per_batch` is True, the value is returned as a tensor of size
      [batch_size]. Otherwise, a single scalar tensor is returned.
  """
  # If weights is a scalar, its easy to compute:
  # either every element counts (weight != 0) or none do (weight == 0).
  if weights.get_shape().ndims == 0:
    batch_size = array_ops.reshape(array_ops.slice(array_ops.shape(losses),
                                                   [0], [1]), [])
    num_per_batch = math_ops.div(math_ops.to_float(array_ops.size(losses)),
                                 math_ops.to_float(batch_size))
    num_per_batch = array_ops.where(math_ops.equal(weights, 0),
                                    0.0, num_per_batch)
    num_per_batch = math_ops.mul(array_ops.ones(
        array_ops.reshape(batch_size, [1])), num_per_batch)
    return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
  # First, count the number of nonzero weights:
  if weights.get_shape().ndims >= 1:
    reduction_indices = list(range(1, weights.get_shape().ndims))
    num_nonzero_per_batch = math_ops.reduce_sum(
        math_ops.to_float(math_ops.not_equal(weights, 0)),
        reduction_indices=reduction_indices)
  # Next, determine the number of elements that weight would broadcast to:
  # each nonzero weight covers one full block of the trailing loss dimensions.
  broadcast_dims = array_ops.slice(array_ops.shape(losses),
                                   [weights.get_shape().ndims], [-1])
  num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))
  num_per_batch = math_ops.mul(num_nonzero_per_batch, num_to_broadcast)
  return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
@deprecated("2016-12-30", "Use tf.losses.add_loss instead.")
@add_arg_scope
def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):
"""Adds a externally defined loss to the collection of losses.
Args:
loss: A loss `Tensor`.
loss_collection: Optional collection to add the loss to.
"""
if loss_collection:
ops.add_to_collection(loss_collection, loss)
@deprecated("2016-12-30", "Use tf.losses.get_losses instead.")
def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):
"""Gets the list of losses from the loss_collection.
Args:
scope: an optional scope for filtering the losses to return.
loss_collection: Optional losses collection.
Returns:
a list of loss tensors.
"""
return ops.get_collection(loss_collection, scope)
@deprecated("2016-12-30", "Use tf.losses.get_regularization_losses instead.")
def get_regularization_losses(scope=None):
"""Gets the regularization losses.
Args:
scope: an optional scope for filtering the losses to return.
Returns:
A list of loss variables.
"""
return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope)
@deprecated("2016-12-30", "Use tf.losses.get_total_loss instead.")
def get_total_loss(add_regularization_losses=True, name="total_loss"):
"""Returns a tensor whose value represents the total loss.
Notice that the function adds the given losses to the regularization losses.
Args:
add_regularization_losses: A boolean indicating whether or not to use the
regularization losses in the sum.
name: The name of the returned tensor.
Returns:
A `Tensor` whose value represents the total loss.
Raises:
ValueError: if `losses` is not iterable.
"""
losses = get_losses()
if add_regularization_losses:
losses += get_regularization_losses()
return math_ops.add_n(losses, name=name)
@deprecated("2016-12-30", "Use tf.losses.absolute_difference instead.")
@deprecated_args(
"2016-11-25",
"`targets` is being deprecated, use `labels`."
" `weight` is being deprecated, use `weights`.",
"targets", "weight")
def absolute_difference(
predictions, labels=None, weights=_WEIGHT_SENTINEL, scope=None,
targets=None, weight=_WEIGHT_SENTINEL):
"""Adds an Absolute Difference loss to the training procedure.
`weight` acts as a coefficient for the loss. If a scalar is provided, then the
loss is simply scaled by the given value. If `weight` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weight` vector. If the shape of
`weight` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weight`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
weights: Coefficients for the loss a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
targets: Deprecated alias for `labels`.
weight: Deprecated alias for `weights`.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weight` is invalid.
"""
labels = _labels(labels, targets)
weights = _weights(weights, weight)
with ops.name_scope(scope, "absolute_difference",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = math_ops.abs(math_ops.sub(predictions, labels))
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30", "Use tf.losses.sigmoid_cross_entropy instead.")
@deprecated_args(
"2016-11-25", "`weight` is being deprecated, use `weights`", "weight")
def sigmoid_cross_entropy(
logits, multi_class_labels, weights=_WEIGHT_SENTINEL, label_smoothing=0,
scope=None, weight=_WEIGHT_SENTINEL):
"""Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.
`weight` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weight` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/2:
new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
+ 0.5 * label_smoothing
Args:
logits: [batch_size, num_classes] logits outputs of the network .
multi_class_labels: [batch_size, num_classes] target labels in (0, 1).
weights: Coefficients for the loss. The tensor must be a scalar, a tensor of
shape [batch_size] or shape [batch_size, num_classes].
label_smoothing: If greater than 0 then smooth the labels.
scope: The scope for the operations performed in computing the loss.
weight: Deprecated alias for `weights`.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of
`multi_class_labels` or if the shape of `weight` is invalid, or if
`weight` is None.
"""
weights = _weights(weights, weight)
with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
[logits, multi_class_labels, weights]) as scope:
logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())
multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
if label_smoothing > 0:
multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
0.5 * label_smoothing)
losses = nn.sigmoid_cross_entropy_with_logits(logits, multi_class_labels,
name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30", "Use tf.losses.softmax_cross_entropy instead.")
@deprecated_args(
"2016-11-25", "`weight` is being deprecated, use `weights`", "weight")
def softmax_cross_entropy(
logits, onehot_labels, weights=_WEIGHT_SENTINEL, label_smoothing=0,
scope=None, weight=_WEIGHT_SENTINEL):
"""Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits.
`weight` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weight` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes:
new_onehot_labels = onehot_labels * (1 - label_smoothing)
+ label_smoothing / num_classes
Args:
logits: [batch_size, num_classes] logits outputs of the network .
onehot_labels: [batch_size, num_classes] target one_hot_encoded labels.
weights: Coefficients for the loss. The tensor must be a scalar or a tensor
of shape [batch_size].
label_smoothing: If greater than 0 then smooth the labels.
scope: the scope for the operations performed in computing the loss.
weight: Deprecated alias for `weights`.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
or if the shape of `weight` is invalid or if `weight` is None.
"""
weights = _weights(weights, weight)
with ops.name_scope(scope, "softmax_cross_entropy_loss",
[logits, onehot_labels, weights]) as scope:
logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())
onehot_labels = math_ops.cast(onehot_labels, logits.dtype)
if label_smoothing > 0:
num_classes = math_ops.cast(
array_ops.shape(onehot_labels)[1], logits.dtype)
smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
onehot_labels = onehot_labels * smooth_positives + smooth_negatives
losses = nn.softmax_cross_entropy_with_logits(logits, onehot_labels,
name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30", "Use tf.losses.sparse_softmax_cross_entropy instead.")
@deprecated_args(
"2016-11-25", "`weight` is being deprecated, use `weights`", "weight")
def sparse_softmax_cross_entropy(
logits, labels, weights=_WEIGHT_SENTINEL, scope=None,
weight=_WEIGHT_SENTINEL):
"""Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.
`weight` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weight` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
Args:
logits: [batch_size, num_classes] logits outputs of the network .
labels: [batch_size, 1] or [batch_size] target labels of dtype `int32` or
`int64` in the range `[0, num_classes)`.
weights: Coefficients for the loss. The tensor must be a scalar or a tensor
of shape [batch_size] or [batch_size, 1].
scope: the scope for the operations performed in computing the loss.
weight: Deprecated alias for `weights`.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shapes of logits, labels, and weight are incompatible, or
if `weight` is None.
"""
weights = _weights(weights, weight)
with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
[logits, labels, weights]) as scope:
labels = array_ops.reshape(labels, shape=[array_ops.shape(labels)[0]])
weights = array_ops.squeeze(weights)
losses = nn.sparse_softmax_cross_entropy_with_logits(logits, labels,
name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30", "Use tf.losses.log_loss instead.")
@deprecated_args(
"2016-11-25",
"`targets` is being deprecated, use `labels`."
" `weight` is being deprecated, use `weights`.",
"targets", "weight")
def log_loss(
predictions, labels=None, weights=_WEIGHT_SENTINEL, epsilon=1e-7,
scope=None, targets=None, weight=_WEIGHT_SENTINEL):
"""Adds a Log Loss term to the training procedure.
`weight` acts as a coefficient for the loss. If a scalar is provided, then the
loss is simply scaled by the given value. If `weight` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weight` vector. If the shape of
`weight` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weight`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
weights: Coefficients for the loss a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
epsilon: A small increment to add to avoid taking a log of zero.
scope: The scope for the operations performed in computing the loss.
targets: Deprecated alias for `labels`.
weight: Deprecated alias for `weights`.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weight` is invalid.
"""
labels = _labels(labels, targets)
weights = _weights(weights, weight)
with ops.name_scope(scope, "log_loss",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = -math_ops.mul(
labels,
math_ops.log(predictions + epsilon)) - math_ops.mul(
(1 - labels), math_ops.log(1 - predictions + epsilon))
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30", "Use tf.losses.hinge_loss instead.")
@deprecated_args(
"2016-11-25", "`target` is being deprecated, use `labels`.", "target")
def hinge_loss(logits, labels=None, scope=None, target=None):
"""Method that returns the loss tensor for hinge loss.
Args:
logits: The logits, a float tensor.
labels: The ground truth output tensor. Its shape should match the shape of
logits. The values of the tensor are expected to be 0.0 or 1.0.
scope: The scope for the operations performed in computing the loss.
target: Deprecated alias for `labels`.
Returns:
A `Tensor` of same shape as logits and target representing the loss values
across the batch.
Raises:
ValueError: If the shapes of `logits` and `labels` don't match.
"""
labels = _labels(labels, target)
with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
logits.get_shape().assert_is_compatible_with(labels.get_shape())
# We first need to convert binary labels to -1/1 labels (as floats).
labels = math_ops.to_float(labels)
all_ones = array_ops.ones_like(labels)
labels = math_ops.sub(2 * labels, all_ones)
return nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(labels, logits)))
@deprecated("2016-12-30", "Use tf.losses.mean_squared_error instead.")
@deprecated_args(
"2016-11-25",
"`targets` is being deprecated, use `labels`."
" `weight` is being deprecated, use `weights`.",
"targets", "weight")
def mean_squared_error(
predictions, labels=None, weights=_WEIGHT_SENTINEL, scope=None,
targets=None, weight=_WEIGHT_SENTINEL):
"""Adds a Sum-of-Squares loss to the training procedure.
`weight` acts as a coefficient for the loss. If a scalar is provided, then the
loss is simply scaled by the given value. If `weight` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weight` vector. If the shape of
`weight` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weight`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
weights: Coefficients for the loss a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
targets: Deprecated alias for `labels`.
weight: Deprecated alias for `weights`.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weight` is invalid.
"""
labels = _labels(labels, targets)
weights = _weights(weights, weight)
with ops.name_scope(scope, "mean_squared_error",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = math_ops.square(math_ops.sub(predictions, labels))
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30", "Use tf.losses.mean_pairwise_squared_error instead.")
@deprecated_args(
"2016-11-25",
"`targets` is being deprecated, use `labels`."
" `weight` is being deprecated, use `weights`.",
"targets", "weight")
def mean_pairwise_squared_error(
predictions, labels=None, weights=_WEIGHT_SENTINEL, scope=None,
targets=None, weight=_WEIGHT_SENTINEL):
"""Adds a pairwise-errors-squared loss to the training procedure.
Unlike `mean_squared_error`, which is a measure of the differences between
corresponding elements of `predictions` and `labels`,
`mean_pairwise_squared_error` is a measure of the differences between pairs of
corresponding elements of `predictions` and `labels`.
For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are
three pairs of differences are summed to compute the loss:
loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3
Note that since the inputs are of size [batch_size, d0, ... dN], the
corresponding pairs are computed within each batch sample but not across
samples within a batch. For example, if `predictions` represents a batch of
16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs
is drawn from each image, but not across images.
`weight` acts as a coefficient for the loss. If a scalar is provided, then the
loss is simply scaled by the given value. If `weight` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weight` vector.
Args:
predictions: The predicted outputs, a tensor of size [batch_size, d0, .. dN]
where N+1 is the total number of dimensions in `predictions`.
labels: The ground truth output tensor, whose shape must match the shape of
the `predictions` tensor.
weights: Coefficients for the loss a scalar, a tensor of shape [batch_size]
or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
targets: Deprecated alias for `labels`.
weight: Deprecated alias for `weights`.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weight` is invalid.
"""
labels = _labels(labels, targets)
weights = _weights(weights, weight)
with ops.name_scope(scope, "mean_pairwise_squared_error",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
weights = math_ops.to_float(ops.convert_to_tensor(weights))
diffs = math_ops.sub(predictions, labels)
# Need to verify here since the function doesn't use compute_weighted_loss
if diffs.get_shape().ndims is None:
raise ValueError("diffs.get_shape().ndims cannot be None")
if weights.get_shape().ndims is None:
raise ValueError("weights.get_shape().ndims cannot be None")
reduction_indices = list(range(1, diffs.get_shape().ndims))
sum_squares_diff_per_batch = math_ops.reduce_sum(
math_ops.square(diffs),
reduction_indices=reduction_indices)
num_present_per_batch = _num_present(diffs, weights, per_batch=True)
term1 = 2.0 * _safe_div(sum_squares_diff_per_batch,
num_present_per_batch)
sum_diff = math_ops.reduce_sum(diffs, reduction_indices=reduction_indices)
term2 = 2.0 * _safe_div(math_ops.square(sum_diff),
math_ops.square(num_present_per_batch))
loss = _scale_losses(term1 - term2, weights)
mean_loss = array_ops.where(math_ops.reduce_sum(num_present_per_batch) > 0,
loss,
array_ops.zeros_like(loss),
name="value")
add_loss(mean_loss)
return mean_loss
@deprecated("2016-12-30", "Use tf.losses.cosine_distance instead.")
@deprecated_args(
"2016-11-25",
"`targets` is being deprecated, use `labels`."
" `weight` is being deprecated, use `weights`.",
"targets", "weight")
def cosine_distance(
predictions, labels=None, dim=None, weights=_WEIGHT_SENTINEL, scope=None,
targets=None, weight=_WEIGHT_SENTINEL):
"""Adds a cosine-distance loss to the training procedure.
Note that the function assumes that `predictions` and `labels` are already
unit-normalized.
Args:
predictions: An arbitrary matrix.
labels: A `Tensor` whose shape matches 'predictions'
dim: The dimension along which the cosine distance is computed.
weights: Coefficients for the loss a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
targets: Deprecated alias for `labels`.
weight: Deprecated alias for `weights`.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If `predictions` shape doesn't match `labels` shape, or
`weights` is `None`.
"""
labels = _labels(labels, targets)
weights = _weights(weights, weight)
if dim is None:
raise ValueError("`dim` cannot be None.")
with ops.name_scope(scope, "cosine_distance_loss",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
radial_diffs = math_ops.mul(predictions, labels)
losses = 1 - math_ops.reduce_sum(radial_diffs, reduction_indices=[dim,])
return compute_weighted_loss(losses, weights, scope=scope)
| ppries/tensorflow | tensorflow/contrib/losses/python/losses/loss_ops.py | Python | apache-2.0 | 30,744 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    # Auto-generated Django migration.
    # NOTE(review): the literal datetime defaults below were presumably
    # frozen by `makemigrations` at generation time (2015-05-06); applied
    # migrations must not be edited, so they are left as-is.

    dependencies = [
        ('pacientes', '0012_auto_20150505_2053'),
    ]

    operations = [
        # Re-declare fecha_ingreso with its captured default and Spanish
        # user-facing labels (runtime strings, intentionally untranslated).
        migrations.AlterField(
            model_name='pacientes',
            name='fecha_ingreso',
            field=models.DateField(default=datetime.datetime(2015, 5, 6, 1, 17, 17, 242433), help_text='Formato: dd/mm/yyyy', verbose_name='Fecha de Ingreso'),
            preserve_default=True,
        ),
        # Same treatment for hora_ingreso (a TimeField, default captured as
        # a full datetime by the generator).
        migrations.AlterField(
            model_name='pacientes',
            name='hora_ingreso',
            field=models.TimeField(default=datetime.datetime(2015, 5, 6, 1, 17, 17, 242375), help_text='Formato: hh:mm', verbose_name='Hora de Ingreso'),
            preserve_default=True,
        ),
    ]
| btenaglia/hpc-historias-clinicas | hpc-historias-clinicas/pacientes/migrations/0013_auto_20150506_0117.py | Python | bsd-3-clause | 874 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Moritz Luca Schmid, Communications Engineering Lab (CEL) / Karlsruhe Institute of Technology (KIT).
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
from gnuradio import audio
import os
import grdab_swig as grdab
from gnuradio import audio
class qa_dabplus_audio_decoder_ff (gr_unittest.TestCase):
    """QA flowgraph for grdab.dabplus_audio_decoder_ff.

    Replays a pre-recorded transmission frame (and its trigger stream)
    through the DAB+ decoder, writing decoded PCM to wav/raw files and the
    sound card for manual inspection.
    """

    def setUp (self):
        # Fresh top block for every test case.
        self.tb = gr.top_block ()

    def tearDown (self):
        self.tb = None

    # manual check, if header info makes sense and if AAC gives errors
    # final check by playing the produced audio file, you may have to adjust the wavfile_sink configuration arguments to printed header info
    def test_001_t (self):
        # Runs only when the pre-recorded debug captures are present on disk;
        # otherwise the test is skipped (logged below).
        if os.path.exists("debug/transmission_frame.dat") and os.path.exists("debug/transmission_frame_trigger.dat"):
            self.dab_params = grdab.parameters.dab_parameters(1, 208.064e6, True)
            # Frame samples + per-frame trigger stream feed the decoder.
            self.src01 = blocks.file_source_make(gr.sizeof_float * 2 * self.dab_params.num_carriers,
                                                 "debug/transmission_frame.dat")
            self.src02 = blocks.file_source_make(gr.sizeof_char, "debug/transmission_frame_trigger.dat")
            self.dabplus = grdab.dabplus_audio_decoder_ff(self.dab_params, 112, 54, 84, 2, True, False, True)
            self.wav_sink = blocks.wavfile_sink_make("debug/music.wav", 2, 32000)
            self.file_sink_left = blocks.file_sink_make(gr.sizeof_float, "debug/PCM_left.dat")
            self.file_sink_right = blocks.file_sink_make(gr.sizeof_float, "debug/PCM_right.dat")
            # Decoder outputs go to raw PCM files, the wav sink, and audio out.
            self.tb.connect(self.src01, (self.dabplus, 0), self.file_sink_left)
            self.tb.connect(self.src02, (self.dabplus, 1), self.file_sink_right)
            self.tb.connect((self.dabplus, 0), (self.wav_sink, 0))
            self.tb.connect((self.dabplus, 1), (self.wav_sink, 1))
            self.audio = audio.sink_make(32000)
            self.tb.connect((self.dabplus, 0), (self.audio, 0))
            self.tb.connect((self.dabplus, 1), (self.audio, 1))
            self.tb.run()
        else:
            log = gr.logger("log")
            log.debug("debug file not found - skipped test")
            log.set_level("WARN")
            pass
if __name__ == '__main__':
    # Run through GNU Radio's unittest wrapper so an XML report is produced.
    gr_unittest.run(qa_dabplus_audio_decoder_ff, "qa_dabplus_audio_decoder_ff.xml")
| andrmuel/gr-dab | python/qa_dabplus_audio_decoder_ff.py | Python | gpl-3.0 | 3,065 |
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2016 the ZAP development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file was automatically generated.
"""
import six
class authorization(object):
    """Client-side wrapper for the ZAP 'authorization' API endpoints."""

    def __init__(self, zap):
        # Shared ZAP client used to issue the HTTP requests.
        self.zap = zap

    def get_authorization_detection_method(self, contextid):
        """
        Obtains all the configuration of the authorization detection method that is currently set for a context.
        """
        response = self.zap._request(
            self.zap.base + 'authorization/view/getAuthorizationDetectionMethod/',
            {'contextId': contextid})
        return six.next(six.itervalues(response))

    def set_basic_authorization_detection_method(self, contextid, headerregex=None, bodyregex=None, statuscode=None, logicaloperator=None, apikey=''):
        """
        Sets the authorization detection method for a context as one that identifies un-authorized messages based on: the message's status code or a regex pattern in the response's header or body. Also, whether all conditions must match or just some can be specified via the logicalOperator parameter, which accepts two values: "AND" (default), "OR".
        """
        params = {'contextId': contextid, 'apikey': apikey}
        # Only forward the optional criteria the caller actually supplied.
        optional = (('headerRegex', headerregex),
                    ('bodyRegex', bodyregex),
                    ('statusCode', statuscode),
                    ('logicalOperator', logicaloperator))
        for api_name, value in optional:
            if value is not None:
                params[api_name] = value
        response = self.zap._request(
            self.zap.base + 'authorization/action/setBasicAuthorizationDetectionMethod/',
            params)
        return six.next(six.itervalues(response))
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import base64
from ansible.errors import AnsibleActionFail, AnsibleActionSkip
from ansible.module_utils._text import to_bytes
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
from ansible.utils.path import makedirs_safe, is_subpath
display = Display()
class ActionModule(ActionBase):
    """Action plugin for the `fetch` module: copy a remote file to the
    controller, verifying its checksum on the way."""

    def run(self, tmp=None, task_vars=None):
        ''' handler for fetch operations: resolve src/dest, read the remote
        file (directly or via slurp), and write it locally iff it changed '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        try:
            if self._play_context.check_mode:
                raise AnsibleActionSkip('check mode not (yet) supported for this module')

            source = self._task.args.get('src', None)
            # Keep the raw dest around for the directory-traversal check below.
            original_dest = dest = self._task.args.get('dest', None)
            flat = boolean(self._task.args.get('flat'), strict=False)
            fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False)
            validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False)

            msg = ''
            # validate source and dest are strings FIXME: use basic.py and module specs
            # (only the last failing check's message survives; they are mutually
            # exclusive enough in practice)
            if not isinstance(source, string_types):
                msg = "Invalid type supplied for source option, it must be a string"

            if not isinstance(dest, string_types):
                msg = "Invalid type supplied for dest option, it must be a string"

            if source is None or dest is None:
                msg = "src and dest are required"

            if msg:
                raise AnsibleActionFail(msg)

            source = self._connection._shell.join_path(source)
            source = self._remote_expand_user(source)

            remote_checksum = None
            if not self._connection.become:
                # calculate checksum for the remote file, don't bother if using become as slurp will be used
                # Force remote_checksum to follow symlinks because fetch always follows symlinks
                remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True)

            # use slurp if permissions are lacking or privilege escalation is needed
            # ('1' = missing, '2' = unreadable, None = checksum not attempted)
            remote_data = None
            if remote_checksum in ('1', '2', None):
                slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars)
                if slurpres.get('failed'):
                    if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'):
                        result['msg'] = "the remote file does not exist, not transferring, ignored"
                        result['file'] = source
                        result['changed'] = False
                    else:
                        result.update(slurpres)
                    return result
                else:
                    if slurpres['encoding'] == 'base64':
                        remote_data = base64.b64decode(slurpres['content'])
                    if remote_data is not None:
                        # checksum the slurped content so the later comparison
                        # against the local file still applies
                        remote_checksum = checksum_s(remote_data)

            # calculate the destination name
            # (a join_path with no separator means a Windows-style shell; then
            # unquote and normalize backslashes)
            if os.path.sep not in self._connection._shell.join_path('a', ''):
                source = self._connection._shell._unquote(source)
                source_local = source.replace('\\', '/')
            else:
                source_local = source

            # ensure we only use file name, avoid relative paths
            if not is_subpath(dest, original_dest):
                # TODO: ? dest = os.path.expanduser(dest.replace(('../','')))
                raise AnsibleActionFail("Detected directory traversal, expected to be contained in '%s' but got '%s'" % (original_dest, dest))

            if flat:
                if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep):
                    raise AnsibleActionFail("dest is an existing directory, use a trailing slash if you want to fetch src into that directory")
                if dest.endswith(os.sep):
                    # if the path ends with "/", we'll use the source filename as the
                    # destination filename
                    base = os.path.basename(source_local)
                    dest = os.path.join(dest, base)
                if not dest.startswith("/"):
                    # if dest does not start with "/", we'll assume a relative path
                    dest = self._loader.path_dwim(dest)
            else:
                # files are saved in dest dir, with a subdir for each host, then the filename
                if 'inventory_hostname' in task_vars:
                    target_name = task_vars['inventory_hostname']
                else:
                    target_name = self._play_context.remote_addr
                dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)

            # remote_checksum is a single-character error code at this point
            # if anything went wrong on the remote side
            if remote_checksum in ('0', '1', '2', '3', '4', '5'):
                result['changed'] = False
                result['file'] = source
                if remote_checksum == '0':
                    result['msg'] = "unable to calculate the checksum of the remote file"
                elif remote_checksum == '1':
                    result['msg'] = "the remote file does not exist"
                elif remote_checksum == '2':
                    result['msg'] = "no read permission on remote file"
                elif remote_checksum == '3':
                    result['msg'] = "remote file is a directory, fetch cannot work on directories"
                elif remote_checksum == '4':
                    result['msg'] = "python isn't present on the system. Unable to compute checksum"
                elif remote_checksum == '5':
                    result['msg'] = "stdlib json was not found on the remote machine. Only the raw module can work without those installed"
                # Historically, these don't fail because you may want to transfer
                # a log file that possibly MAY exist but keep going to fetch other
                # log files. Today, this is better achieved by adding
                # ignore_errors or failed_when to the task. Control the behaviour
                # via fail_when_missing
                if fail_on_missing:
                    result['failed'] = True
                    del result['changed']
                else:
                    result['msg'] += ", not transferring, ignored"
                return result

            dest = os.path.normpath(dest)

            # calculate checksum for the local file
            local_checksum = checksum(dest)

            if remote_checksum != local_checksum:
                # create the containing directories, if needed
                makedirs_safe(os.path.dirname(dest))

                # fetch the file and check for changes
                if remote_data is None:
                    self._connection.fetch_file(source, dest)
                else:
                    # content already slurped in-memory; write it out locally
                    try:
                        f = open(to_bytes(dest, errors='surrogate_or_strict'), 'wb')
                        f.write(remote_data)
                        f.close()
                    except (IOError, OSError) as e:
                        raise AnsibleActionFail("Failed to fetch the file: %s" % e)
                new_checksum = secure_hash(dest)
                # For backwards compatibility. We'll return None on FIPS enabled systems
                try:
                    new_md5 = md5(dest)
                except ValueError:
                    new_md5 = None

                if validate_checksum and new_checksum != remote_checksum:
                    result.update(dict(failed=True, md5sum=new_md5,
                                       msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None,
                                       checksum=new_checksum, remote_checksum=remote_checksum))
                else:
                    result.update({'changed': True, 'md5sum': new_md5, 'dest': dest,
                                   'remote_md5sum': None, 'checksum': new_checksum,
                                   'remote_checksum': remote_checksum})
            else:
                # File already up to date locally; report unchanged.
                # For backwards compatibility. We'll return None on FIPS enabled systems
                try:
                    local_md5 = md5(dest)
                except ValueError:
                    local_md5 = None
                result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))

        finally:
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
| azaghal/ansible | lib/ansible/plugins/action/fetch.py | Python | gpl-3.0 | 9,755 |
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe, Brian Holt
# License: BSD 3
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_equal
from numpy.testing import assert_almost_equal
from nose.tools import assert_true
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn import datasets
# toy sample: two linearly separable clusters in 2D
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# also load the iris dataset
# and randomly permute it (fixed seed keeps the permutation reproducible;
# numpy accepts an array-like seed here)
iris = datasets.load_iris()
np.random.seed([1])
perm = np.random.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]

# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = np.random.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification_toy():
    """Check classification on a toy dataset."""
    # Exercise both forest variants, with default and restricted max_features.
    for ForestClassifier in (RandomForestClassifier, ExtraTreesClassifier):
        for restricted in (False, True):
            if restricted:
                clf = ForestClassifier(n_estimators=10, max_features=1,
                                       random_state=1)
            else:
                clf = ForestClassifier(n_estimators=10, random_state=1)
            clf.fit(X, y)
            assert_array_equal(clf.predict(T), true_result)
            assert_equal(10, len(clf))
def test_iris():
    """Check consistency on dataset iris."""
    for c in ("gini", "entropy"):
        # Random forest: with all features available the training-set score
        # should be high...
        clf = RandomForestClassifier(n_estimators=10, criterion=c,
                                     random_state=1)
        clf.fit(iris.data, iris.target)
        score = clf.score(iris.data, iris.target)
        assert score > 0.9, "Failed with criterion %s and score = %f" % (c,
                                                                         score)
        # ...and only a weaker bound is required when max_features is
        # restricted to 2.
        clf = RandomForestClassifier(n_estimators=10, criterion=c,
                                     max_features=2, random_state=1)
        clf.fit(iris.data, iris.target)
        score = clf.score(iris.data, iris.target)
        assert score > 0.5, "Failed with criterion %s and score = %f" % (c,
                                                                         score)

        # Extra-trees: same checks, but the restricted variant is still
        # expected to exceed 0.9 here.
        clf = ExtraTreesClassifier(n_estimators=10, criterion=c,
                                   random_state=1)
        clf.fit(iris.data, iris.target)
        score = clf.score(iris.data, iris.target)
        assert score > 0.9, "Failed with criterion %s and score = %f" % (c,
                                                                         score)
        clf = ExtraTreesClassifier(n_estimators=10, criterion=c,
                                   max_features=2, random_state=1)
        clf.fit(iris.data, iris.target)
        score = clf.score(iris.data, iris.target)
        assert score > 0.9, "Failed with criterion %s and score = %f" % (c,
                                                                         score)
def test_boston():
    """Check consistency on dataset boston house prices.

    Fits each forest regressor with the default and a restricted number of
    features and sanity-checks the training-set score.
    """
    for c in ("mse",):
        # Random forest
        clf = RandomForestRegressor(n_estimators=5, criterion=c,
                                    random_state=1)
        clf.fit(boston.data, boston.target)
        score = clf.score(boston.data, boston.target)
        assert score < 3, ("Failed with max_features=None, "
                           "criterion %s and score = %f" % (c, score))

        # BUG FIX: this message previously claimed max_features=None although
        # the estimator is fitted with max_features=6.
        clf = RandomForestRegressor(n_estimators=5, criterion=c,
                                    max_features=6, random_state=1)
        clf.fit(boston.data, boston.target)
        score = clf.score(boston.data, boston.target)
        assert score < 3, ("Failed with max_features=6, "
                           "criterion %s and score = %f" % (c, score))

        # Extra-trees
        clf = ExtraTreesRegressor(n_estimators=5, criterion=c, random_state=1)
        clf.fit(boston.data, boston.target)
        score = clf.score(boston.data, boston.target)
        assert score < 3, ("Failed with max_features=None, "
                           "criterion %s and score = %f" % (c, score))

        # Same message fix as above for the restricted extra-trees fit.
        clf = ExtraTreesRegressor(n_estimators=5, criterion=c, max_features=6,
                                  random_state=1)
        clf.fit(boston.data, boston.target)
        score = clf.score(boston.data, boston.target)
        assert score < 3, ("Failed with max_features=6, "
                           "criterion %s and score = %f" % (c, score))
def test_probability():
    """Predict probabilities."""
    # Random forest: rows of predict_proba must sum to one, and the
    # log-probabilities must be consistent with the probabilities.
    clf = RandomForestClassifier(n_estimators=10, random_state=1,
                                 max_features=1, max_depth=1)
    clf.fit(iris.data, iris.target)
    assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
                              np.ones(iris.data.shape[0]))
    assert_array_almost_equal(clf.predict_proba(iris.data),
                              np.exp(clf.predict_log_proba(iris.data)))
    # Extra-trees: same invariants.
    clf = ExtraTreesClassifier(n_estimators=10, random_state=1, max_features=1,
                               max_depth=1)
    clf.fit(iris.data, iris.target)
    assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
                              np.ones(iris.data.shape[0]))
    assert_array_almost_equal(clf.predict_proba(iris.data),
                              np.exp(clf.predict_log_proba(iris.data)))
def test_importances():
    """Check variable importances."""
    # Local X, y deliberately shadow the module-level toy sample: a synthetic
    # problem with exactly 3 informative features out of 10.
    X, y = datasets.make_classification(n_samples=1000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)
    clf = RandomForestClassifier(n_estimators=10, compute_importances=True)
    clf.fit(X, y)
    importances = clf.feature_importances_
    # Only the 3 informative features should exceed the 0.1 threshold.
    n_important = sum(importances > 0.1)
    assert_equal(importances.shape[0], 10)
    assert_equal(n_important, 3)
    # Feature selection via transform() must keep a strict, non-empty subset.
    X_new = clf.transform(X, threshold="mean")
    assert_true(0 < X_new.shape[1] < X.shape[1])
def test_oob_score_classification():
    """Check that oob prediction is as acurate as
    usual prediction on the training set.
    Not really a good test that prediction is independent."""
    forest = RandomForestClassifier(oob_score=True)
    forest.fit(X, y)
    # On the toy sample the OOB estimate should match the training score.
    training_score = forest.score(X, y)
    assert_almost_equal(training_score, forest.oob_score_)
def test_oob_score_regression():
    """Check that oob prediction is pessimistic estimate.
    Not really a good test that prediction is independent."""
    clf = RandomForestRegressor(n_estimators=30, oob_score=True)
    n_samples = boston.data.shape[0]
    # BUG FIX: use floor division -- under Python 3, `n_samples / 2` is a
    # float and cannot be used as an array index (identical value on Py2).
    half = n_samples // 2
    clf.fit(boston.data[:half, :], boston.target[:half])
    test_score = clf.score(boston.data[half:, :],
                           boston.target[half:])
    # Held-out score should beat the (pessimistic) OOB estimate.
    assert_true(test_score > clf.oob_score_)
    assert_true(clf.oob_score_ > .8)
def test_gridsearch():
    """Check that base trees can be grid-searched."""
    # The same small grid is applied to both forest variants.
    param_grid = {'n_estimators': (1, 2),
                  'max_depth': (1, 2)}
    for ForestClassifier in (RandomForestClassifier, ExtraTreesClassifier):
        search = GridSearchCV(ForestClassifier(), param_grid)
        search.fit(iris.data, iris.target)
def test_parallel():
    """Check parallel computations."""
    # Classification: predictions must not depend on the number of workers.
    forest = RandomForestClassifier(n_estimators=10, n_jobs=3, random_state=0)
    forest.fit(iris.data, iris.target)
    assert_true(10 == len(forest))
    forest.set_params(n_jobs=1)
    y1 = forest.predict(iris.data)
    forest.set_params(n_jobs=2)
    y2 = forest.predict(iris.data)
    assert_array_equal(y1, y2)
    # Regression: same invariance, compared to 10 decimals.
    forest = RandomForestRegressor(n_estimators=10, n_jobs=3, random_state=0)
    forest.fit(boston.data, boston.target)
    assert_true(10 == len(forest))
    forest.set_params(n_jobs=1)
    y1 = forest.predict(boston.data)
    forest.set_params(n_jobs=2)
    y2 = forest.predict(boston.data)
    assert_array_almost_equal(y1, y2, 10)
    # Use all cores on the classification dataset
    forest = RandomForestClassifier(n_jobs=-1)
    forest.fit(iris.data, iris.target)
def _check_pickle(estimator, data, target):
    """Fit, round-trip through pickle, and verify class and score survive."""
    import pickle
    estimator.fit(data, target)
    score = estimator.score(data, target)
    clone = pickle.loads(pickle.dumps(estimator))
    assert_equal(type(clone), estimator.__class__)
    score2 = clone.score(data, target)
    assert_true(score == score2)


def test_pickle():
    """Check pickability."""
    # The original body repeated the same fit/pickle/score sequence four
    # times; it is factored into _check_pickle above.
    _check_pickle(RandomForestClassifier(), iris.data, iris.target)
    _check_pickle(RandomForestRegressor(), boston.data, boston.target)
    _check_pickle(ExtraTreesClassifier(), iris.data, iris.target)
    _check_pickle(ExtraTreesRegressor(), boston.data, boston.target)
if __name__ == "__main__":
import nose
nose.runmodule()
| cdegroc/scikit-learn | sklearn/ensemble/tests/test_forest.py | Python | bsd-3-clause | 10,632 |
from pathlib import Path
def parse_args():
    """Build the demo argument parser and parse sys.argv.

    Returns:
        argparse.Namespace with: string, do_flag, do_gag, print_this,
        input_file, output_file, choice, others.
    """
    import argparse  # perfectly acceptable to do the import in the function
    epilog = 'message displayed at the end of the help message'
    parser = argparse.ArgumentParser(epilog=epilog)
    parser.add_argument('-s', '--string', dest='string', type=str, help='arg takes a string', required=True)
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-f', '--flag', dest='do_flag', action='store_true', help='do flag')
    group.add_argument('-g', dest='do_gag', action='store_true')
    parser.add_argument('--print-this', type=str, help='string to print')
    parser.add_argument('--input_file', type=argparse.FileType('rb', 0), help='input file to read')  # understands "-" as stdin
    # BUG FIX: FileType's second positional parameter is the integer buffer
    # size, not an encoding (passing 'UTF-8' raised TypeError at open time),
    # and an *output* file must be opened for writing, not 'rb'.
    parser.add_argument('--output_file', type=argparse.FileType('w', encoding='UTF-8'), help='file to output')  # understands "-" as stdout
    parser.add_argument('--choice', choices=['apple', 'orange', 'banana'])
    #parser.add_argument('others', nargs='?') #consumes 1 (optional) positional argument
    #parser.add_argument('others', nargs='+') #consumes all positional arguments (err if no args)
    #parser.add_argument('others', nargs=2) #expects 2 positional arguments
    parser.add_argument('others', nargs='*')  # expects any number of positional arguments
    args = parser.parse_args()
    return args
def main():
    """Echo every parsed argument back to stdout for demonstration."""
    args = parse_args()
    # Dump the full namespace first.
    for attr_name, attr_value in args._get_kwargs():
        print(attr_name, attr_value)
    if args.do_flag:
        print('doing flag')
    if args.do_gag:
        print('doing gag')
    if args.print_this:
        print(f'printing this: {args.print_this}')
    print(f'string argument: {args.string}')
    for extra in args.others:
        print(f'optional arg: {extra}')
    if args.choice:
        print(f'choice was: {args.choice}')
    if args.input_file:
        print(f'file name: {args.input_file.name}')
        print(Path(args.input_file.name).resolve())
        print(args.input_file.read())
if __name__ == '__main__':
    # Script entry point.
    main()
| JesseAChristensen/linuxprofile | python/arg_parsing.py | Python | gpl-3.0 | 2,023 |
from . import BitmovinError
class InvalidTypeError(BitmovinError):
    """Bitmovin error subtype signalling an invalid/unexpected type."""
    pass
| bitmovin/bitmovin-python | bitmovin/errors/invalid_type_error.py | Python | unlicense | 78 |
"""
run py.test
""" | joshmorel/datacompare | datacompare/example.py | Python | mit | 21 |
from dolfin import *
from math import exp, sqrt, pi
import sw_lib
# Physical and numerical parameters of the shallow-water model.
params = sw_lib.parameters({
    'depth' : 2.,        # water depth
    'g' : 9.81,          # gravitational acceleration
    'f' : 0.0,           # Coriolis parameter (disabled)
    'dump_period' : 1,
    'eta0' : 2           # Wave height
})
# Basin dimensions (rectangle sides, despite the original "radius" wording).
basin_x = 3000  # The length of the basin
basin_y = 1000  # The width of the basin
nx = 20  # Number of cells in x direction
ny = 3   # Number of cells in y direction
# Long wave celerity.
c = sqrt(params["g"]*params["depth"])
params["finish_time"] = 100
params["dt"] = params["finish_time"]/4000.
class InitialConditions(Expression):
    # Cosine standing-wave initial state for the shallow-water solver.
    def __init__(self):
        # NOTE(review): deliberately does not call Expression.__init__;
        # presumably this matches the instantiation protocol of the DOLFIN
        # version this script targets -- confirm before upgrading DOLFIN.
        pass

    def eval(self, values, X):
        # NOTE(review): components appear to be (velocity, 0, free surface):
        # values[2] is the eta0*cos(...) surface profile used above.
        values[0] = params['eta0']*sqrt(params['g']*params['depth'])*cos(pi*X[0]/3000)
        values[1] = 0.
        values[2] = params['eta0']*cos(pi*X[0]/3000)

    def value_shape(self):
        # Vector-valued expression with 3 components.
        return (3,)
# Build the rectangular mesh; fall back to the older class name if the newer
# spelling is unavailable (presumably a DOLFIN version-compatibility shim --
# note the bare except also swallows unrelated errors).
try:
    mesh = RectangleMesh(0, 0, basin_x, basin_y, nx, ny)
except:
    mesh = Rectangle(0, 0, basin_x, basin_y, nx, ny)
mesh.order()
mesh.init()
class Left(SubDomain):
    # Left boundary of the basin (x = 0).
    def inside(self, x, on_boundary):
        return on_boundary and near(x[0], 0.0)
class Right(SubDomain):
    # Right boundary of the basin (x = basin_x).
    def inside(self, x, on_boundary):
        return on_boundary and near(x[0], basin_x)
class Sides(SubDomain):
    # Lateral walls of the basin (y = 0 and y = basin_y).
    def inside(self, x, on_boundary):
        return on_boundary and (near(x[1], 0.0) or near(x[1], basin_y))
# Initialize sub-domain instances
left = Left()
right = Right()
sides = Sides()

# Initialize mesh function for boundary domains; the FacetFunction type name
# ("sizet" vs "size_t") changed between DOLFIN versions, hence the fallback.
try:
    boundaries = FacetFunction("sizet", mesh)
except:
    boundaries = FacetFunction("size_t", mesh)
boundaries.set_all(0)
# Boundary ids: 1 = left, 2 = right, 3 = lateral walls.
left.mark(boundaries, 1)
right.mark(boundaries, 2)
sides.mark(boundaries, 3)
# Surface measure restricted to the marked boundaries.
ds = Measure("ds")[boundaries]
| ellipsis14/dolfin-adjoint | tests_dolfin/shallow_water_time_functional/divett.py | Python | lgpl-3.0 | 1,694 |
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
from casadi.tools import *
import casadi as c
from numpy import *
import unittest
from types import *
from helpers import *
class SDPtests(casadiTestCase):
@requires("DSDPSolver")
def test_memleak1(self):
self.message("memleak1")
# Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
b = DMatrix([48,-8,20])
A = vertcat([DMatrix([[10,4],[4,0]]),DMatrix([[0,0],[0,-8]]),DMatrix([[0,-8],[-8,-2]])])
makeSparse(A)
A.printMatrix()
C = DMatrix([[-11,0],[0,23]])
makeSparse(C)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
@requires("DSDPSolver")
def test_memleak2(self):
self.message("memleak1")
# Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
b = DMatrix([48,-8,20])
A = vertcat([DMatrix([[10,4],[4,0]]),DMatrix([[0,0],[0,-8]]),DMatrix([[0,-8],[-8,-2]])])
makeSparse(A)
A.printMatrix()
C = DMatrix([[-11,0],[0,23]])
makeSparse(C)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
@requires("DSDPSolver")
def test_scalar(self):
self.message("scalar")
#
# min n1*x
# x
# n3*x-n2>=0
#
# -> x = n2/n3
#
# 1 active constraint, cost: d(n1*x)/d(n*x-n2) = 1/[d(n3*x-n2)/d(n1*x)] = n1/n3
n1 = 3.1
n2 = 2.3
n3 = 4.7
b = DMatrix(n1)
Ai = [DMatrix(n3)]
A = vertcat(Ai)
makeSparse(A)
C = DMatrix(n2)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(n1*n2/n3),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(n1*n2/n3),digits=5)
self.checkarray(dsp.output("primal"),DMatrix(n2/n3),digits=5)
self.checkarray(dsp.output("p"),DMatrix(0),digits=5)
self.checkarray(dsp.output("dual"),DMatrix(n1/n3),digits=5)
@requires("DSDPSolver")
def test_linear_equality(self):
self.message("linear equality")
# min n1*x
# x
#
# n3*x-n2 >= 0 |__ n3*x == n2
# -(n3*x-n2) >= 0 |
#
# solution: x=n2/n3
n3 = 1.7
n1 = 2.1
n2 = 1.3
b = DMatrix([n1])
Ai = [ blkdiag([n3,-n3])]
C = blkdiag([n2,-n2])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(n1*n2/n3),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(n1*n2/n3),digits=5)
self.checkarray(dsp.output("primal"),DMatrix(n2/n3),digits=5)
self.checkarray(dsp.output("p"),DMatrix.zeros(2,2),digits=5)
self.checkarray(dsp.output("dual")[0,0]-dsp.output("dual")[1,1],DMatrix(n1/n3),digits=5)
@requires("DSDPSolver")
def test_linear_interpolation1(self):
self.message("linear interpolation1")
# min 2*x0 + x1*3
# x0,x1
# x0+x1 - 1 >=0 --> x0+x1>=1
# x0 >=0
# x1 >=0
#
# solution: x0=1, x1=0
b = DMatrix([2,3])
Ai = [ blkdiag([1,1,0]), blkdiag([1,0,1])]
C = blkdiag([1,0,0])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([1,0]),digits=5)
self.checkarray(dsp.output("p"),DMatrix([[0,0,0],[0,1,0],[0,0,0]]),digits=5)
self.checkarray(dsp.output("dual"),DMatrix([[2,0,0],[0,0,0],[0,0,1]]),digits=5)
@requires("DSDPSolver")
def test_linear_interpolation2(self):
self.message("linear interpolation2")
# min 2*x0 + 3*x1
# x0,x1
# -(x0 + x1 -1) >=0 --> x0 + x1 <= 1
# x0 >=0
# x1 >=0
#
# solution: x0=0 , x1=0
b = DMatrix([2,3])
Ai = [ blkdiag([-1,1,0]), blkdiag([-1,0,1])]
C = blkdiag([-1,0,0])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(0),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(0),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([0,0]),digits=5)
self.checkarray(dsp.output("p"),DMatrix([[1,0,0],[0,0,0],[0,0,0]]),digits=5)
self.checkarray(dsp.output("dual"),DMatrix([[0,0,0],[0,2,0],[0,0,3]]),digits=5)
@requires("DSDPSolver")
def test_linear_interpolation(self):
self.message("linear interpolation")
# min 2*a + (1-a)*4
# a
# 0 <= a <= 1
#
# Translates to:
# min 2*x0 + 4*x1
# x0,x1
# x0 + x1 -1 >= 0 |__ x0 + x1 == 1
# -(x0 + x1 -1) >= 0 |
# x0 >= 0
# x1 >= 0
b = DMatrix([2,4])
Ai = [ blkdiag([1,-1,1,0]), blkdiag([1,-1,0,1])]
e = 1e-6
C = blkdiag([1,-(1+e),0,0])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([1,0]),digits=5)
self.checkarray(dsp.output("p"),diag([0,0,1,0]),digits=5)
self.checkarray(dsp.output("dual"),diag([2,0,0,2]),digits=2)
@requires("DSDPSolver")
def test_example1(self):
    """SDP example from the SDPA manual, cross-checked against an
    equivalent NLP formulation solved with Ipopt."""
    self.message("Example1")
    # Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
    b = DMatrix([48,-8,20])
    Ai = [DMatrix([[10,4],[4,0]]),DMatrix([[0,0],[0,-8]]),DMatrix([[0,-8],[-8,-2]])]
    A = vertcat(Ai)
    makeSparse(A)
    A.printMatrix()
    C = DMatrix([[-11,0],[0,23]])
    makeSparse(C)
    dsp = DSDPSolver(C.sparsity(),A.sparsity())
    dsp.init()
    dsp.input("c").set(C)
    dsp.input("b").set(b)
    dsp.input("a").set(A)
    dsp.evaluate()
    # Reference values taken from the SDPA manual.
    self.checkarray(dsp.output("primal_cost"),DMatrix(-41.9),digits=5)
    self.checkarray(dsp.output("dual_cost"),DMatrix(-41.9),digits=5)
    self.checkarray(dsp.output("primal"),DMatrix([-1.1,-2.7375,-0.55]),digits=5)
    self.checkarray(dsp.output("dual"),DMatrix([[5.9,-1.375],[-1.375,1]]),digits=5)
    self.checkarray(dsp.output("p"),DMatrix.zeros(2,2),digits=5)
    # Cross-check: restate the SDP as an NLP with P = L*L' guaranteeing
    # positive semidefiniteness, and verify Ipopt reaches the same primal point.
    V = struct_ssym([
        entry("L",shape=C.shape),
        entry("x",shape=b.size())
    ])
    L = V["L"]
    x = V["x"]
    P = mul(L,L.T)
    # Equality constraint: sum_i Ai*x_i - C - P == 0.
    g = []
    g.append(sum([Ai[i]*x[i] for i in range(3)]) - C - P)
    f = SXFunction([V],[mul(b.T,x)])
    g = SXFunction([V],[veccat(g)])
    sol = IpoptSolver(f,g)
    sol.init()
    sol.setInput(0,"lbg")
    sol.setInput(0,"ubg")
    sol.setInput(1,"x0")
    sol.evaluate()
    sol_ = V(sol.output())
    self.checkarray(sol_["x"],DMatrix([-1.1,-2.7375,-0.55]),digits=5)
@requires("DSDPSolver")
def test_example2(self):
    """Block-diagonal SDP example 2 from the SDPA manual."""
    self.message("Example2")
    # Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
    b = DMatrix([1.1, -10, 6.6 , 19 , 4.1])
    # C fixes the block structure: a 2x2 block, a 3x3 block, and two scalars.
    C = blkdiag([DMatrix([[-1.4,-3.2],[-3.2,-28]]),DMatrix([[15,-12,2.1],[-12,16,-3.8],[2.1,-3.8,15]]),1.8,-4.0]);
    sp = C.sparsity()
    # One row per decision variable; each row is the nonzero data of an Ai
    # laid out on C's sparsity pattern.
    flatdata = [[0.5,5.2,5.2,-5.3,7.8,-2.4,6.0,-2.4,4.2,6.5,6.0,6.5,2.1,-4.5,-3.5],
                [1.7,7.0,7.0,-9.3,-1.9,-0.9,-1.3,-0.9,-0.8,-2.1,-1.3,-2.1,4.0,-0.2,-3.7],
                [6.3,-7.5,-7.5,-3.3,0.2,8.8,5.4,8.8,3.4,-0.4,5.4,-0.4,7.5,-3.3,-4.0],
                [-2.4,-2.5,-2.5,-2.9,3.4,-3.2,-4.5,-3.2,3.0,-4.8,-4.5,-4.8,3.6,4.8,9.7],
                [-6.5,-5.4,-5.4,-6.6,6.7,-7.2,-3.6,-7.2,7.3,-3.0,-3.6,-3.0,-1.4,6.1,-1.5]]
    A = vertcat([DMatrix(sp,data) for data in flatdata])
    makeSparse(A)
    dsp = DSDPSolver(C.sparsity(),A.sparsity())
    dsp.init()
    dsp.input("c").set(C)
    dsp.input("b").set(b)
    dsp.input("a").set(A)
    dsp.evaluate()
    DMatrix.setPrecision(10)
    # Reference values taken from the SDPA manual.
    self.checkarray(dsp.output("primal_cost"),DMatrix(3.20626934048e1),digits=5)
    self.checkarray(dsp.output("dual_cost"),DMatrix(3.20626923535e1),digits=5)
    self.checkarray(dsp.output("primal"),DMatrix([1.551644595,0.6709672545,0.9814916693,1.406569511,0.9421687787]),digits=5)
    self.checkarray(dsp.output("dual"),DMatrix(sp,[2.640261206,0.5605636589,0.5605636589,3.717637107,0.7615505416,-1.513524657,1.139370202,-1.513524657,3.008016978,-2.264413045,1.139370202,-2.264413045,1.704633559,0,0]),digits=5)
    self.checkarray(dsp.output("p"),DMatrix(sp,[0,0,0,0,7.119155551,5.024671489,1.916294752,5.024671489,4.414745792,2.506021978,1.916294752,2.506021978,2.048124139,0.3432465654,4.391169489]),digits=5)
@requires("DSDPSolver")
def test_example2_perm(self):
    """Same problem as test_example2 with rows/columns permuted: the solver
    result must be invariant under a symmetric permutation of the data."""
    self.message("Example2_permuted")
    # Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
    b = DMatrix([1.1, -10, 6.6 , 19 , 4.1])
    perm = [5,2,1,0,6,3,4]
    # permi inverts perm, so outputs can be mapped back for comparison.
    permi = lookupvector(perm,len(perm))
    C = blkdiag([DMatrix([[-1.4,-3.2],[-3.2,-28]]),DMatrix([[15,-12,2.1],[-12,16,-3.8],[2.1,-3.8,15]]),1.8,-4.0]);
    sp = C.sparsity()
    flatdata = [[0.5,5.2,5.2,-5.3,7.8,-2.4,6.0,-2.4,4.2,6.5,6.0,6.5,2.1,-4.5,-3.5],
                [1.7,7.0,7.0,-9.3,-1.9,-0.9,-1.3,-0.9,-0.8,-2.1,-1.3,-2.1,4.0,-0.2,-3.7],
                [6.3,-7.5,-7.5,-3.3,0.2,8.8,5.4,8.8,3.4,-0.4,5.4,-0.4,7.5,-3.3,-4.0],
                [-2.4,-2.5,-2.5,-2.9,3.4,-3.2,-4.5,-3.2,3.0,-4.8,-4.5,-4.8,3.6,4.8,9.7],
                [-6.5,-5.4,-5.4,-6.6,6.7,-7.2,-3.6,-7.2,7.3,-3.0,-3.6,-3.0,-1.4,6.1,-1.5]]
    # Apply the permutation to every Ai and to C before handing to the solver.
    A = vertcat([DMatrix(sp,data)[perm,perm] for data in flatdata])
    makeSparse(A)
    C = C[perm,perm]
    dsp = DSDPSolver(C.sparsity(),A.sparsity())
    dsp.init()
    dsp.input("c").set(C)
    dsp.input("b").set(b)
    dsp.input("a").set(A)
    dsp.evaluate()
    DMatrix.setPrecision(10)
    self.checkarray(dsp.output("primal_cost"),DMatrix(3.20626934048e1),digits=5)
    self.checkarray(dsp.output("dual_cost"),DMatrix(3.20626923535e1),digits=5)
    self.checkarray(dsp.output("primal"),DMatrix([1.551644595,0.6709672545,0.9814916693,1.406569511,0.9421687787]),digits=5)
    # Un-permute the matrix outputs before comparing against the unpermuted
    # reference values.
    self.checkarray(dsp.output("dual")[permi,permi],DMatrix(sp,[2.640261206,0.5605636589,0.5605636589,3.717637107,0.7615505416,-1.513524657,1.139370202,-1.513524657,3.008016978,-2.264413045,1.139370202,-2.264413045,1.704633559,0,0]),digits=5)
    self.checkarray(dsp.output("p")[permi,permi],DMatrix(sp,[0,0,0,0,7.119155551,5.024671489,1.916294752,5.024671489,4.414745792,2.506021978,1.916294752,2.506021978,2.048124139,0.3432465654,4.391169489]),digits=5)
# Run the SDP test-suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
| jgillis/casadi | test/python/sdp.py | Python | lgpl-3.0 | 12,021 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSquarem(RPackage):
    """Algorithms for accelerating the convergence of slow, monotone sequences
    from smooth, contraction mapping such as the EM algorithm. It can be used
    to accelerate any smooth, linearly convergent acceleration scheme. A
    tutorial style introduction to this package is available in a vignette on
    the CRAN download page or, when the package is loaded in an R session, with
    vignette("SQUAREM")."""

    homepage = "http://www.jhsph.edu/agingandhealth/People/Faculty_personal_pages/Varadhan.html"
    url      = "https://cloud.r-project.org/src/contrib/SQUAREM_2017.10-1.tar.gz"
    # Archived releases, used by Spack to locate older versions.
    list_url = "https://cloud.r-project.org/src/contrib/Archive/SQUAREM"

    # Checksum of the published CRAN tarball for the pinned release.
    version('2017.10-1', sha256='9b89905b436f1cf3faa9e3dabc585a76299e729e85ca659bfddb4b7cba11b283')

    # SQUAREM requires R 3.0 or newer at build and run time.
    depends_on('r@3.0:', type=('build', 'run'))
| iulian787/spack | var/spack/repos/builtin/packages/r-squarem/package.py | Python | lgpl-2.1 | 1,065 |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import shutil
import sys
from contextlib import contextmanager
from pathlib import Path, PurePath
from textwrap import dedent
from typing import Dict, Iterable, Optional
import pytest
from pex.interpreter import PythonInterpreter
from pkg_resources import Requirement, WorkingSet
from pants.backend.python.util_rules import pex
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import Pex, PexProcess, PexRequest, PexRequirements
from pants.core.util_rules import archive, external_tool
from pants.engine.environment import CompleteEnvironment
from pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests, Snapshot
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.process import ProcessResult
from pants.init.options_initializer import create_bootstrap_scheduler
from pants.init.plugin_resolver import PluginResolver
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.testutil.python_interpreter_selection import (
PY_36,
PY_37,
python_interpreter_path,
skip_unless_python36_and_python37_present,
)
from pants.testutil.rule_runner import QueryRule, RuleRunner
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir, safe_rmtree, touch
# Version assigned to generated test plugins when none is specified explicitly.
DEFAULT_VERSION = "0.0.0"
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Pytest fixture: a RuleRunner wired with the pex/external-tool/archive
    rules needed to build and execute PEXes in these tests."""
    rule_runner = RuleRunner(
        rules=[
            *pex.rules(),
            *external_tool.rules(),
            *archive.rules(),
            QueryRule(Pex, [PexRequest]),
            QueryRule(ProcessResult, [PexProcess]),
        ]
    )
    rule_runner.set_options(
        ["--backend-packages=pants.backend.python"],
        # Interpreter discovery needs the invoking environment's PATH and
        # pyenv/home settings.
        env_inherit={"PATH", "PYENV_ROOT", "HOME"},
    )
    return rule_runner
def _create_pex(
    rule_runner: RuleRunner,
    interpreter_constraints: InterpreterConstraints,
) -> Pex:
    """Build an internal-only PEX bundling setuptools and wheel, used below to
    run generated setup.py files."""
    request = PexRequest(
        output_filename="setup-py-runner.pex",
        internal_only=True,
        requirements=PexRequirements(["setuptools==44.0.0", "wheel==0.34.2"]),
        interpreter_constraints=interpreter_constraints,
    )
    return rule_runner.request(Pex, [request])
def _run_setup_py(
    rule_runner: RuleRunner,
    plugin: str,
    interpreter_constraints: InterpreterConstraints,
    version: Optional[str],
    setup_py_args: Iterable[str],
    install_dir: str,
) -> None:
    """Generate a trivial setup.py for ``plugin`` and run it (e.g. sdist or
    bdist_wheel), copying the resulting dist/ artifacts into ``install_dir``."""
    pex_obj = _create_pex(rule_runner, interpreter_constraints)
    setup_py_file = FileContent(
        "setup.py",
        dedent(
            f"""
            from setuptools import setup

            setup(name="{plugin}", version="{version or DEFAULT_VERSION}")
            """
        ).encode(),
    )
    source_digest = rule_runner.request(
        Digest,
        [CreateDigest([setup_py_file])],
    )
    merged_digest = rule_runner.request(Digest, [MergeDigests([pex_obj.digest, source_digest])])
    process = PexProcess(
        pex=pex_obj,
        argv=("setup.py", *setup_py_args),
        input_digest=merged_digest,
        description="Run setup.py",
        output_directories=("dist/",),
    )
    result = rule_runner.request(ProcessResult, [process])
    result_snapshot = rule_runner.request(Snapshot, [result.output_digest])
    # Materialize the captured dist/ outputs into the build root, then copy
    # each produced artifact into the target repo directory.
    rule_runner.scheduler.write_digest(result.output_digest, path_prefix="output")
    safe_mkdir(install_dir)
    for path in result_snapshot.files:
        shutil.copy(PurePath(rule_runner.build_root, "output", path), install_dir)
@contextmanager
def plugin_resolution(
    rule_runner: RuleRunner, *, interpreter=None, chroot=None, plugins=None, sdist=True
):
    """Build a scratch repo of plugin artifacts and resolve them as pants plugins.

    Yields ``(working_set, root_dir, repo_dir)``. When ``chroot`` is supplied
    the existing directory is reused and no artifacts are rebuilt, which lets
    callers verify that a second resolve is served from the intact cache.
    """
    @contextmanager
    def provide_chroot(existing):
        # Reuse a caller-supplied chroot (skipping artifact creation) or make
        # a fresh temporary one.
        if existing:
            yield existing, False
        else:
            with temporary_dir() as new_chroot:
                yield new_chroot, True

    # Default to resolving with whatever we're currently running with.
    interpreter_constraints = (
        InterpreterConstraints([f"=={interpreter.identity.version_str}"]) if interpreter else None
    )
    artifact_interpreter_constraints = interpreter_constraints or InterpreterConstraints(
        [f"=={'.'.join(map(str, sys.version_info[:3]))}"]
    )

    with provide_chroot(chroot) as (root_dir, create_artifacts):
        env: Dict[str, str] = {}
        repo_dir = None
        if plugins:
            # Point pants at the local file:// repo and disable indexes so
            # resolution can only succeed from the artifacts we build here.
            repo_dir = os.path.join(root_dir, "repo")
            env.update(
                PANTS_PYTHON_REPOS_REPOS=f"['file://{repo_dir}']",
                PANTS_PYTHON_REPOS_INDEXES="[]",
                PANTS_PYTHON_SETUP_RESOLVER_CACHE_TTL="1",
            )
            plugin_list = []
            for plugin in plugins:
                # Each plugin is either a bare name or a (name, version) tuple.
                version = None
                if isinstance(plugin, tuple):
                    plugin, version = plugin
                plugin_list.append(f"{plugin}=={version}" if version else plugin)
                if create_artifacts:
                    setup_py_args = ["sdist" if sdist else "bdist_wheel", "--dist-dir", "dist/"]
                    _run_setup_py(
                        rule_runner,
                        plugin,
                        artifact_interpreter_constraints,
                        version,
                        setup_py_args,
                        repo_dir,
                    )
            env["PANTS_PLUGINS"] = f"[{','.join(map(repr, plugin_list))}]"

        configpath = os.path.join(root_dir, "pants.toml")
        if create_artifacts:
            touch(configpath)
        args = [f"--pants-config-files=['{configpath}']"]

        options_bootstrapper = OptionsBootstrapper.create(env=env, args=args, allow_pantsrc=False)
        complete_env = CompleteEnvironment(
            {**{k: os.environ[k] for k in ["PATH", "HOME", "PYENV_ROOT"] if k in os.environ}, **env}
        )
        bootstrap_scheduler = create_bootstrap_scheduler(options_bootstrapper)
        plugin_resolver = PluginResolver(
            bootstrap_scheduler, interpreter_constraints=interpreter_constraints
        )
        cache_dir = options_bootstrapper.bootstrap_options.for_global_scope().named_caches_dir

        working_set = plugin_resolver.resolve(
            options_bootstrapper, complete_env, WorkingSet(entries=[])
        )
        # Every resolved dist must be materialized under the named-caches dir.
        for dist in working_set:
            assert (
                Path(os.path.realpath(cache_dir)) in Path(os.path.realpath(dist.location)).parents
            )

        yield working_set, root_dir, repo_dir
def test_no_plugins(rule_runner: RuleRunner) -> None:
    """Resolving with no plugins configured yields an empty working set."""
    with plugin_resolution(rule_runner) as (working_set, _, _):
        assert [] == list(working_set)
# Exercise the same plugin-resolution scenario from both sdist and wheel artifacts.
def test_plugins_sdist(rule_runner: RuleRunner) -> None:
    _do_test_plugins(rule_runner, True)


def test_plugins_bdist(rule_runner: RuleRunner) -> None:
    _do_test_plugins(rule_runner, False)
def _do_test_plugins(rule_runner: RuleRunner, sdist: bool) -> None:
    """Resolve one pinned and one unpinned plugin and check both versions."""
    with plugin_resolution(rule_runner, plugins=[("jake", "1.2.3"), "jane"], sdist=sdist) as (
        working_set,
        _,
        _,
    ):
        def assert_dist_version(name, expected_version):
            dist = working_set.find(Requirement.parse(name))
            assert expected_version == dist.version

        assert_dist_version(name="jake", expected_version="1.2.3")
        # "jane" was built without an explicit version, so it carries the default.
        assert_dist_version(name="jane", expected_version=DEFAULT_VERSION)
# Exercise exact-requirement caching from both sdist and wheel artifacts.
def test_exact_requirements_sdist(rule_runner: RuleRunner) -> None:
    _do_test_exact_requirements(rule_runner, True)


def test_exact_requirements_bdist(rule_runner: RuleRunner) -> None:
    _do_test_exact_requirements(rule_runner, False)
def _do_test_exact_requirements(rule_runner: RuleRunner, sdist: bool) -> None:
    """An exact (fully pinned) plugin set must be reloadable from cache alone."""
    with plugin_resolution(
        rule_runner, plugins=[("jake", "1.2.3"), ("jane", "3.4.5")], sdist=sdist
    ) as results:
        working_set, chroot, repo_dir = results

        # Kill the repo source dir and re-resolve. If the PluginResolver truly detects exact
        # requirements it should skip any resolves and load directly from the still intact
        # cache.
        safe_rmtree(repo_dir)

        with plugin_resolution(
            rule_runner, chroot=chroot, plugins=[("jake", "1.2.3"), ("jane", "3.4.5")]
        ) as results2:
            working_set2, _, _ = results2
            assert list(working_set) == list(working_set2)
# Interpreter-change behavior, exercised from both sdist and wheel artifacts.
# Both tests need two concrete CPython versions installed to be meaningful.
@skip_unless_python36_and_python37_present
def test_exact_requirements_interpreter_change_sdist(rule_runner: RuleRunner) -> None:
    _do_test_exact_requirements_interpreter_change(rule_runner, True)


@skip_unless_python36_and_python37_present
def test_exact_requirements_interpreter_change_bdist(rule_runner: RuleRunner) -> None:
    _do_test_exact_requirements_interpreter_change(rule_runner, False)
def _do_test_exact_requirements_interpreter_change(rule_runner: RuleRunner, sdist: bool) -> None:
    """Cached plugin resolves are keyed by interpreter: switching interpreters
    forces a re-resolve (which fails once the repo is deleted), while the
    original interpreter keeps loading from the cache."""
    python36 = PythonInterpreter.from_binary(python_interpreter_path(PY_36))
    python37 = PythonInterpreter.from_binary(python_interpreter_path(PY_37))

    with plugin_resolution(
        rule_runner,
        interpreter=python36,
        plugins=[("jake", "1.2.3"), ("jane", "3.4.5")],
        sdist=sdist,
    ) as results:
        working_set, chroot, repo_dir = results

        safe_rmtree(repo_dir)

        with pytest.raises(ExecutionError):
            with plugin_resolution(
                rule_runner,
                interpreter=python37,
                chroot=chroot,
                plugins=[("jake", "1.2.3"), ("jane", "3.4.5")],
            ):
                pytest.fail(
                    "Plugin re-resolution is expected for an incompatible interpreter and it is "
                    "expected to fail since we removed the dist `repo_dir` above."
                )

        # But for a compatible interpreter the exact resolve results should be re-used and load
        # directly from the still in-tact cache.
        with plugin_resolution(
            rule_runner,
            interpreter=python36,
            chroot=chroot,
            plugins=[("jake", "1.2.3"), ("jane", "3.4.5")],
        ) as results2:
            working_set2, _, _ = results2
            assert list(working_set) == list(working_set2)
| patricklaw/pants | tests/python/pants_test/init/test_plugin_resolver.py | Python | apache-2.0 | 10,358 |
import json,httplib
import sys
config_data = json.load(open('conf/net/ext_service/parse.json'))
interval = "interval_%s" % sys.argv[1]
print "pushing for interval %s" % interval
silent_push_msg = {
"channels": [
interval
],
"data": {
# "alert": "The Mets scored! The game is now tied 1-1.",
"content-available": 1,
"sound": "",
}
}
parse_headers = {
"X-Parse-Application-Id": config_data["emission_id"],
"X-Parse-REST-API-Key": config_data["emission_key"],
"Content-Type": "application/json"
}
connection = httplib.HTTPSConnection('api.parse.com', 443)
connection.connect()
connection.request('POST', '/1/push', json.dumps(silent_push_msg), parse_headers)
result = json.loads(connection.getresponse().read())
print result
| yw374cornell/e-mission-server | bin/remotePush.py | Python | bsd-3-clause | 772 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import sys

# Py2/Py3 compatibility shims: flag the interpreter major version and alias
# the builtins that were renamed or removed in Python 3.
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int

if PY3:
    import urllib.parse as urlparse  # Very slow on PY2; native on PY3
else:
    import urlparse  # Use PY2's native urlparse, which is faster
import re
from channels import autoplay
from platformcode import config, logger, platformtools
from core.item import Item
from core import httptools, scrapertools, jsontools, tmdb
from core import servertools, channeltools
from bs4 import BeautifulSoup
from channelselector import get_thumb
# Base URL of the scraped site (note the trailing slash).
host = 'http://pelisencastellano.com/'

# Language/quality/server metadata consumed by autoplay and title unification.
IDIOMAS = {"esp": "CAST", "lat": "LAT", "sub": "VOSE"}
list_language = list(IDIOMAS.values())
list_quality = []
list_servers = ['fembed']

__channel__='pelisencastellano'

# Per-channel user settings: whether (and how many) links to verify.
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)

# Fall back to graphical mode if the setting is missing or unreadable.
try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
    __modo_grafico__ = True
def mainlist(item):
    """Root menu of the channel: movies, genres, search and channel settings."""
    logger.info()
    itemlist = []

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist.append(item.clone(title="Peliculas" , action="lista", url= host, thumbnail=get_thumb("movies", auto=True)))
    itemlist.append(item.clone(title="Genero" , action="categorias", url= host, thumbnail=get_thumb('genres', auto=True)))
    itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb("search", auto=True)))
    itemlist.append(item.clone(title="Configurar canal...", action="configuracion", text_color="gold", folder=False, thumbnail=get_thumb("setting_0.png")))

    # Append the autoplay toggle entry if enabled for this platform.
    autoplay.show_option(item.channel, itemlist)

    return itemlist
def configuracion(item):
    """Open the channel-settings dialog, then refresh the current listing."""
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()
    # Redraw so changed settings (e.g. unify) take effect immediately.
    platformtools.itemlist_refresh()
    return ret
def search(item, texto):
    """Global-search entry point: build the site search URL and list results.

    Returns an empty list on any error so a failure in this channel does not
    interrupt the global search across channels.
    """
    logger.info()
    try:
        texto = texto.replace(" ", "+")
        # Fix: `host` already ends with "/", so the old "%s/?s=%s" produced a
        # double slash ("...com//?s="); urljoin builds the URL correctly.
        item.url = urlparse.urljoin(host, "?s=%s" % texto)
        if texto != "":
            return lista(item)
        else:
            return []
    # The exception is swallowed deliberately (logged only) so the global
    # search keeps working when one channel breaks.
    except:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """List the genre links found in the site's navigation bar."""
    logger.info()
    itemlist = []
    soup = create_soup(item.url)
    matches = soup.find_all('a', class_='nav-link')

    for elem in matches:
        url = elem['href']
        title = elem.text
        # url = urlparse.urljoin(item.url,url)
        # Skip the "requests" nav entry, which is not a genre.
        if not "peticiones" in url:
            itemlist.append(item.clone(channel=item.channel, action="lista", title=title , url=url,
                                       section=item.section) )
    return itemlist
def create_soup(url, referer=None, post=None, unescape=False):
    """Download a page and parse it into a BeautifulSoup tree.

    :param url: page to fetch.
    :param referer: optional Referer header to send with the request.
    :param post: optional POST payload; when given the request is a POST.
    :param unescape: when True, HTML entities are unescaped before parsing.
    """
    logger.info()
    # Fix: the original used `if referer:` followed by an unconditional
    # `if post: ... else: ...`, which downloaded the page a second time and
    # discarded the Referer variant. Request exactly once.
    if post:
        data = httptools.downloadpage(url, post=post).data
    elif referer:
        data = httptools.downloadpage(url, headers={'Referer': referer}).data
    else:
        data = httptools.downloadpage(url).data

    if unescape:
        data = scrapertools.unescape(data)

    soup = BeautifulSoup(data, "html5lib", from_encoding="utf-8")

    return soup
def lista(item):
    """List the movie cards on the current page and add pagination."""
    logger.info()
    itemlist = []
    soup = create_soup(item.url, referer=host)
    matches = soup.find_all("div", class_='card')

    for elem in matches:
        url = elem.a['href']
        thumbnail = elem.img['src']
        title = elem.find('div', class_='card-title').text.replace("en Castellano", "").strip()
        # Card titles look like "<prefix> <name words...> <year> <suffix>":
        # drop the first and last word and take the second-to-last as the
        # year. NOTE(review): assumed from the slicing below — confirm against
        # the live site markup.
        title = title.split()
        year = title[-2]
        title = " ".join(title[1:-2])
        lang = "esp"
        language = []
        language.append(IDIOMAS.get(lang, lang))
        contentTitle = title
        if year == '':
            year = '-'
        # Decorate the title with year/language unless unified titles are on.
        if not config.get_setting('unify') and not channeltools.get_channel_parameters(__channel__)['force_unify']:
            if year != "-":
                title = "%s [COLOR cyan](%s)[/COLOR] [COLOR darkgrey]%s[/COLOR]" % (title,year, language)
            else:
                title = "%s [COLOR darkgrey]%s[/COLOR]" % (title, language)
        else:
            title = title
        itemlist.append(Item(channel=item.channel, action = "findvideos", url=url, title=title, contentTitle = contentTitle,
                             thumbnail=thumbnail, language=language, infoLabels={"year": year}) )

    # Enrich entries with TMDB metadata.
    tmdb.set_infoLabels(itemlist, True)

    # Pagination: the link following the inactive page marker is "next".
    next_page = soup.find('span', class_='is-inactive')
    if next_page and next_page.find_next_sibling("a"):
        next_page = next_page.find_next_sibling("a")['href']
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page) )

    return itemlist
def findvideos(item):
    """Extract playable links from a movie page.

    The page carries an obfuscated ``output`` script with anchor templates and
    a parallel list of embed URLs; presumably entries are paired via the number
    after "codigo" (1-based in the page, hence the -1) — confirm against the
    live markup.
    """
    logger.info()
    itemlist = []
    serv=[]
    data = httptools.downloadpage(item.url).data
    output = scrapertools.find_single_match(data, 'var output = "(.*?)output ').replace("\\", "")
    output = output.split(";")
    quality = scrapertools.find_single_match(data, "<strong>Calidad: </strong> (\d+)p<")
    online = scrapertools.find_single_match(data, '<div class="centradito"><script>[A-z0-9]+ \(([^\)]+)')
    online = online.replace('"', '').split(",")

    for elem in output:
        if "href" in elem :
            ref = scrapertools.find_single_match(elem, 'href="([^"]+)"')
            id = scrapertools.find_single_match(elem, 'codigo(\d+)')
            if id:
                id = (int(id)-1)
            # "codigo" anchors reference an embed URL directly; otherwise the
            # href is a prefix to be combined with the embed URL.
            if "codigo" in ref:
                url = online[id]
            if not "no.html" in ref:
                url = "%s%s" %(ref, online[id])
            itemlist.append(item.clone(action="play", title= "%s", contentTitle = item.title, url=url))

    # The direct-download link is published separately in `var abc`.
    descarga = scrapertools.find_single_match(data, "var abc = '([^']+)'")
    itemlist.append(item.clone(action="play", title= "%s", contentTitle = item.title, url=descarga))

    # Resolve the server for each link and fill in the "%s" title placeholder.
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

    # Requerido para Filtrar enlaces
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos' and not "/episodios/" in item.url :
        itemlist.append(item.clone(action="add_pelicula_to_library",
                                   title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                                   extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
| alfa-addon/addon | plugin.video.alfa/channels/pelisencastellano.py | Python | gpl-3.0 | 6,956 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Triggering DAG runs APIs."""
import json
from datetime import datetime
from typing import List, Optional, Union
from airflow.exceptions import DagNotFound, DagRunAlreadyExists
from airflow.models import DagBag, DagModel, DagRun
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.utils.types import DagRunType
def _trigger_dag(
    dag_id: str,
    dag_bag: DagBag,
    run_id: Optional[str] = None,
    conf: Optional[Union[dict, str]] = None,
    execution_date: Optional[datetime] = None,
    replace_microseconds: bool = True,
) -> List[DagRun]:  # pylint: disable=too-many-arguments
    """Triggers DAG run.

    :param dag_id: DAG ID
    :param dag_bag: DAG Bag model
    :param run_id: ID of the dag_run
    :param conf: configuration, either a dict or a JSON string
    :param execution_date: date of execution; defaults to now (must be tz-aware)
    :param replace_microseconds: whether microseconds should be zeroed
    :return: list of triggered dags (the DAG itself plus any subdags)
    """
    dag = dag_bag.get_dag(dag_id)  # prefetch dag if it is stored serialized

    if dag_id not in dag_bag.dags:
        raise DagNotFound(f"Dag id {dag_id} not found")

    execution_date = execution_date if execution_date else timezone.utcnow()

    if not timezone.is_localized(execution_date):
        raise ValueError("The execution_date should be localized")

    if replace_microseconds:
        execution_date = execution_date.replace(microsecond=0)

    if dag.default_args and 'start_date' in dag.default_args:
        min_dag_start_date = dag.default_args["start_date"]
        if min_dag_start_date and execution_date < min_dag_start_date:
            # f-string for consistency with the other messages in this module.
            raise ValueError(
                f"The execution_date [{execution_date.isoformat()}] should be >= start_date "
                f"[{min_dag_start_date.isoformat()}] from DAG's default_args"
            )

    run_id = run_id or DagRun.generate_run_id(DagRunType.MANUAL, execution_date)
    dag_run = DagRun.find(dag_id=dag_id, run_id=run_id)

    if dag_run:
        raise DagRunAlreadyExists(f"Run id {run_id} already exists for dag id {dag_id}")

    run_conf = None
    if conf:
        run_conf = conf if isinstance(conf, dict) else json.loads(conf)

    # Create one run for the DAG and one for each of its subdags, all sharing
    # the same run_id/execution_date/conf.
    triggers = []
    dags_to_trigger = [dag] + dag.subdags
    for _dag in dags_to_trigger:
        trigger = _dag.create_dagrun(
            run_id=run_id,
            execution_date=execution_date,
            state=State.RUNNING,
            conf=run_conf,
            external_trigger=True,
            dag_hash=dag_bag.dags_hash.get(dag_id),
        )
        triggers.append(trigger)
    return triggers
def trigger_dag(
    dag_id: str,
    run_id: Optional[str] = None,
    conf: Optional[Union[dict, str]] = None,
    execution_date: Optional[datetime] = None,
    replace_microseconds: bool = True,
) -> Optional[DagRun]:
    """Triggers execution of DAG specified by dag_id

    :param dag_id: DAG ID
    :param run_id: ID of the dag_run
    :param conf: configuration
    :param execution_date: date of execution
    :param replace_microseconds: whether microseconds should be zeroed
    :return: first dag run triggered - even if more than one Dag Runs were triggered or None
    """
    dag_model = DagModel.get_current(dag_id)
    if dag_model is None:
        raise DagNotFound(f"Dag id {dag_id} not found in DagModel")

    # Load the DAG from its serialized DB representation instead of re-parsing
    # the DAG file.
    dagbag = DagBag(dag_folder=dag_model.fileloc, read_dags_from_db=True)
    triggers = _trigger_dag(
        dag_id=dag_id,
        dag_bag=dagbag,
        run_id=run_id,
        conf=conf,
        execution_date=execution_date,
        replace_microseconds=replace_microseconds,
    )

    # _trigger_dag returns one run per DAG/subdag; callers only get the parent's.
    return triggers[0] if triggers else None
| airbnb/airflow | airflow/api/common/experimental/trigger_dag.py | Python | apache-2.0 | 4,451 |
# Copyright 2017 SrMouraSilva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
from unittest.mock import MagicMock
class Port(metaclass=ABCMeta):
    """Common abstraction over the inputs and outputs of an effect.

    :param Effect effect: Effect that owns this port
    """

    def __init__(self, effect):
        self._effect = effect
        # Observer stub; replaced by a real observer when wired into a manager.
        self.observer = MagicMock()

    @property
    def effect(self):
        """
        :return: Effect that this port is related
        """
        return self._effect

    @property
    @abstractmethod
    def symbol(self):
        """
        :return: Identifier for this port
        """
        pass

    @property
    @abstractmethod
    def index(self):
        """
        :return: Position of this port among the effect's ports of the same
                 category (e.g. an input's position within the inputs).
        """
        pass

    @property
    @abstractmethod
    def connection_class(self):
        """
        :return: Class used for connections in this port
        """
        pass

    @property
    def __dict__(self):
        return {
            'effect': self.effect.index,
            'symbol': self.symbol,
            'index': self.index,
        }

    @property
    def json(self):
        """
        Get a json decodable representation

        :return dict: json representation
        """
        return self.__dict__

    def __repr__(self):
        class_name = self.__class__.__name__
        return "<{} object as {} at 0x{:x}>".format(class_name, str(self), id(self))
| PedalPi/PluginsManager | pluginsmanager/model/port.py | Python | apache-2.0 | 2,161 |
#!/usr/bin/env python
import argparse
import mirheo as mir
import numpy as np
import os
import pandas as pd
# Integration test: simulate a uniform DPD fluid and record its time-averaged
# virial pressure (normalized by the domain volume) to the --out file.
parser = argparse.ArgumentParser()
parser.add_argument('--out', type=str, required=True)
args = parser.parse_args()

dt = 0.001

ranks = (1, 1, 1)
domain = (32, 32, 32)

tdump_every = 0.001
dump_every = int(tdump_every / dt)  # plugin dump period expressed in steps

u = mir.Mirheo(ranks, domain, debug_level=3, log_filename='log', no_splash=True)

pv_name="pv"
path="pressure"

pv = mir.ParticleVectors.ParticleVector(pv_name, mass = 1)
ic = mir.InitialConditions.Uniform(number_density=10)
u.registerParticleVector(pv, ic)

# DPD pair interaction with stress accumulation enabled so the virial
# pressure plugin has per-particle stresses to integrate.
dpd = mir.Interactions.Pairwise('dpd', rc=1.0, kind="DPD", a=10.0, gamma=10.0, kBT=1.0, power=0.5, stress=True, stress_period=tdump_every)
u.registerInteraction(dpd)
u.setInteraction(dpd, pv, pv)

vv = mir.Integrators.VelocityVerlet('vv')
u.registerIntegrator(vv)
u.setIntegrator(vv, pv)

def predicate_all_domain(r):
    # Region weight for the pressure plugin: include the entire domain.
    return 1.0

h = (1.0, 1.0, 1.0)

u.registerPlugins(mir.Plugins.createVirialPressurePlugin('Pressure', pv, predicate_all_domain, h, dump_every, path))

u.run(2001, dt=dt)

volume = domain[0]*domain[1]*domain[2]

# Only the master rank post-processes the CSV dump and writes the scalar result.
if u.isMasterTask():
    df = pd.read_csv(os.path.join(path, pv_name+".csv"))
    p_mean = np.mean(df["pressure"]) / volume
    np.savetxt(args.out, [p_mean])

del(u)
# nTEST: stress.pressure
# cd stress
# rm -rf pressure pressure.txt
# mir.run --runargs "-n 2" ./pressure.py --out pressure.txt
# cat pressure.txt | uscale 0.1 > pressure.out.txt
| dimaleks/uDeviceX | tests/stress/pressure.py | Python | gpl-3.0 | 1,475 |
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.currencies
~~~~~~~~~~~~~~~
Provides currency lookup dictionaries
Attributes:
CURRENCY_SYMBOLS (dict): Currency symbol to code mapping
CURRENCY_CODES (dict): Currency symbol to info mapping
"""
# Currency symbol -> ISO 4217 code.
#
# The original literal also listed the escape forms '\xa3', '\u20ac' and
# '\u20b9', but those are the *same* characters as '£', '€' and '₹', so
# they were duplicate keys that the dict literal silently collapsed.
# They are removed here; the resulting mapping is unchanged.
CURRENCY_SYMBOLS = {
    '$': 'USD',
    '£': 'GBP',
    '€': 'EUR',
    '₹': 'INR',
}
CURRENCY_CODES = {
'AED': {
'code': 'AED',
'decimal_digits': 2,
'location': 'United Arab Emirates',
'name': 'United Arab Emirates Dirham',
'name_plural': 'UAE dirhams',
'rounding': 0,
'symbol': 'AED',
'symbol_native': 'د.إ.'},
'AFN': {
'code': 'AFN',
'decimal_digits': 0,
'location': 'Afghanistan',
'name': 'Afghan Afghani',
'name_plural': 'Afghan Afghanis',
'rounding': 0,
'symbol': 'Af',
'symbol_native': '؋'},
'ALL': {
'code': 'ALL',
'decimal_digits': 0,
'location': 'Albania',
'name': 'Albanian Lek',
'name_plural': 'Albanian lekë',
'rounding': 0,
'symbol': 'ALL',
'symbol_native': 'Lek'},
'AMD': {
'code': 'AMD',
'decimal_digits': 0,
'location': 'Armenia',
'name': 'Armenian Dram',
'name_plural': 'Armenian drams',
'rounding': 0,
'symbol': 'AMD',
'symbol_native': 'դր.'},
'ARS': {
'code': 'ARS',
'decimal_digits': 2,
'location': 'Argentina',
'name': 'Argentine Peso',
'name_plural': 'Argentine pesos',
'rounding': 0,
'symbol': 'AR$',
'symbol_native': '$'},
'AUD': {
'code': 'AUD',
'decimal_digits': 2,
'location': 'Australia',
'name': 'Australian Dollar',
'name_plural': 'Australian dollars',
'rounding': 0,
'symbol': 'AU$',
'symbol_native': '$'},
'AZN': {
'code': 'AZN',
'decimal_digits': 2,
'location': 'Azerbaijan',
'name': 'Azerbaijani Manat',
'name_plural': 'Azerbaijani manats',
'rounding': 0,
'symbol': 'man.',
'symbol_native': 'ман.'},
'BAM': {
'code': 'BAM',
'decimal_digits': 2,
'location': 'Bosnia and Herzegovina',
'name': 'Bosnia-Herzegovina Convertible Mark',
'name_plural': 'Bosnia-Herzegovina convertible marks',
'rounding': 0,
'symbol': 'KM',
'symbol_native': 'KM'},
'BDT': {
'code': 'BDT',
'decimal_digits': 2,
'location': 'Bangladesh',
'name': 'Bangladeshi Taka',
'name_plural': 'Bangladeshi takas',
'rounding': 0,
'symbol': 'Tk',
'symbol_native': '৳'},
'BGN': {
'code': 'BGN',
'decimal_digits': 2,
'location': 'Bulgaria',
'name': 'Bulgarian Lev',
'name_plural': 'Bulgarian leva',
'rounding': 0,
'symbol': 'BGN',
'symbol_native': 'лв.'},
'BHD': {
'code': 'BHD',
'decimal_digits': 3,
'location': 'Bahrain',
'name': 'Bahraini Dinar',
'name_plural': 'Bahraini dinars',
'rounding': 0,
'symbol': 'BD',
'symbol_native': 'د.ب.'},
'BIF': {
'code': 'BIF',
'decimal_digits': 0,
'location': 'Burundi',
'name': 'Burundian Franc',
'name_plural': 'Burundian francs',
'rounding': 0,
'symbol': 'FBu',
'symbol_native': 'FBu'},
'BND': {
'code': 'BND',
'decimal_digits': 2,
'location': 'Brunei',
'name': 'Brunei Dollar',
'name_plural': 'Brunei dollars',
'rounding': 0,
'symbol': 'BN$',
'symbol_native': '$'},
'BOB': {
'code': 'BOB',
'decimal_digits': 2,
'location': 'Bolivia',
'name': 'Bolivian Boliviano',
'name_plural': 'Bolivian bolivianos',
'rounding': 0,
'symbol': 'Bs',
'symbol_native': 'Bs'},
'BRL': {
'code': 'BRL',
'decimal_digits': 2,
'location': 'Brazil',
'name': 'Brazilian Real',
'name_plural': 'Brazilian reals',
'rounding': 0,
'symbol': 'R$',
'symbol_native': 'R$'},
'BWP': {
'code': 'BWP',
'decimal_digits': 2,
'location': 'Botswana',
'name': 'Botswanan Pula',
'name_plural': 'Botswanan pulas',
'rounding': 0,
'symbol': 'BWP',
'symbol_native': 'P'},
'BYR': {
'code': 'BYR',
'decimal_digits': 0,
'location': 'Belarus',
'name': 'Belarusian Ruble',
'name_plural': 'Belarusian rubles',
'rounding': 0,
'symbol': 'BYR',
'symbol_native': 'BYR'},
'BZD': {
'code': 'BZD',
'decimal_digits': 2,
'location': 'Belize',
'name': 'Belize Dollar',
'name_plural': 'Belize dollars',
'rounding': 0,
'symbol': 'BZ$',
'symbol_native': '$'},
'CAD': {
'code': 'CAD',
'decimal_digits': 2,
'location': 'Canada',
'name': 'Canadian Dollar',
'name_plural': 'Canadian dollars',
'rounding': 0,
'symbol': 'CA$',
'symbol_native': '$'},
'CDF': {
'code': 'CDF',
'decimal_digits': 2,
'location': 'Democratic Republic of the Congo',
'name': 'Congolese Franc',
'name_plural': 'Congolese francs',
'rounding': 0,
'symbol': 'CDF',
'symbol_native': 'FrCD'},
'CHF': {
'code': 'CHF',
'decimal_digits': 2,
'location': 'Switzerland',
'name': 'Swiss Franc',
'name_plural': 'Swiss francs',
'rounding': 0.05,
'symbol': 'CHF',
'symbol_native': 'CHF'},
'CLP': {
'code': 'CLP',
'decimal_digits': 0,
'location': 'Chile',
'name': 'Chilean Peso',
'name_plural': 'Chilean pesos',
'rounding': 0,
'symbol': 'CL$',
'symbol_native': '$'},
'CNY': {
'code': 'CNY',
'decimal_digits': 2,
'location': 'China',
'name': 'Chinese Yuan',
'name_plural': 'Chinese yuan',
'rounding': 0,
'symbol': 'CN¥',
'symbol_native': 'CN¥'},
'COP': {
'code': 'COP',
'decimal_digits': 0,
'location': 'Colombia',
'name': 'Colombian Peso',
'name_plural': 'Colombian pesos',
'rounding': 0,
'symbol': 'CO$',
'symbol_native': '$'},
'CRC': {
'code': 'CRC',
'decimal_digits': 0,
'location': 'Costa Rica',
'name': 'Costa Rican Colón',
'name_plural': 'Costa Rican colóns',
'rounding': 0,
'symbol': '₡',
'symbol_native': '₡'},
'CVE': {
'code': 'CVE',
'decimal_digits': 2,
'location': 'Cape Verde',
'name': 'Cape Verdean Escudo',
'name_plural': 'Cape Verdean escudos',
'rounding': 0,
'symbol': 'CV$',
'symbol_native': 'CV$'},
'CZK': {
'code': 'CZK',
'decimal_digits': 2,
'location': 'Czech Republic',
'name': 'Czech Republic Koruna',
'name_plural': 'Czech Republic korunas',
'rounding': 0,
'symbol': 'Kč',
'symbol_native': 'Kč'},
'DJF': {
'code': 'DJF',
'decimal_digits': 0,
'location': 'Djibouti',
'name': 'Djiboutian Franc',
'name_plural': 'Djiboutian francs',
'rounding': 0,
'symbol': 'Fdj',
'symbol_native': 'Fdj'},
'DKK': {
'code': 'DKK',
'decimal_digits': 2,
'location': 'Denmark',
'name': 'Danish Krone',
'name_plural': 'Danish kroner',
'rounding': 0,
'symbol': 'Dkr',
'symbol_native': 'kr'},
'DOP': {
'code': 'DOP',
'decimal_digits': 2,
'location': 'Dominican Republic',
'name': 'Dominican Peso',
'name_plural': 'Dominican pesos',
'rounding': 0,
'symbol': 'RD$',
'symbol_native': 'RD$'},
'DZD': {
'code': 'DZD',
'decimal_digits': 2,
'location': 'Algeria',
'name': 'Algerian Dinar',
'name_plural': 'Algerian dinars',
'rounding': 0,
'symbol': 'DA',
'symbol_native': 'د.ج.'},
'EEK': {
'code': 'EEK',
'decimal_digits': 2,
'name': 'Estonian Kroon',
'name_plural': 'Estonian kroons',
'rounding': 0,
'symbol': 'Ekr',
'symbol_native': 'kr'},
'EGP': {
'code': 'EGP',
'decimal_digits': 2,
'location': 'Egypt',
'name': 'Egyptian Pound',
'name_plural': 'Egyptian pounds',
'rounding': 0,
'symbol': 'EGP',
'symbol_native': 'ج.م.'},
'ERN': {
'code': 'ERN',
'decimal_digits': 2,
'location': 'Eritrea',
'name': 'Eritrean Nakfa',
'name_plural': 'Eritrean nakfas',
'rounding': 0,
'symbol': 'Nfk',
'symbol_native': 'Nfk'},
'ETB': {
'code': 'ETB',
'decimal_digits': 2,
'location': 'Ethiopia',
'name': 'Ethiopian Birr',
'name_plural': 'Ethiopian birrs',
'rounding': 0,
'symbol': 'Br',
'symbol_native': 'Br'},
'EUR': {
'code': 'EUR',
'decimal_digits': 2,
'location': 'European Union',
'name': 'Euro',
'name_plural': 'euros',
'rounding': 0,
'symbol': '€',
'symbol_native': '€'},
'GBP': {
'code': 'GBP',
'decimal_digits': 2,
'location': 'United Kingdom',
'name': 'British Pound Sterling',
'name_plural': 'British pounds sterling',
'rounding': 0,
'symbol': '£',
'symbol_native': '£'},
'GEL': {
'code': 'GEL',
'decimal_digits': 2,
'location': 'Georgia',
'name': 'Georgian Lari',
'name_plural': 'Georgian laris',
'rounding': 0,
'symbol': 'GEL',
'symbol_native': 'GEL'},
'GHS': {
'code': 'GHS',
'decimal_digits': 2,
'location': 'Ghana',
'name': 'Ghanaian Cedi',
'name_plural': 'Ghanaian cedis',
'rounding': 0,
'symbol': 'GH₵',
'symbol_native': 'GH₵'},
'GNF': {
'code': 'GNF',
'decimal_digits': 0,
'location': 'Guinea',
'name': 'Guinean Franc',
'name_plural': 'Guinean francs',
'rounding': 0,
'symbol': 'FG',
'symbol_native': 'FG'},
'GTQ': {
'code': 'GTQ',
'decimal_digits': 2,
'location': 'Guatemala',
'name': 'Guatemalan Quetzal',
'name_plural': 'Guatemalan quetzals',
'rounding': 0,
'symbol': 'GTQ',
'symbol_native': 'Q'},
'HKD': {
'code': 'HKD',
'decimal_digits': 2,
'location': 'Hong Kong',
'name': 'Hong Kong Dollar',
'name_plural': 'Hong Kong dollars',
'rounding': 0,
'symbol': 'HK$',
'symbol_native': '$'},
'HNL': {
'code': 'HNL',
'decimal_digits': 2,
'location': 'Honduras',
'name': 'Honduran Lempira',
'name_plural': 'Honduran lempiras',
'rounding': 0,
'symbol': 'HNL',
'symbol_native': 'L'},
'HRK': {
'code': 'HRK',
'decimal_digits': 2,
'location': 'Croatia',
'name': 'Croatian Kuna',
'name_plural': 'Croatian kunas',
'rounding': 0,
'symbol': 'kn',
'symbol_native': 'kn'},
'HUF': {
'code': 'HUF',
'decimal_digits': 0,
'location': 'Hungary',
'name': 'Hungarian Forint',
'name_plural': 'Hungarian forints',
'rounding': 0,
'symbol': 'Ft',
'symbol_native': 'Ft'},
'IDR': {
'code': 'IDR',
'decimal_digits': 0,
'location': 'Indonesia',
'name': 'Indonesian Rupiah',
'name_plural': 'Indonesian rupiahs',
'rounding': 0,
'symbol': 'Rp',
'symbol_native': 'Rp'},
'ILS': {
'code': 'ILS',
'decimal_digits': 2,
'location': 'Israel',
'name': 'Israeli New Sheqel',
'name_plural': 'Israeli new sheqels',
'rounding': 0,
'symbol': '₪',
'symbol_native': '₪'},
'INR': {
'code': 'INR',
'decimal_digits': 2,
'location': 'India',
'name': 'Indian Rupee',
'name_plural': 'Indian rupees',
'rounding': 0,
'symbol': 'Rs',
'symbol_native': '₹'},
'IQD': {
'code': 'IQD',
'decimal_digits': 0,
'location': 'Iraq',
'name': 'Iraqi Dinar',
'name_plural': 'Iraqi dinars',
'rounding': 0,
'symbol': 'IQD',
'symbol_native': 'د.ع.'},
'IRR': {
'code': 'IRR',
'decimal_digits': 0,
'location': 'Iran',
'name': 'Iranian Rial',
'name_plural': 'Iranian rials',
'rounding': 0,
'symbol': 'IRR',
'symbol_native': '﷼'},
'ISK': {
'code': 'ISK',
'decimal_digits': 0,
'location': 'Iceland',
'name': 'Icelandic Króna',
'name_plural': 'Icelandic krónur',
'rounding': 0,
'symbol': 'Ikr',
'symbol_native': 'kr'},
'JMD': {
'code': 'JMD',
'decimal_digits': 2,
'location': 'Jamaica',
'name': 'Jamaican Dollar',
'name_plural': 'Jamaican dollars',
'rounding': 0,
'symbol': 'J$',
'symbol_native': '$'},
'JOD': {
'code': 'JOD',
'decimal_digits': 3,
'location': 'Jordan',
'name': 'Jordanian Dinar',
'name_plural': 'Jordanian dinars',
'rounding': 0,
'symbol': 'JD',
'symbol_native': 'د.أ.'},
'JPY': {
'code': 'JPY',
'decimal_digits': 0,
'location': 'Japan',
'name': 'Japanese Yen',
'name_plural': 'Japanese yen',
'rounding': 0,
'symbol': '¥',
'symbol_native': '¥'},
'KES': {
'code': 'KES',
'decimal_digits': 2,
'location': 'Kenya',
'name': 'Kenyan Shilling',
'name_plural': 'Kenyan shillings',
'rounding': 0,
'symbol': 'Ksh',
'symbol_native': '/–'},
'KHR': {
'code': 'KHR',
'decimal_digits': 2,
'location': 'Cambodia',
'name': 'Cambodian Riel',
'name_plural': 'Cambodian riels',
'rounding': 0,
'symbol': 'KHR',
'symbol_native': '៛'},
'KMF': {
'code': 'KMF',
'decimal_digits': 0,
'location': 'Comoros',
'name': 'Comorian Franc',
'name_plural': 'Comorian francs',
'rounding': 0,
'symbol': 'CF',
'symbol_native': 'FC'},
'KRW': {
'code': 'KRW',
'decimal_digits': 0,
'location': 'South Korea',
'name': 'South Korean Won',
'name_plural': 'South Korean won',
'rounding': 0,
'symbol': '₩',
'symbol_native': '₩'},
'KWD': {
'code': 'KWD',
'decimal_digits': 3,
'location': 'Kuwait',
'name': 'Kuwaiti Dinar',
'name_plural': 'Kuwaiti dinars',
'rounding': 0,
'symbol': 'KD',
'symbol_native': 'د.ك.'},
'KZT': {
'code': 'KZT',
'decimal_digits': 2,
'location': 'Kazakhstan',
'name': 'Kazakhstani Tenge',
'name_plural': 'Kazakhstani tenges',
'rounding': 0,
'symbol': 'KZT',
'symbol_native': 'тңг.'},
'LBP': {
'code': 'LBP',
'decimal_digits': 0,
'location': 'Lebanon',
'name': 'Lebanese Pound',
'name_plural': 'Lebanese pounds',
'rounding': 0,
'symbol': 'LB£',
'symbol_native': 'ل.ل.'},
'LKR': {
'code': 'LKR',
'decimal_digits': 2,
'location': 'Sri Lanka',
'name': 'Sri Lankan Rupee',
'name_plural': 'Sri Lankan rupees',
'rounding': 0,
'symbol': 'SLRs',
'symbol_native': 'SL Re'},
'LTL': {
'code': 'LTL',
'decimal_digits': 2,
'name': 'Lithuanian Litas',
'name_plural': 'Lithuanian litai',
'rounding': 0,
'symbol': 'Lt',
'symbol_native': 'Lt'},
'LVL': {
'code': 'LVL',
'decimal_digits': 2,
'name': 'Latvian Lats',
'name_plural': 'Latvian lati',
'rounding': 0,
'symbol': 'Ls',
'symbol_native': 'Ls'},
'LYD': {
'code': 'LYD',
'decimal_digits': 3,
'location': 'Libya',
'name': 'Libyan Dinar',
'name_plural': 'Libyan dinars',
'rounding': 0,
'symbol': 'LD',
'symbol_native': 'د.ل.'},
'MAD': {
'code': 'MAD',
'decimal_digits': 2,
'location': 'Morocco',
'name': 'Moroccan Dirham',
'name_plural': 'Moroccan dirhams',
'rounding': 0,
'symbol': 'MAD',
'symbol_native': 'د.م.'},
'MDL': {
'code': 'MDL',
'decimal_digits': 2,
'location': 'Moldova',
'name': 'Moldovan Leu',
'name_plural': 'Moldovan lei',
'rounding': 0,
'symbol': 'MDL',
'symbol_native': 'MDL'},
'MGA': {
'code': 'MGA',
'decimal_digits': 0,
'location': 'Madagascar',
'name': 'Malagasy Ariary',
'name_plural': 'Malagasy Ariaries',
'rounding': 0,
'symbol': 'MGA',
'symbol_native': 'MGA'},
'MKD': {
'code': 'MKD',
'decimal_digits': 2,
'location': 'Macedonia',
'name': 'Macedonian Denar',
'name_plural': 'Macedonian denari',
'rounding': 0,
'symbol': 'MKD',
'symbol_native': 'MKD'},
'MMK': {
'code': 'MMK',
'decimal_digits': 0,
'location': 'Myanmar',
'name': 'Myanma Kyat',
'name_plural': 'Myanma kyats',
'rounding': 0,
'symbol': 'MMK',
'symbol_native': 'K'},
'MOP': {
'code': 'MOP',
'decimal_digits': 2,
'location': 'Macao',
'name': 'Macanese Pataca',
'name_plural': 'Macanese patacas',
'rounding': 0,
'symbol': 'MOP$',
'symbol_native': 'MOP$'},
'MUR': {
'code': 'MUR',
'decimal_digits': 0,
'location': 'Mauritius',
'name': 'Mauritian Rupee',
'name_plural': 'Mauritian rupees',
'rounding': 0,
'symbol': 'MURs',
'symbol_native': 'MURs'},
'MXN': {
'code': 'MXN',
'decimal_digits': 2,
'location': 'Mexico',
'name': 'Mexican Peso',
'name_plural': 'Mexican pesos',
'rounding': 0,
'symbol': 'MX$',
'symbol_native': '$'},
'MYR': {
'code': 'MYR',
'decimal_digits': 2,
'location': 'Malaysia',
'name': 'Malaysian Ringgit',
'name_plural': 'Malaysian ringgits',
'rounding': 0,
'symbol': 'RM',
'symbol_native': 'RM'},
'MZN': {
'code': 'MZN',
'decimal_digits': 2,
'location': 'Mozambique',
'name': 'Mozambican Metical',
'name_plural': 'Mozambican meticals',
'rounding': 0,
'symbol': 'MTn',
'symbol_native': 'MTn'},
'NAD': {
'code': 'NAD',
'decimal_digits': 2,
'location': 'Namibia',
'name': 'Namibian Dollar',
'name_plural': 'Namibian dollars',
'rounding': 0,
'symbol': 'N$',
'symbol_native': 'N$'},
'NGN': {
'code': 'NGN',
'decimal_digits': 2,
'location': 'Nigeria',
'name': 'Nigerian Naira',
'name_plural': 'Nigerian nairas',
'rounding': 0,
'symbol': '₦',
'symbol_native': '₦'},
'NIO': {
'code': 'NIO',
'decimal_digits': 2,
'location': 'Nicaragua',
'name': 'Nicaraguan Córdoba',
'name_plural': 'Nicaraguan córdobas',
'rounding': 0,
'symbol': 'C$',
'symbol_native': 'C$'},
'NOK': {
'code': 'NOK',
'decimal_digits': 2,
'location': 'Norway',
'name': 'Norwegian Krone',
'name_plural': 'Norwegian kroner',
'rounding': 0,
'symbol': 'Nkr',
'symbol_native': 'kr'},
'NPR': {
'code': 'NPR',
'decimal_digits': 2,
'location': 'Nepal',
'name': 'Nepalese Rupee',
'name_plural': 'Nepalese rupees',
'rounding': 0,
'symbol': 'NPRs',
'symbol_native': 'नेरू'},
'NZD': {
'code': 'NZD',
'decimal_digits': 2,
'location': 'New Zealand',
'name': 'New Zealand Dollar',
'name_plural': 'New Zealand dollars',
'rounding': 0,
'symbol': 'NZ$',
'symbol_native': '$'},
'OMR': {
'code': 'OMR',
'decimal_digits': 3,
'location': 'Oman',
'name': 'Omani Rial',
'name_plural': 'Omani rials',
'rounding': 0,
'symbol': 'OMR',
'symbol_native': 'ر.ع.'},
'PAB': {
'code': 'PAB',
'decimal_digits': 2,
'location': 'Panama',
'name': 'Panamanian Balboa',
'name_plural': 'Panamanian balboas',
'rounding': 0,
'symbol': 'B/.',
'symbol_native': 'B/.'},
'PEN': {
'code': 'PEN',
'decimal_digits': 2,
'location': 'Peru',
'name': 'Peruvian Nuevo Sol',
'name_plural': 'Peruvian nuevos soles',
'rounding': 0,
'symbol': 'S/.',
'symbol_native': 'S/.'},
'PHP': {
'code': 'PHP',
'decimal_digits': 2,
'location': 'Philippines',
'name': 'Philippine Peso',
'name_plural': 'Philippine pesos',
'rounding': 0,
'symbol': '₱',
'symbol_native': '₱'},
'PKR': {
'code': 'PKR',
'decimal_digits': 0,
'location': 'Pakistan',
'name': 'Pakistani Rupee',
'name_plural': 'Pakistani rupees',
'rounding': 0,
'symbol': 'PKRs',
'symbol_native': '₨'},
'PLN': {
'code': 'PLN',
'decimal_digits': 2,
'location': 'Poland',
'name': 'Polish Zloty',
'name_plural': 'Polish zlotys',
'rounding': 0,
'symbol': 'zł',
'symbol_native': 'zł'},
'PYG': {
'code': 'PYG',
'decimal_digits': 0,
'location': 'Paraguay',
'name': 'Paraguayan Guarani',
'name_plural': 'Paraguayan guaranis',
'rounding': 0,
'symbol': '₲',
'symbol_native': '₲'},
'QAR': {
'code': 'QAR',
'decimal_digits': 2,
'location': 'Qatar',
'name': 'Qatari Rial',
'name_plural': 'Qatari rials',
'rounding': 0,
'symbol': 'QR',
'symbol_native': 'ر.ق.'},
'RON': {
'code': 'RON',
'decimal_digits': 2,
'location': 'Romania',
'name': 'Romanian Leu',
'name_plural': 'Romanian lei',
'rounding': 0,
'symbol': 'RON',
'symbol_native': 'RON'},
'RSD': {
'code': 'RSD',
'decimal_digits': 0,
'location': 'Serbia',
'name': 'Serbian Dinar',
'name_plural': 'Serbian dinars',
'rounding': 0,
'symbol': 'din.',
'symbol_native': 'дин.'},
'RUB': {
'code': 'RUB',
'decimal_digits': 2,
'location': 'Russia',
'name': 'Russian Ruble',
'name_plural': 'Russian rubles',
'rounding': 0,
'symbol': 'RUB',
'symbol_native': 'руб.'},
'RWF': {
'code': 'RWF',
'decimal_digits': 0,
'location': 'Rwanda',
'name': 'Rwandan Franc',
'name_plural': 'Rwandan francs',
'rounding': 0,
'symbol': 'RWF',
'symbol_native': 'FR'},
'SAR': {
'code': 'SAR',
'decimal_digits': 2,
'location': 'Saudi Arabia',
'name': 'Saudi Riyal',
'name_plural': 'Saudi riyals',
'rounding': 0,
'symbol': 'SR',
'symbol_native': 'ر.س.'},
'SDG': {
'code': 'SDG',
'decimal_digits': 2,
'location': 'Sudan',
'name': 'Sudanese Pound',
'name_plural': 'Sudanese pounds',
'rounding': 0,
'symbol': 'SDG',
'symbol_native': 'SDG'},
'SEK': {
'code': 'SEK',
'decimal_digits': 2,
'location': 'Sweden',
'name': 'Swedish Krona',
'name_plural': 'Swedish kronor',
'rounding': 0,
'symbol': 'Skr',
'symbol_native': 'kr'},
'SGD': {
'code': 'SGD',
'decimal_digits': 2,
'location': 'Singapore',
'name': 'Singapore Dollar',
'name_plural': 'Singapore dollars',
'rounding': 0,
'symbol': 'S$',
'symbol_native': '$'},
'SOS': {
'code': 'SOS',
'decimal_digits': 0,
'location': 'Somalia',
'name': 'Somali Shilling',
'name_plural': 'Somali shillings',
'rounding': 0,
'symbol': 'Ssh',
'symbol_native': 'Ssh'},
'SYP': {
'code': 'SYP',
'decimal_digits': 0,
'location': 'Syria',
'name': 'Syrian Pound',
'name_plural': 'Syrian pounds',
'rounding': 0,
'symbol': 'SY£',
'symbol_native': 'ل.س.'},
'THB': {
'code': 'THB',
'decimal_digits': 2,
'location': 'Thailand',
'name': 'Thai Baht',
'name_plural': 'Thai baht',
'rounding': 0,
'symbol': '฿',
'symbol_native': '฿'},
'TND': {
'code': 'TND',
'decimal_digits': 3,
'location': 'Tunisia',
'name': 'Tunisian Dinar',
'name_plural': 'Tunisian dinars',
'rounding': 0,
'symbol': 'DT',
'symbol_native': 'د.ت.'},
'TOP': {
'code': 'TOP',
'decimal_digits': 2,
'location': 'Tonga',
'name': 'Tongan Paʻanga',
'name_plural': 'Tongan paʻanga',
'rounding': 0,
'symbol': 'T$',
'symbol_native': 'T$'},
'TRY': {
'code': 'TRY',
'decimal_digits': 2,
'location': 'Turkey',
'name': 'Turkish Lira',
'name_plural': 'Turkish Lira',
'rounding': 0,
'symbol': 'TL',
'symbol_native': '₤'},
'TTD': {
'code': 'TTD',
'decimal_digits': 2,
'location': 'Trinidad and Tobago',
'name': 'Trinidad and Tobago Dollar',
'name_plural': 'Trinidad and Tobago dollars',
'rounding': 0,
'symbol': 'TT$',
'symbol_native': '$'},
'TWD': {
'code': 'TWD',
'decimal_digits': 2,
'location': 'Taiwan',
'name': 'New Taiwan Dollar',
'name_plural': 'New Taiwan dollars',
'rounding': 0,
'symbol': 'NT$',
'symbol_native': 'NT$'},
'TZS': {
'code': 'TZS',
'decimal_digits': 0,
'location': 'Tanzania',
'name': 'Tanzanian Shilling',
'name_plural': 'Tanzanian shillings',
'rounding': 0,
'symbol': 'TSh',
'symbol_native': '/–'},
'UAH': {
'code': 'UAH',
'decimal_digits': 2,
'location': 'Ukraine',
'name': 'Ukrainian Hryvnia',
'name_plural': 'Ukrainian hryvnias',
'rounding': 0,
'symbol': '₴',
'symbol_native': '₴'},
'UGX': {
'code': 'UGX',
'decimal_digits': 0,
'location': 'Uganda',
'name': 'Ugandan Shilling',
'name_plural': 'Ugandan shillings',
'rounding': 0,
'symbol': 'USh',
'symbol_native': 'USh'},
'USD': {
'code': 'USD',
'decimal_digits': 2,
'location': 'United States',
'name': 'US Dollar',
'name_plural': 'US dollars',
'rounding': 0,
'symbol': '$',
'symbol_native': '$'},
'UYU': {
'code': 'UYU',
'decimal_digits': 2,
'location': 'Uruguay',
'name': 'Uruguayan Peso',
'name_plural': 'Uruguayan pesos',
'rounding': 0,
'symbol': '$U',
'symbol_native': '$'},
'UZS': {
'code': 'UZS',
'decimal_digits': 0,
'location': 'Uzbekistan',
'name': 'Uzbekistan Som',
'name_plural': 'Uzbekistan som',
'rounding': 0,
'symbol': 'UZS',
'symbol_native': 'UZS'},
'VEF': {
'code': 'VEF',
'decimal_digits': 2,
'location': 'Venezuela',
'name': 'Venezuelan Bolívar',
'name_plural': 'Venezuelan bolívars',
'rounding': 0,
'symbol': 'Bs.F.',
'symbol_native': 'Bs.F.'},
'VND': {
'code': 'VND',
'decimal_digits': 0,
'location': 'Vietnam',
'name': 'Vietnamese Dong',
'name_plural': 'Vietnamese dong',
'rounding': 0,
'symbol': '₫',
'symbol_native': '₫'},
'XAF': {
'code': 'XAF',
'decimal_digits': 0,
'location': 'Cameroon',
'name': 'CFA Franc BEAC',
'name_plural': 'CFA francs BEAC',
'rounding': 0,
'symbol': 'FCFA',
'symbol_native': 'FCFA'},
'XOF': {
'code': 'XOF',
'decimal_digits': 0,
'location': 'Benin',
'name': 'CFA Franc BCEAO',
'name_plural': 'CFA francs BCEAO',
'rounding': 0,
'symbol': 'CFA',
'symbol_native': 'CFA'},
'YER': {
'code': 'YER',
'decimal_digits': 0,
'location': 'Yemen',
'name': 'Yemeni Rial',
'name_plural': 'Yemeni rials',
'rounding': 0,
'symbol': 'YR',
'symbol_native': 'ر.ي.'},
'ZAR': {
'code': 'ZAR',
'decimal_digits': 2,
'location': 'South Africa',
'name': 'South African Rand',
'name_plural': 'South African rand',
'rounding': 0,
'symbol': 'R',
'symbol_native': 'R'},
'ZMK': {
'code': 'ZMK',
'decimal_digits': 0,
'name': 'Zambian Kwacha',
'name_plural': 'Zambian kwachas',
'rounding': 0,
'symbol': 'ZK',
'symbol_native': 'ZK'},
'ZWL': {
'code': 'ZWL', 'location': 'Zimbabwe'}}
| nerevu/riko | riko/currencies.py | Python | mit | 30,514 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger("scheduler")
from precious import models
from precious.worker import ProjectManagment
def check_remote_git_revision():
    """Poll every project's remote revision and rebuild those that changed."""
    logger.info("Checking project remote revision")
    for proj in models.Project.query.all():
        manager = ProjectManagment(proj)
        manager.start_project()
        # check_revison() (sic) reports whether the remote moved since the
        # last build; only an explicit True triggers a rebuild.
        if manager.check_revison() is True:
            manager.build_project()
def check_should_buid():
    """Unconditionally (re)build every registered project."""
    logger.info("Checking for project need to be build")
    for proj in models.Project.query.all():
        manager = ProjectManagment(proj)
        manager.start_project()
        manager.build_project()
def check_project_buid_web():
    """Build the projects that were scheduled for a build from the web UI."""
    logger.info("Will build project from web")
    for proj in models.Project.query.all():
        manager = ProjectManagment(proj)
        manager.start_project()
        # check_project_shedudled() (sic): only an explicit True means the
        # project was flagged for building via the web interface.
        if manager.check_project_shedudled() is True:
            manager.build_project()
| bzyx/precious | precious/sheduler/tasks.py | Python | bsd-3-clause | 943 |
# -*- coding: utf-8 -*-
# Tested on Markdown 2.3.1
#
# Copyright (c) 2014, Esteban Castro Borsani
# Copyright (c) 2014, Jesús Espino García
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
from django.conf import settings
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
# Grab the emojis (+800) here: https://github.com/arvida/emoji-cheat-sheet.com
# This **crazy long** list was generated by walking through the emojis.png
emojis_path = "{}://{}/static/img/emojis/".format(settings.SITES["api"]["scheme"], settings.SITES["api"]["domain"])
emojis_set = {
"+1", "-1", "100", "1234", "8ball", "a", "ab", "abc", "abcd", "accept", "aerial_tramway", "airplane",
"alarm_clock", "alien", "ambulance", "anchor", "angel", "anger", "angry", "anguished", "ant", "apple",
"aquarius", "aries", "arrows_clockwise", "arrows_counterclockwise", "arrow_backward", "arrow_double_down",
"arrow_double_up", "arrow_down", "arrow_down_small", "arrow_forward", "arrow_heading_down", "arrow_heading_up",
"arrow_left", "arrow_lower_left", "arrow_lower_right", "arrow_right", "arrow_right_hook", "arrow_up",
"arrow_upper_left", "arrow_upper_right", "arrow_up_down", "arrow_up_small", "art", "articulated_lorry",
"astonished", "atm", "b", "baby", "baby_bottle", "baby_chick", "baby_symbol", "baggage_claim", "balloon",
"ballot_box_with_check", "bamboo", "banana", "bangbang", "bank", "barber", "bar_chart", "baseball", "basketball",
"bath", "bathtub", "battery", "bear", "bee", "beer", "beers", "beetle", "beginner", "bell", "bento", "bicyclist",
"bike", "bikini", "bird", "birthday", "black_circle", "black_joker", "black_nib", "black_square",
"black_square_button", "blossom", "blowfish", "blue_book", "blue_car", "blue_heart", "blush", "boar", "boat",
"bomb", "book", "bookmark", "bookmark_tabs", "books", "boom", "boot", "bouquet", "bow", "bowling", "bowtie",
"boy", "bread", "bride_with_veil", "bridge_at_night", "briefcase", "broken_heart", "bug", "bulb",
"bullettrain_front", "bullettrain_side", "bus", "busstop", "busts_in_silhouette", "bust_in_silhouette",
"cactus", "cake", "calendar", "calling", "camel", "camera", "cancer", "candy", "capital_abcd", "capricorn",
"car", "card_index", "carousel_horse", "cat", "cat2", "cd", "chart", "chart_with_downwards_trend",
"chart_with_upwards_trend", "checkered_flag", "cherries", "cherry_blossom", "chestnut", "chicken",
"children_crossing", "chocolate_bar", "christmas_tree", "church", "cinema", "circus_tent", "city_sunrise",
"city_sunset", "cl", "clap", "clapper", "clipboard", "clock1", "clock10", "clock1030", "clock11", "clock1130",
"clock12", "clock1230", "clock130", "clock2", "clock230", "clock3", "clock330", "clock4", "clock430", "clock5",
"clock530", "clock6", "clock630", "clock7", "clock730", "clock8", "clock830", "clock9", "clock930",
"closed_book", "closed_lock_with_key", "closed_umbrella", "cloud", "clubs", "cn", "cocktail", "coffee",
"cold_sweat", "collision", "computer", "confetti_ball", "confounded", "confused", "congratulations",
"construction", "construction_worker", "convenience_store", "cookie", "cool", "cop", "copyright", "corn",
"couple", "couplekiss", "couple_with_heart", "cow", "cow2", "credit_card", "crocodile", "crossed_flags",
"crown", "cry", "crying_cat_face", "crystal_ball", "cupid", "curly_loop", "currency_exchange", "curry",
"custard", "customs", "cyclone", "dancer", "dancers", "dango", "dart", "dash", "date", "de", "deciduous_tree",
"department_store", "diamonds", "diamond_shape_with_a_dot_inside", "disappointed", "disappointed_relieved",
"dizzy", "dizzy_face", "dog", "dog2", "dollar", "dolls", "dolphin", "donut", "door", "doughnut",
"do_not_litter", "dragon", "dragon_face", "dress", "dromedary_camel", "droplet", "dvd", "e-mail", "ear",
"earth_africa", "earth_americas", "earth_asia", "ear_of_rice", "egg", "eggplant", "eight",
"eight_pointed_black_star", "eight_spoked_asterisk", "electric_plug", "elephant", "email", "end", "envelope",
"es", "euro", "european_castle", "european_post_office", "evergreen_tree", "exclamation", "expressionless",
"eyeglasses", "eyes", "facepunch", "factory", "fallen_leaf", "family", "fast_forward", "fax", "fearful",
"feelsgood", "feet", "ferris_wheel", "file_folder", "finnadie", "fire", "fireworks", "fire_engine",
"first_quarter_moon", "first_quarter_moon_with_face", "fish", "fishing_pole_and_fish", "fish_cake", "fist",
"five", "flags", "flashlight", "floppy_disk", "flower_playing_cards", "flushed", "foggy", "football",
"fork_and_knife", "fountain", "four", "four_leaf_clover", "fr", "free", "fried_shrimp", "fries", "frog",
"frowning", "fu", "fuelpump", "full_moon", "full_moon_with_face", "game_die", "gb", "gem", "gemini", "ghost",
"gift", "gift_heart", "girl", "globe_with_meridians", "goat", "goberserk", "godmode", "golf", "grapes",
"green_apple", "green_book", "green_heart", "grey_exclamation", "grey_question", "grimacing", "grin",
"grinning", "guardsman", "guitar", "gun", "haircut", "hamburger", "hammer", "hamster", "hand", "handbag",
"hankey", "hash", "hatched_chick", "hatching_chick", "headphones", "heart", "heartbeat", "heartpulse",
"hearts", "heart_decoration", "heart_eyes", "heart_eyes_cat", "hear_no_evil", "heavy_check_mark",
"heavy_division_sign", "heavy_dollar_sign", "heavy_exclamation_mark", "heavy_minus_sign",
"heavy_multiplication_x", "heavy_plus_sign", "helicopter", "herb", "hibiscus", "high_brightness",
"high_heel", "hocho", "honeybee", "honey_pot", "horse", "horse_racing", "hospital", "hotel", "hotsprings",
"hourglass", "hourglass_flowing_sand", "house", "house_with_garden", "hurtrealbad", "hushed", "icecream",
"ice_cream", "id", "ideograph_advantage", "imp", "inbox_tray", "incoming_envelope", "information_desk_person",
"information_source", "innocent", "interrobang", "iphone", "it", "izakaya_lantern", "jack_o_lantern", "japan",
"japanese_castle", "japanese_goblin", "japanese_ogre", "jeans", "joy", "joy_cat", "jp", "key", "keycap_ten",
"kimono", "kiss", "kissing", "kissing_cat", "kissing_closed_eyes", "kissing_face", "kissing_heart",
"kissing_smiling_eyes", "koala", "koko", "kr", "large_blue_circle", "large_blue_diamond", "large_orange_diamond",
"last_quarter_moon", "last_quarter_moon_with_face", "laughing", "leaves", "ledger", "leftwards_arrow_with_hook",
"left_luggage", "left_right_arrow", "lemon", "leo", "leopard", "libra", "light_rail", "link", "lips",
"lipstick", "lock", "lock_with_ink_pen", "lollipop", "loop", "loudspeaker", "love_hotel", "love_letter",
"low_brightness", "m", "mag", "mag_right", "mahjong", "mailbox", "mailbox_closed", "mailbox_with_mail",
"mailbox_with_no_mail", "man", "mans_shoe", "man_with_gua_pi_mao", "man_with_turban", "maple_leaf", "mask",
"massage", "meat_on_bone", "mega", "melon", "memo", "mens", "metal", "metro", "microphone", "microscope",
"milky_way", "minibus", "minidisc", "mobile_phone_off", "moneybag", "money_with_wings", "monkey", "monkey_face",
"monorail", "moon", "mortar_board", "mountain_bicyclist", "mountain_cableway", "mountain_railway",
"mount_fuji", "mouse", "mouse2", "movie_camera", "moyai", "muscle", "mushroom", "musical_keyboard",
"musical_note", "musical_score", "mute", "nail_care", "name_badge", "neckbeard", "necktie",
"negative_squared_cross_mark", "neutral_face", "new", "newspaper", "new_moon", "new_moon_with_face",
"ng", "nine", "non-potable_water", "nose", "notebook", "notebook_with_decorative_cover", "notes", "no_bell",
"no_bicycles", "no_entry", "no_entry_sign", "no_good", "no_mobile_phones", "no_mouth", "no_pedestrians",
"no_smoking", "nut_and_bolt", "o", "o2", "ocean", "octocat", "octopus", "oden", "office", "ok", "ok_hand",
"ok_woman", "older_man", "older_woman", "on", "oncoming_automobile", "oncoming_bus", "oncoming_police_car",
"oncoming_taxi", "one", "open_file_folder", "open_hands", "open_mouth", "ophiuchus", "orange_book",
"outbox_tray", "ox", "pager", "page_facing_up", "page_with_curl", "palm_tree", "panda_face", "paperclip",
"parking", "partly_sunny", "part_alternation_mark", "passport_control", "paw_prints", "peach", "pear",
"pencil", "pencil2", "penguin", "pensive", "performing_arts", "persevere", "person_frowning",
"person_with_blond_hair", "person_with_pouting_face", "phone", "pig", "pig2", "pig_nose", "pill",
"pineapple", "pisces", "pizza", "plus1", "point_down", "point_left", "point_right", "point_up",
"point_up_2", "police_car", "poodle", "poop", "postal_horn", "postbox", "post_office", "potable_water",
"pouch", "poultry_leg", "pound", "pouting_cat", "pray", "princess", "punch", "purple_heart", "purse",
"pushpin", "put_litter_in_its_place", "question", "rabbit", "rabbit2", "racehorse", "radio", "radio_button",
"rage", "rage1", "rage2", "rage3", "rage4", "railway_car", "rainbow", "raised_hand", "raised_hands",
"raising_hand", "ram", "ramen", "rat", "recycle", "red_car", "red_circle", "registered", "relaxed",
"relieved", "repeat", "repeat_one", "restroom", "revolving_hearts", "rewind", "ribbon", "rice", "rice_ball",
"rice_cracker", "rice_scene", "ring", "rocket", "roller_coaster", "rooster", "rose", "rotating_light",
"round_pushpin", "rowboat", "ru", "rugby_football", "runner", "running", "running_shirt_with_sash", "sa",
"sagittarius", "sailboat", "sake", "sandal", "santa", "satellite", "satisfied", "saxophone", "school",
"school_satchel", "scissors", "scorpius", "scream", "scream_cat", "scroll", "seat", "secret", "seedling",
"see_no_evil", "seven", "shaved_ice", "sheep", "shell", "ship", "shipit", "shirt", "shit", "shoe", "shower",
"signal_strength", "six", "six_pointed_star", "ski", "skull", "sleeping", "sleepy", "slot_machine",
"small_blue_diamond", "small_orange_diamond", "small_red_triangle", "small_red_triangle_down", "smile",
"smiley", "smiley_cat", "smile_cat", "smiling_imp", "smirk", "smirk_cat", "smoking", "snail", "snake",
"snowboarder", "snowflake", "snowman", "sob", "soccer", "soon", "sos", "sound", "space_invader", "spades",
"spaghetti", "sparkler", "sparkles", "sparkling_heart", "speaker", "speak_no_evil", "speech_balloon",
"speedboat", "squirrel", "star", "star2", "stars", "station", "statue_of_liberty", "steam_locomotive",
"stew", "straight_ruler", "strawberry", "stuck_out_tongue", "stuck_out_tongue_closed_eyes",
"stuck_out_tongue_winking_eye", "sunflower", "sunglasses", "sunny", "sunrise", "sunrise_over_mountains",
"sun_with_face", "surfer", "sushi", "suspect", "suspension_railway", "sweat", "sweat_drops", "sweat_smile",
"sweet_potato", "swimmer", "symbols", "syringe", "tada", "tanabata_tree", "tangerine", "taurus", "taxi",
"tea", "telephone", "telephone_receiver", "telescope", "tennis", "tent", "thought_balloon", "three",
"thumbsdown", "thumbsup", "ticket", "tiger", "tiger2", "tired_face", "tm", "toilet", "tokyo_tower", "tomato",
"tongue", "top", "tophat", "tractor", "traffic_light", "train", "train2", "tram", "triangular_flag_on_post",
"triangular_ruler", "trident", "triumph", "trolleybus", "trollface", "trophy", "tropical_drink",
"tropical_fish", "truck", "trumpet", "tshirt", "tulip", "turtle", "tv", "twisted_rightwards_arrows",
"two", "two_hearts", "two_men_holding_hands", "two_women_holding_hands", "u5272", "u5408", "u55b6",
"u6307", "u6708", "u6709", "u6e80", "u7121", "u7533", "u7981", "u7a7a", "uk", "umbrella", "unamused",
"underage", "unlock", "up", "us", "v", "vertical_traffic_light", "vhs", "vibration_mode", "video_camera",
"video_game", "violin", "virgo", "volcano", "vs", "walking", "waning_crescent_moon", "waning_gibbous_moon",
"warning", "watch", "watermelon", "water_buffalo", "wave", "wavy_dash", "waxing_crescent_moon",
"waxing_gibbous_moon", "wc", "weary", "wedding", "whale", "whale2", "wheelchair", "white_check_mark",
"white_circle", "white_flower", "white_square", "white_square_button", "wind_chime", "wine_glass",
"wink", "wolf", "woman", "womans_clothes", "womans_hat", "womens", "worried", "wrench", "x", "yellow_heart",
"yen", "yum", "zap", "zero", "zzz",
}
class EmojifyExtension(Extension):
    """Markdown extension that plugs emoji shortcode expansion into a
    ``Markdown`` instance.

    Registering installs an ``EmojifyPreprocessor`` at the tail of the
    preprocessor chain so ``:name:`` tokens are rewritten before the
    markdown body is parsed.
    """

    def extendMarkdown(self, md, md_globals):
        """Hook called by python-markdown during ``Markdown`` setup.

        ``md_globals`` is required by the (legacy) extension API but is
        not used here.
        """
        md.registerExtension(self)
        preprocessor = EmojifyPreprocessor(md)
        # '_end' places the preprocessor after every built-in one.
        md.preprocessors.add('emojify', preprocessor, '_end')
class EmojifyPreprocessor(Preprocessor):
    """Markdown preprocessor that expands ``:name:`` emoji shortcodes.

    A token is replaced only when *name* is present in the module-level
    ``emojis_set``; any other ``:word:`` text passes through untouched.
    Recognized names are rewritten as markdown image links pointing at
    ``emojis_path + name + '.png'``.
    """

    # Shortcode grammar: lowercase letters, digits, '+', '-' and '_'
    # between colons (e.g. ":+1:", ":heart_eyes:").  Raw string avoids
    # the invalid-escape-sequence DeprecationWarning the original
    # '\+'/'\-' escapes trigger on modern Python; compiled once at class
    # definition instead of on every run() call.
    EMOJI_RE = re.compile(r":([a-z0-9+\-_]+):")

    def run(self, lines):
        """Transform *lines* (list of str) and return a new list.

        Blank/whitespace-only lines are copied through without regex
        processing.
        """
        new_lines = []

        def emojify(match):
            emoji = match.group(1)
            if emoji not in emojis_set:
                # Unknown name: keep the original ":word:" text verbatim.
                return match.group(0)
            url = emojis_path + emoji + u'.png'
            # BUG FIX: the original returned ''.format(...) — always the
            # empty string — which silently deleted every recognized
            # shortcode (and left `url` dead).  Emit a markdown image so
            # the later parsing stage renders the emoji.
            return u'![{emoji}]({url})'.format(emoji=emoji, url=url)

        for line in lines:
            if line.strip():
                line = self.EMOJI_RE.sub(emojify, line)
            new_lines.append(line)
        return new_lines
| CoolCloud/taiga-back | taiga/mdrender/extensions/emojify.py | Python | agpl-3.0 | 14,195 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.